Add vendor dir (#16) (#4)

* Add vendor dir so builds don't require dep

* Pin specific go-ethereum version
Matt K 2018-01-29 13:44:18 -06:00 committed by GitHub
parent 82119b3c4b
commit 293dd2e848
4319 changed files with 1448696 additions and 26 deletions

3
.gitignore vendored

@@ -1,11 +1,10 @@
.idea
Gododir/godobin-*
test_data_dir/
vendor/
contracts/*
environments/*.toml
Vagrantfile
vagrant_bootstrap.sh
vagrant*.sh
.vagrant
test_scripts/
vulcanizedb

.travis.yml

@@ -10,8 +10,6 @@ addons:
go_import_path: github.com/vulcanize/vulcanizedb
before_install:
# dep
- go get -u github.com/golang/dep/cmd/dep
# ginkgo
- go get -u github.com/onsi/ginkgo/ginkgo
# migrate
@@ -23,9 +21,6 @@ before_install:
- tar -xzf geth-linux-amd64-1.7.2-1db4ecdc.tar.gz
- sudo cp geth-linux-amd64-1.7.2-1db4ecdc/geth /usr/local/bin
install:
- dep ensure
before_script:
- ./scripts/setup
- nohup ./scripts/start_private_blockchain </dev/null &

154
Gopkg.lock generated

@@ -21,7 +21,32 @@
[[projects]]
name = "github.com/ethereum/go-ethereum"
packages = [".","accounts/abi","common","common/hexutil","common/math","common/mclock","core/types","crypto","crypto/ecies","crypto/secp256k1","crypto/sha3","ethclient","event","log","metrics","p2p","p2p/discover","p2p/discv5","p2p/nat","p2p/netutil","params","rlp","rpc","trie"]
packages = [
".",
"accounts/abi",
"common",
"common/hexutil",
"common/math",
"common/mclock",
"core/types",
"crypto",
"crypto/ecies",
"crypto/secp256k1",
"crypto/sha3",
"ethclient",
"event",
"log",
"metrics",
"p2p",
"p2p/discover",
"p2p/discv5",
"p2p/nat",
"p2p/netutil",
"params",
"rlp",
"rpc",
"trie"
]
revision = "4bb3c89d44e372e6a9ab85a8be0c9345265c763a"
version = "v1.7.3"
@@ -52,13 +77,31 @@
[[projects]]
branch = "master"
name = "github.com/hashicorp/hcl"
packages = [".","hcl/ast","hcl/parser","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"]
packages = [
".",
"hcl/ast",
"hcl/parser",
"hcl/scanner",
"hcl/strconv",
"hcl/token",
"json/parser",
"json/scanner",
"json/token"
]
revision = "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8"
[[projects]]
branch = "master"
name = "github.com/huin/goupnp"
packages = [".","dcps/internetgateway1","dcps/internetgateway2","httpu","scpd","soap","ssdp"]
packages = [
".",
"dcps/internetgateway1",
"dcps/internetgateway2",
"httpu",
"scpd",
"soap",
"ssdp"
]
revision = "dceda08e705b2acee36aab47d765ed801f64cfc7"
[[projects]]
@@ -76,13 +119,19 @@
[[projects]]
branch = "master"
name = "github.com/jmoiron/sqlx"
packages = [".","reflectx"]
packages = [
".",
"reflectx"
]
revision = "99f3ad6d85ae53d0fecf788ab62d0e9734b3c117"
[[projects]]
branch = "master"
name = "github.com/lib/pq"
packages = [".","oid"]
packages = [
".",
"oid"
]
revision = "83612a56d3dd153a94a629cd64925371c9adad78"
[[projects]]
@@ -105,13 +154,46 @@
[[projects]]
name = "github.com/onsi/ginkgo"
packages = [".","config","internal/codelocation","internal/containernode","internal/failer","internal/leafnodes","internal/remote","internal/spec","internal/spec_iterator","internal/specrunner","internal/suite","internal/testingtproxy","internal/writer","reporters","reporters/stenographer","reporters/stenographer/support/go-colorable","reporters/stenographer/support/go-isatty","types"]
packages = [
".",
"config",
"internal/codelocation",
"internal/containernode",
"internal/failer",
"internal/leafnodes",
"internal/remote",
"internal/spec",
"internal/spec_iterator",
"internal/specrunner",
"internal/suite",
"internal/testingtproxy",
"internal/writer",
"reporters",
"reporters/stenographer",
"reporters/stenographer/support/go-colorable",
"reporters/stenographer/support/go-isatty",
"types"
]
revision = "9eda700730cba42af70d53180f9dcce9266bc2bc"
version = "v1.4.0"
[[projects]]
name = "github.com/onsi/gomega"
packages = [".","format","ghttp","internal/assertion","internal/asyncassertion","internal/oraclematcher","internal/testingtsupport","matchers","matchers/support/goraph/bipartitegraph","matchers/support/goraph/edge","matchers/support/goraph/node","matchers/support/goraph/util","types"]
packages = [
".",
"format",
"ghttp",
"internal/assertion",
"internal/asyncassertion",
"internal/oraclematcher",
"internal/testingtsupport",
"matchers",
"matchers/support/goraph/bipartitegraph",
"matchers/support/goraph/edge",
"matchers/support/goraph/node",
"matchers/support/goraph/util",
"types"
]
revision = "c893efa28eb45626cdaa76c9f653b62488858837"
version = "v1.2.0"
@@ -124,7 +206,10 @@
[[projects]]
branch = "master"
name = "github.com/rcrowley/go-metrics"
packages = [".","exp"]
packages = [
".",
"exp"
]
revision = "e181e095bae94582363434144c61a9653aff6e50"
[[projects]]
@@ -135,7 +220,10 @@
[[projects]]
name = "github.com/spf13/afero"
packages = [".","mem"]
packages = [
".",
"mem"
]
revision = "bb8f1927f2a9d3ab41c9340aa034f6b803f4359c"
version = "v1.0.2"
@@ -172,13 +260,32 @@
[[projects]]
branch = "master"
name = "github.com/syndtr/goleveldb"
packages = ["leveldb","leveldb/cache","leveldb/comparer","leveldb/errors","leveldb/filter","leveldb/iterator","leveldb/journal","leveldb/memdb","leveldb/opt","leveldb/storage","leveldb/table","leveldb/util"]
packages = [
"leveldb",
"leveldb/cache",
"leveldb/comparer",
"leveldb/errors",
"leveldb/filter",
"leveldb/iterator",
"leveldb/journal",
"leveldb/memdb",
"leveldb/opt",
"leveldb/storage",
"leveldb/table",
"leveldb/util"
]
revision = "adf24ef3f94bd13ec4163060b21a5678f22b429b"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","html","html/atom","html/charset","websocket"]
packages = [
"context",
"html",
"html/atom",
"html/charset",
"websocket"
]
revision = "faacc1b5e36e3ff02cbec9661c69ac63dd5a83ad"
[[projects]]
@@ -190,7 +297,28 @@
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["encoding","encoding/charmap","encoding/htmlindex","encoding/internal","encoding/internal/identifier","encoding/japanese","encoding/korean","encoding/simplifiedchinese","encoding/traditionalchinese","encoding/unicode","internal/gen","internal/tag","internal/triegen","internal/ucd","internal/utf8internal","language","runes","transform","unicode/cldr","unicode/norm"]
packages = [
"encoding",
"encoding/charmap",
"encoding/htmlindex",
"encoding/internal",
"encoding/internal/identifier",
"encoding/japanese",
"encoding/korean",
"encoding/simplifiedchinese",
"encoding/traditionalchinese",
"encoding/unicode",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"internal/utf8internal",
"language",
"runes",
"transform",
"unicode/cldr",
"unicode/norm"
]
revision = "be25de41fadfae372d6470bda81ca6beb55ef551"
[[projects]]
@@ -220,6 +348,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "641a04f7f89572adf1ebd051d0839eb5b03fdc67bb50838bfac98832246636f0"
inputs-digest = "9f01f0f0bbce801c579dccd45229f8a78a21e229e5c93f8a54cb22748b0b25b2"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml

@@ -36,3 +36,7 @@
[[constraint]]
name = "github.com/spf13/cobra"
version = "0.0.1"
[[constraint]]
name = "github.com/ethereum/go-ethereum"
version = "1.7.3"

README.md

@@ -1,6 +1,6 @@
# Vulcanize DB
[![Build Status](https://travis-ci.com/8thlight/vulcanizedb.svg?token=3psFYN2533rYjhRbvjte&branch=master)](https://travis-ci.com/8thlight/vulcanizedb)
[![Build Status](https://travis-ci.org/vulcanize/VulcanizeDB.svg?branch=master)](https://travis-ci.org/vulcanize/VulcanizeDB)
### Dependencies
@@ -10,11 +10,8 @@
- https://ethereum.github.io/go-ethereum/downloads/
### Installation
```
git clone https://github.com/vulcanize/vulcanizedb.git $GOPATH/src/github.com/vulcanize/vulcanizedb
cd $GOPATH/src/github.com/vulcanize/vulcanizedb
make build
```
`go get github.com/vulcanize/vulcanizedb`
### Setting up the Databases
1. Install Postgres

5
vendor/github.com/BurntSushi/toml/.gitignore generated vendored Normal file

@@ -0,0 +1,5 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test

15
vendor/github.com/BurntSushi/toml/.travis.yml generated vendored Normal file

@@ -0,0 +1,15 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test

3
vendor/github.com/BurntSushi/toml/COMPATIBLE generated vendored Normal file

@@ -0,0 +1,3 @@
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)

14
vendor/github.com/BurntSushi/toml/COPYING generated vendored Normal file

@@ -0,0 +1,14 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

19
vendor/github.com/BurntSushi/toml/Makefile generated vendored Normal file

@@ -0,0 +1,19 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master

218
vendor/github.com/BurntSushi/toml/README.md generated vendored Normal file

@@ -0,0 +1,218 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Spec: https://github.com/toml-lang/toml
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: https://godoc.org/github.com/BurntSushi/toml
Installation:
```bash
go get github.com/BurntSushi/toml
```
Try the toml validator:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples
This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
```
You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
```toml
some_key_NAME = "wat"
```
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
}
```
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```
And the corresponding Go types are:
```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
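For instance, a minimal sketch of that fallback (the `config` type and the key used here are illustrative, not part of this package):
```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	// There is no exact match for the TOML key "title", so the
	// case insensitive fallback maps it onto this field.
	Title string
}

func main() {
	var c config
	if _, err := toml.Decode(`title = "TOML Example"`, &c); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(c.Title) // prints "TOML Example"
}
```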
A working example of the above can be found in `_examples/example.{go,toml}`.

61
vendor/github.com/BurntSushi/toml/_examples/example.go generated vendored Normal file

@@ -0,0 +1,61 @@
package main
import (
"fmt"
"time"
"github.com/BurntSushi/toml"
)
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
func main() {
var config tomlConfig
if _, err := toml.DecodeFile("example.toml", &config); err != nil {
fmt.Println(err)
return
}
fmt.Printf("Title: %s\n", config.Title)
fmt.Printf("Owner: %s (%s, %s), Born: %s\n",
config.Owner.Name, config.Owner.Org, config.Owner.Bio,
config.Owner.DOB)
fmt.Printf("Database: %s %v (Max conn. %d), Enabled? %v\n",
config.DB.Server, config.DB.Ports, config.DB.ConnMax,
config.DB.Enabled)
for serverName, server := range config.Servers {
fmt.Printf("Server: %s (%s, %s)\n", serverName, server.IP, server.DC)
}
fmt.Printf("Client data: %v\n", config.Clients.Data)
fmt.Printf("Client hosts: %v\n", config.Clients.Hosts)
}

vendor/github.com/BurntSushi/toml/_examples/example.toml generated vendored Normal file

@@ -0,0 +1,35 @@
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]

22
vendor/github.com/BurntSushi/toml/_examples/hard.toml generated vendored Normal file

@@ -0,0 +1,22 @@
# Test file for TOML
# Only this one tries to emulate a TOML file written by a user of the kind of parser writers probably hate
# This part you'll really hate
[the]
test_string = "You'll hate me after this - #" # " Annoying, isn't it?
[the.hard]
test_array = [ "] ", " # "] # ] There you go, parse this!
test_array2 = [ "Test #11 ]proved that", "Experiment #9 was a success" ]
# You didn't think it'd as easy as chucking out the last #, did you?
another_test_string = " Same thing, but with a string #"
harder_test_string = " And when \"'s are in the string, along with # \"" # "and comments are there too"
# Things will get harder
[the.hard.bit#]
what? = "You don't think some user won't do that?"
multi_line_array = [
"]",
# ] Oh yes I did
]

vendor/github.com/BurntSushi/toml/_examples/implicit.toml generated vendored Normal file

@@ -0,0 +1,4 @@
# [x] you
# [x.y] don't
# [x.y.z] need these
[x.y.z.w] # for this to work

vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml generated vendored Normal file

@@ -0,0 +1,6 @@
# DO NOT WANT
[fruit]
type = "apple"
[fruit.type]
apple = "yes"

vendor/github.com/BurntSushi/toml/_examples/invalid.toml generated vendored Normal file

@@ -0,0 +1,35 @@
# This is an INVALID TOML document. Boom.
# Can you spot the error without help?
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T7:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]

vendor/github.com/BurntSushi/toml/_examples/readme1.toml generated vendored Normal file

@@ -0,0 +1,5 @@
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z

vendor/github.com/BurntSushi/toml/_examples/readme2.toml generated vendored Normal file

@@ -0,0 +1 @@
some_key_NAME = "wat"

vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING generated vendored Normal file

@@ -0,0 +1,14 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md generated vendored Normal file

@@ -0,0 +1,13 @@
# Implements the TOML test suite interface
This is an implementation of the interface expected by
[toml-test](https://github.com/BurntSushi/toml-test) for my
[toml parser written in Go](https://github.com/BurntSushi/toml).
In particular, it maps TOML data on `stdin` to a JSON format on `stdout`.
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Compatible with `toml-test` version
[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)

vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go generated vendored Normal file

@@ -0,0 +1,90 @@
// Command toml-test-decoder satisfies the toml-test interface for testing
// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout.
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"os"
"path"
"time"
"github.com/BurntSushi/toml"
)
func init() {
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
}
func usage() {
log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0]))
flag.PrintDefaults()
os.Exit(1)
}
func main() {
if flag.NArg() != 0 {
flag.Usage()
}
var tmp interface{}
if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil {
log.Fatalf("Error decoding TOML: %s", err)
}
typedTmp := translate(tmp)
if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil {
log.Fatalf("Error encoding JSON: %s", err)
}
}
func translate(tomlData interface{}) interface{} {
switch orig := tomlData.(type) {
case map[string]interface{}:
typed := make(map[string]interface{}, len(orig))
for k, v := range orig {
typed[k] = translate(v)
}
return typed
case []map[string]interface{}:
typed := make([]map[string]interface{}, len(orig))
for i, v := range orig {
typed[i] = translate(v).(map[string]interface{})
}
return typed
case []interface{}:
typed := make([]interface{}, len(orig))
for i, v := range orig {
typed[i] = translate(v)
}
// We don't really need to tag arrays, but let's be future proof.
// (If TOML ever supports tuples, we'll need this.)
return tag("array", typed)
case time.Time:
return tag("datetime", orig.Format("2006-01-02T15:04:05Z"))
case bool:
return tag("bool", fmt.Sprintf("%v", orig))
case int64:
return tag("integer", fmt.Sprintf("%d", orig))
case float64:
return tag("float", fmt.Sprintf("%v", orig))
case string:
return tag("string", orig)
}
panic(fmt.Sprintf("Unknown type: %T", tomlData))
}
func tag(typeName string, data interface{}) map[string]interface{} {
return map[string]interface{}{
"type": typeName,
"value": data,
}
}

vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING generated vendored Normal file

@@ -0,0 +1,14 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md generated vendored Normal file

@@ -0,0 +1,13 @@
# Implements the TOML test suite interface for TOML encoders
This is an implementation of the interface expected by
[toml-test](https://github.com/BurntSushi/toml-test) for the
[TOML encoder](https://github.com/BurntSushi/toml).
In particular, it maps JSON data on `stdin` to a TOML format on `stdout`.
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Compatible with `toml-test` version
[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)

vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go generated vendored Normal file

@@ -0,0 +1,131 @@
// Command toml-test-encoder satisfies the toml-test interface for testing
// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout.
package main
import (
"encoding/json"
"flag"
"log"
"os"
"path"
"strconv"
"time"
"github.com/BurntSushi/toml"
)
func init() {
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
}
func usage() {
log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0]))
flag.PrintDefaults()
os.Exit(1)
}
func main() {
if flag.NArg() != 0 {
flag.Usage()
}
var tmp interface{}
if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil {
log.Fatalf("Error decoding JSON: %s", err)
}
tomlData := translate(tmp)
if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil {
log.Fatalf("Error encoding TOML: %s", err)
}
}
func translate(typedJson interface{}) interface{} {
switch v := typedJson.(type) {
case map[string]interface{}:
if len(v) == 2 && in("type", v) && in("value", v) {
return untag(v)
}
m := make(map[string]interface{}, len(v))
for k, v2 := range v {
m[k] = translate(v2)
}
return m
case []interface{}:
tabArray := make([]map[string]interface{}, len(v))
for i := range v {
if m, ok := translate(v[i]).(map[string]interface{}); ok {
tabArray[i] = m
} else {
log.Fatalf("JSON arrays may only contain objects. This " +
"corresponds to only tables being allowed in " +
"TOML table arrays.")
}
}
return tabArray
}
log.Fatalf("Unrecognized JSON format '%T'.", typedJson)
panic("unreachable")
}
func untag(typed map[string]interface{}) interface{} {
t := typed["type"].(string)
v := typed["value"]
switch t {
case "string":
return v.(string)
case "integer":
v := v.(string)
n, err := strconv.Atoi(v)
if err != nil {
log.Fatalf("Could not parse '%s' as integer: %s", v, err)
}
return n
case "float":
v := v.(string)
f, err := strconv.ParseFloat(v, 64)
if err != nil {
log.Fatalf("Could not parse '%s' as float64: %s", v, err)
}
return f
case "datetime":
v := v.(string)
t, err := time.Parse("2006-01-02T15:04:05Z", v)
if err != nil {
log.Fatalf("Could not parse '%s' as a datetime: %s", v, err)
}
return t
case "bool":
v := v.(string)
switch v {
case "true":
return true
case "false":
return false
}
log.Fatalf("Could not parse '%s' as a boolean.", v)
case "array":
v := v.([]interface{})
array := make([]interface{}, len(v))
for i := range v {
if m, ok := v[i].(map[string]interface{}); ok {
array[i] = untag(m)
} else {
log.Fatalf("Arrays may only contain other arrays or "+
"primitive values, but found a '%T'.", m)
}
}
return array
}
log.Fatalf("Unrecognized tag type '%s'.", t)
panic("unreachable")
}
func in(key string, m map[string]interface{}) bool {
_, ok := m[key]
return ok
}

14
vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING generated vendored Normal file

@@ -0,0 +1,14 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

21
vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md generated vendored Normal file

@@ -0,0 +1,21 @@
# TOML Validator
If Go is installed, it's simple to try it out:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
You can see the types of every key in a TOML file with:
```bash
tomlv -types some-toml-file.toml
```
At the moment, only one error message is reported at a time. Error messages
include line numbers. No output means that the files given are valid TOML, or
there is a bug in `tomlv`.
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)

61
vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go generated vendored Normal file

@@ -0,0 +1,61 @@
// Command tomlv validates TOML documents and prints each key's type.
package main
import (
"flag"
"fmt"
"log"
"os"
"path"
"strings"
"text/tabwriter"
"github.com/BurntSushi/toml"
)
var (
flagTypes = false
)
func init() {
log.SetFlags(0)
flag.BoolVar(&flagTypes, "types", flagTypes,
"When set, the types of every defined key will be shown.")
flag.Usage = usage
flag.Parse()
}
func usage() {
log.Printf("Usage: %s toml-file [ toml-file ... ]\n",
path.Base(os.Args[0]))
flag.PrintDefaults()
os.Exit(1)
}
func main() {
if flag.NArg() < 1 {
flag.Usage()
}
for _, f := range flag.Args() {
var tmp interface{}
md, err := toml.DecodeFile(f, &tmp)
if err != nil {
log.Fatalf("Error in '%s': %s", f, err)
}
if flagTypes {
printTypes(md)
}
}
}
func printTypes(md toml.MetaData) {
tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
for _, key := range md.Keys() {
fmt.Fprintf(tabw, "%s%s\t%s\n",
strings.Repeat(" ", len(key)-1), key, md.Type(key...))
}
tabw.Flush()
}

509
vendor/github.com/BurntSushi/toml/decode.go generated vendored Normal file

@@ -0,0 +1,509 @@
package toml
import (
"fmt"
"io"
"io/ioutil"
"math"
"reflect"
"strings"
"time"
)
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
return err
}
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
undecoded interface{}
context Key
}
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
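// A minimal sketch of delayed decoding with Primitive (illustrative only;
// the types and keys below are not part of this package):
//
//	var raw struct{ Server toml.Primitive }
//	md, err := toml.Decode("[Server]\nip = \"10.0.0.1\"", &raw)
//	if err != nil {
//		log.Fatal(err)
//	}
//	var srv struct {
//		IP string `toml:"ip"`
//	}
//	err = md.PrimitiveDecode(raw.Server, &srv) // decoded on demand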
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
p, err := parse(data)
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
return md, md.unify(p.mapping, indirect(rv))
}
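// A minimal sketch of Decode in use (illustrative only; the struct and
// TOML literal below are not part of this package):
//
//	var conf struct {
//		Age  int
//		Cats []string
//	}
//	if _, err := toml.Decode("Age = 25\nCats = [\"Cauchy\"]", &conf); err != nil {
//		log.Fatal(err)
//	}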
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
copy(context, md.context)
rv.Set(reflect.ValueOf(Primitive{
undecoded: data,
context: context,
}))
return nil
}
// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
}
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok {
return md.unifyText(data, v)
}
// BUG(burntsushi)
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML
// hash or array. In particular, the unmarshaler should only be applied
// to primitive TOML values. But at this point, it will be applied to
// all kinds of values and produce an incorrect error whenever those values
// are hashes or arrays (including arrays of tables).
k := rv.Kind()
// laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
switch k {
case reflect.Ptr:
elem := reflect.New(rv.Type().Elem())
err := md.unify(data, reflect.Indirect(elem))
if err != nil {
return err
}
rv.Set(elem)
return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
return md.unifyMap(data, rv)
case reflect.Array:
return md.unifyArray(data, rv)
case reflect.Slice:
return md.unifySlice(data, rv)
case reflect.String:
return md.unifyString(data, rv)
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("unsupported type %s", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if mapping == nil {
return nil
}
return e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
}
for key, datum := range tmap {
var f *field
fields := cachedTypeFields(rv.Type())
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv := rv
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("cannot write unexported field %s.%s",
rv.Type().String(), f.name)
}
}
}
return nil
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if tmap == nil {
return nil
}
return badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval)
}
return nil
}
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
sliceLen := datav.Len()
if sliceLen != rv.Len() {
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
}
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
}
rv.SetLen(n)
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len()
for i := 0; i < sliceLen; i++ {
v := data.Index(i).Interface()
sliceval := indirect(rv.Index(i))
if err := md.unify(v, sliceval); err != nil {
return err
}
}
return nil
}
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(float64); ok {
switch rv.Kind() {
case reflect.Float32:
fallthrough
case reflect.Float64:
rv.SetFloat(num)
default:
panic("bug")
}
return nil
}
return badtype("float", data)
}
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("value %d is out of range for int32", num)
}
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
}
return nil
}
return badtype("integer", data)
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
}
s = string(text)
case fmt.Stringer:
s = sdata.String()
case string:
s = sdata
case bool:
s = fmt.Sprintf("%v", sdata)
case int64:
s = fmt.Sprintf("%d", sdata)
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
}
return nil
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok {
return pv
}
}
return v
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return indirect(reflect.Indirect(v))
}
func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(TextUnmarshaler); ok {
return true
}
return false
}
func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}

121
vendor/github.com/BurntSushi/toml/decode_meta.go generated vendored Normal file

@@ -0,0 +1,121 @@
package toml
import "strings"
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
}
// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchially. e.g.,
//
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key is given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
return false
}
}
return true
}
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
return typ.typeString()
}
return ""
}
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
return md.keys
}
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
undecoded = append(undecoded, key)
}
}
return undecoded
}
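// A minimal sketch of Undecoded (illustrative only; the names below are
// not part of this package). Keys present in the document but absent from
// the target type are reported:
//
//	var v struct{ Known int }
//	md, _ := toml.Decode("Known = 1\nExtra = 2", &v)
//	fmt.Println(md.Undecoded()) // prints "[Extra]"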

1447
vendor/github.com/BurntSushi/toml/decode_test.go generated vendored Normal file

File diff suppressed because it is too large

27
vendor/github.com/BurntSushi/toml/doc.go generated vendored Normal file

@@ -0,0 +1,27 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/toml-lang/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
Testing
There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml

568
vendor/github.com/BurntSushi/toml/encode.go generated vendored Normal file

@@ -0,0 +1,568 @@
package toml
import (
"bufio"
"errors"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
type tomlEncodeError struct{ error }
var (
errArrayMixedElementTypes = errors.New(
"toml: cannot encode array with mixed element types")
errArrayNilElement = errors.New(
"toml: cannot encode array with nil element")
errNonString = errors.New(
"toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"",
"\\", "\\\\",
)
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
// A single indentation level. By default it is two spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
}
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
Indent: " ",
}
}
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
return err
}
return enc.w.Flush()
}
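// A minimal sketch of Encode in use (illustrative only; the buffer and map
// below are not part of this package):
//
//	var buf bytes.Buffer
//	if err := toml.NewEncoder(&buf).Encode(map[string]string{"name": "toml"}); err != nil {
//		log.Fatal(err)
//	}
//	// buf.String() is now: name = "toml"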
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
defer func() {
if r := recover(); r != nil {
if terr, ok := r.(tomlEncodeError); ok {
err = terr.error
return
}
panic(r)
}
}()
enc.encode(key, rv)
return nil
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we use that.
// Basically, this prevents the encoder from handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) {
case time.Time, TextMarshaler:
enc.keyEqElement(key, rv)
return
}
k := rv.Kind()
switch k {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
enc.keyEqElement(key, rv)
}
case reflect.Interface:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Map:
if rv.IsNil() {
return
}
enc.eTable(key, rv)
case reflect.Ptr:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Struct:
enc.eTable(key, rv)
default:
panic(e("unsupported type for key '%s': %s", key, k))
}
}
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
case time.Time:
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
enc.writeQuoted(string(s))
}
return
}
switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String:
enc.writeQuoted(rv.String())
default:
panic(e("unexpected primitive type: %s", rv.Kind()))
}
}
// By the TOML spec, all floats must have a decimal with at least one
// number on either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
}
return fstr
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
elem := rv.Index(i)
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
}
}
enc.wf("]")
}
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
trv := rv.Index(i)
if isNil(trv) {
continue
}
panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
enc.eMapOrStruct(key, trv)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
}
enc.eMapOrStruct(key, rv)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
enc.eMap(key, rv)
case reflect.Struct:
enc.eStruct(key, rv)
default:
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
}
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
}
}
var writeMapKeys = func(mapKeys []string) {
sort.Strings(mapKeys)
for _, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) {
// Don't write anything for nil fields.
continue
}
enc.encode(key.add(mapKey), mrv)
}
}
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub)
}
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that
// table (not the one we're writing here).
rt := rv.Type()
var fieldsDirect, fieldsSub [][]int
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
// skip unexported fields
if f.PkgPath != "" && !f.Anonymous {
continue
}
frv := rv.Field(i)
if f.Anonymous {
t := f.Type
switch t.Kind() {
case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index)
continue
}
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct &&
getOptions(f.Tag).name == "" {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index)
}
continue
}
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
}
}
if typeIsHash(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
addFields(rt, rv, nil)
var writeFields = func(fields [][]int) {
for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields.
continue
}
opts := getOptions(sft.Tag)
if opts.skip {
continue
}
keyName := sft.Name
if opts.name != "" {
keyName = opts.name
}
if opts.omitempty && isEmpty(sf) {
continue
}
if opts.omitzero && isZero(sf) {
continue
}
enc.encode(key.add(keyName), sf)
}
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
}
// tomlTypeOfGo returns the TOML type of the Go value's type. It is used to
// determine whether the types of array elements are mixed (which is
// forbidden). The returned type may be `nil`, which means no concrete TOML
// type could be found; a nil Go value, for instance, has no TOML type and
// is illegal as an array element.
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
switch rv.Kind() {
case reflect.Bool:
return tomlBool
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64:
return tomlInteger
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
}
return tomlArray
case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem())
case reflect.String:
return tomlString
case reflect.Map:
return tomlHash
case reflect.Struct:
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
case TextMarshaler:
return tomlString
default:
return tomlHash
}
default:
panic("unexpected reflect.Kind: " + rv.Kind().String())
}
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil {
encPanic(errArrayNilElement)
}
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType
}
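// Illustrative sketch (not part of the upstream file): a homogeneous slice
// yields its element's TOML type; a mixed one would panic via encPanic.
func exampleTomlArrayType() {
	_ = tomlArrayType(reflect.ValueOf([]int{1, 2, 3})) // tomlInteger
	// tomlArrayType(reflect.ValueOf([]interface{}{1, "a"})) would panic
	// with errArrayMixedElementTypes.
}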
type tagOptions struct {
skip bool // "-"
name string
omitempty bool
omitzero bool
}
func getOptions(tag reflect.StructTag) tagOptions {
t := tag.Get("toml")
if t == "-" {
return tagOptions{skip: true}
}
var opts tagOptions
parts := strings.Split(t, ",")
opts.name = parts[0]
for _, s := range parts[1:] {
switch s {
case "omitempty":
opts.omitempty = true
case "omitzero":
opts.omitzero = true
}
}
return opts
}
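// Illustrative sketch (not part of the upstream file): how hypothetical
// `toml` struct tags decompose into tagOptions.
func exampleGetOptions() {
	type user struct {
		Name string `toml:"name,omitempty"`
		Skip int    `toml:"-"`
	}
	rt := reflect.TypeOf(user{})
	name, _ := rt.FieldByName("Name")
	skip, _ := rt.FieldByName("Skip")
	_ = getOptions(name.Tag) // tagOptions{name: "name", omitempty: true}
	_ = getOptions(skip.Tag) // tagOptions{skip: true}
}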
func isZero(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return rv.Uint() == 0
case reflect.Float32, reflect.Float64:
return rv.Float() == 0.0
}
return false
}
func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Bool:
return !rv.Bool()
}
return false
}
func (enc *Encoder) newline() {
if enc.hasWritten {
enc.wf("\n")
}
}
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
enc.newline()
}
func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
encPanic(err)
}
enc.hasWritten = true
}
func (enc *Encoder) indentStr(key Key) string {
return strings.Repeat(enc.Indent, len(key)-1)
}
func encPanic(err error) {
panic(tomlEncodeError{err})
}
func eindirect(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
return eindirect(v.Elem())
default:
return v
}
}
func isNil(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return rv.IsNil()
default:
return false
}
}
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}

615
vendor/github.com/BurntSushi/toml/encode_test.go generated vendored Normal file
View File

@ -0,0 +1,615 @@
package toml
import (
"bytes"
"fmt"
"log"
"net"
"testing"
"time"
)
func TestEncodeRoundTrip(t *testing.T) {
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time
Ipaddress net.IP
}
var inputs = Config{
13,
[]string{"one", "two", "three"},
3.145,
[]int{11, 2, 3, 4},
time.Now(),
net.ParseIP("192.168.59.254"),
}
var firstBuffer bytes.Buffer
e := NewEncoder(&firstBuffer)
err := e.Encode(inputs)
if err != nil {
t.Fatal(err)
}
var outputs Config
if _, err := Decode(firstBuffer.String(), &outputs); err != nil {
t.Logf("Could not decode:\n-----\n%s\n-----\n",
firstBuffer.String())
t.Fatal(err)
}
// could test each value individually, but I'm lazy
var secondBuffer bytes.Buffer
e2 := NewEncoder(&secondBuffer)
err = e2.Encode(outputs)
if err != nil {
t.Fatal(err)
}
if firstBuffer.String() != secondBuffer.String() {
t.Error(
firstBuffer.String(),
"\n\n is not identical to\n\n",
secondBuffer.String())
}
}
// XXX(burntsushi)
// I think these tests probably should be removed. They are good, but they
// ought to be made obsolete by toml-test.
func TestEncode(t *testing.T) {
type Embedded struct {
Int int `toml:"_int"`
}
type NonStruct int
date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600))
dateStr := "2014-05-11T19:30:40Z"
tests := map[string]struct {
input interface{}
wantOutput string
wantError error
}{
"bool field": {
input: struct {
BoolTrue bool
BoolFalse bool
}{true, false},
wantOutput: "BoolTrue = true\nBoolFalse = false\n",
},
"int fields": {
input: struct {
Int int
Int8 int8
Int16 int16
Int32 int32
Int64 int64
}{1, 2, 3, 4, 5},
wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n",
},
"uint fields": {
input: struct {
Uint uint
Uint8 uint8
Uint16 uint16
Uint32 uint32
Uint64 uint64
}{1, 2, 3, 4, 5},
wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" +
"\nUint64 = 5\n",
},
"float fields": {
input: struct {
Float32 float32
Float64 float64
}{1.5, 2.5},
wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n",
},
"string field": {
input: struct{ String string }{"foo"},
wantOutput: "String = \"foo\"\n",
},
"string field and unexported field": {
input: struct {
String string
unexported int
}{"foo", 0},
wantOutput: "String = \"foo\"\n",
},
"datetime field in UTC": {
input: struct{ Date time.Time }{date},
wantOutput: fmt.Sprintf("Date = %s\n", dateStr),
},
"datetime field as primitive": {
// Using a map here to fail if isStructOrMap() returns true for
// time.Time.
input: map[string]interface{}{
"Date": date,
"Int": 1,
},
wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr),
},
"array fields": {
input: struct {
IntArray0 [0]int
IntArray3 [3]int
}{[0]int{}, [3]int{1, 2, 3}},
wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n",
},
"slice fields": {
input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{
nil, []int{}, []int{1, 2, 3},
},
wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n",
},
"datetime slices": {
input: struct{ DatetimeSlice []time.Time }{
[]time.Time{date, date},
},
wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n",
dateStr, dateStr),
},
"nested arrays and slices": {
input: struct {
SliceOfArrays [][2]int
ArrayOfSlices [2][]int
SliceOfArraysOfSlices [][2][]int
ArrayOfSlicesOfArrays [2][][2]int
SliceOfMixedArrays [][2]interface{}
ArrayOfMixedSlices [2][]interface{}
}{
[][2]int{{1, 2}, {3, 4}},
[2][]int{{1, 2}, {3, 4}},
[][2][]int{
{
{1, 2}, {3, 4},
},
{
{5, 6}, {7, 8},
},
},
[2][][2]int{
{
{1, 2}, {3, 4},
},
{
{5, 6}, {7, 8},
},
},
[][2]interface{}{
{1, 2}, {"a", "b"},
},
[2][]interface{}{
{1, 2}, {"a", "b"},
},
},
wantOutput: `SliceOfArrays = [[1, 2], [3, 4]]
ArrayOfSlices = [[1, 2], [3, 4]]
SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
SliceOfMixedArrays = [[1, 2], ["a", "b"]]
ArrayOfMixedSlices = [[1, 2], ["a", "b"]]
`,
},
"empty slice": {
input: struct{ Empty []interface{} }{[]interface{}{}},
wantOutput: "Empty = []\n",
},
"(error) slice with element type mismatch (string and integer)": {
input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}},
wantError: errArrayMixedElementTypes,
},
"(error) slice with element type mismatch (integer and float)": {
input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}},
wantError: errArrayMixedElementTypes,
},
"slice with elems of differing Go types, same TOML types": {
input: struct {
MixedInts []interface{}
MixedFloats []interface{}
}{
[]interface{}{
int(1), int8(2), int16(3), int32(4), int64(5),
uint(1), uint8(2), uint16(3), uint32(4), uint64(5),
},
[]interface{}{float32(1.5), float64(2.5)},
},
wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" +
"MixedFloats = [1.5, 2.5]\n",
},
"(error) slice w/ element type mismatch (one is nested array)": {
input: struct{ Mixed []interface{} }{
[]interface{}{1, []interface{}{2}},
},
wantError: errArrayMixedElementTypes,
},
"(error) slice with 1 nil element": {
input: struct{ NilElement1 []interface{} }{[]interface{}{nil}},
wantError: errArrayNilElement,
},
"(error) slice with 1 nil element (and other non-nil elements)": {
input: struct{ NilElement []interface{} }{
[]interface{}{1, nil},
},
wantError: errArrayNilElement,
},
"simple map": {
input: map[string]int{"a": 1, "b": 2},
wantOutput: "a = 1\nb = 2\n",
},
"map with interface{} value type": {
input: map[string]interface{}{"a": 1, "b": "c"},
wantOutput: "a = 1\nb = \"c\"\n",
},
"map with interface{} value type, some of which are structs": {
input: map[string]interface{}{
"a": struct{ Int int }{2},
"b": 1,
},
wantOutput: "b = 1\n\n[a]\n Int = 2\n",
},
"nested map": {
input: map[string]map[string]int{
"a": {"b": 1},
"c": {"d": 2},
},
wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n",
},
"nested struct": {
input: struct{ Struct struct{ Int int } }{
struct{ Int int }{1},
},
wantOutput: "[Struct]\n Int = 1\n",
},
"nested struct and non-struct field": {
input: struct {
Struct struct{ Int int }
Bool bool
}{struct{ Int int }{1}, true},
wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n",
},
"2 nested structs": {
input: struct{ Struct1, Struct2 struct{ Int int } }{
struct{ Int int }{1}, struct{ Int int }{2},
},
wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n",
},
"deeply nested structs": {
input: struct {
Struct1, Struct2 struct{ Struct3 *struct{ Int int } }
}{
struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}},
struct{ Struct3 *struct{ Int int } }{nil},
},
wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" +
"\n\n[Struct2]\n",
},
"nested struct with nil struct elem": {
input: struct {
Struct struct{ Inner *struct{ Int int } }
}{
struct{ Inner *struct{ Int int } }{nil},
},
wantOutput: "[Struct]\n",
},
"nested struct with no fields": {
input: struct {
Struct struct{ Inner struct{} }
}{
struct{ Inner struct{} }{struct{}{}},
},
wantOutput: "[Struct]\n [Struct.Inner]\n",
},
"struct with tags": {
input: struct {
Struct struct {
Int int `toml:"_int"`
} `toml:"_struct"`
Bool bool `toml:"_bool"`
}{
struct {
Int int `toml:"_int"`
}{1}, true,
},
wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n",
},
"embedded struct": {
input: struct{ Embedded }{Embedded{1}},
wantOutput: "_int = 1\n",
},
"embedded *struct": {
input: struct{ *Embedded }{&Embedded{1}},
wantOutput: "_int = 1\n",
},
"nested embedded struct": {
input: struct {
Struct struct{ Embedded } `toml:"_struct"`
}{struct{ Embedded }{Embedded{1}}},
wantOutput: "[_struct]\n _int = 1\n",
},
"nested embedded *struct": {
input: struct {
Struct struct{ *Embedded } `toml:"_struct"`
}{struct{ *Embedded }{&Embedded{1}}},
wantOutput: "[_struct]\n _int = 1\n",
},
"embedded non-struct": {
input: struct{ NonStruct }{5},
wantOutput: "NonStruct = 5\n",
},
"array of tables": {
input: struct {
Structs []*struct{ Int int } `toml:"struct"`
}{
[]*struct{ Int int }{{1}, {3}},
},
wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n",
},
"array of tables order": {
input: map[string]interface{}{
"map": map[string]interface{}{
"zero": 5,
"arr": []map[string]int{
{
"friend": 5,
},
},
},
},
wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n",
},
"(error) top-level slice": {
input: []struct{ Int int }{{1}, {2}, {3}},
wantError: errNoKey,
},
"(error) slice of slice": {
input: struct {
Slices [][]struct{ Int int }
}{
[][]struct{ Int int }{{{1}}, {{2}}, {{3}}},
},
wantError: errArrayNoTable,
},
"(error) map no string key": {
input: map[int]string{1: ""},
wantError: errNonString,
},
"(error) empty key name": {
input: map[string]int{"": 1},
wantError: errAnything,
},
"(error) empty map name": {
input: map[string]interface{}{
"": map[string]int{"v": 1},
},
wantError: errAnything,
},
}
for label, test := range tests {
encodeExpected(t, label, test.input, test.wantOutput, test.wantError)
}
}
func TestEncodeNestedTableArrays(t *testing.T) {
type song struct {
Name string `toml:"name"`
}
type album struct {
Name string `toml:"name"`
Songs []song `toml:"songs"`
}
type springsteen struct {
Albums []album `toml:"albums"`
}
value := springsteen{
[]album{
{"Born to Run",
[]song{{"Jungleland"}, {"Meeting Across the River"}}},
{"Born in the USA",
[]song{{"Glory Days"}, {"Dancing in the Dark"}}},
},
}
expected := `[[albums]]
name = "Born to Run"
[[albums.songs]]
name = "Jungleland"
[[albums.songs]]
name = "Meeting Across the River"
[[albums]]
name = "Born in the USA"
[[albums.songs]]
name = "Glory Days"
[[albums.songs]]
name = "Dancing in the Dark"
`
encodeExpected(t, "nested table arrays", value, expected, nil)
}
func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) {
type Alpha struct {
V int
}
type Beta struct {
V int
}
type Conf struct {
V int
A Alpha
B []Beta
}
val := Conf{
V: 1,
A: Alpha{2},
B: []Beta{{3}},
}
expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n"
encodeExpected(t, "array hash with normal hash order", val, expected, nil)
}
func TestEncodeWithOmitEmpty(t *testing.T) {
type simple struct {
Bool bool `toml:"bool,omitempty"`
String string `toml:"string,omitempty"`
Array [0]byte `toml:"array,omitempty"`
Slice []int `toml:"slice,omitempty"`
Map map[string]string `toml:"map,omitempty"`
}
var v simple
encodeExpected(t, "fields with omitempty are omitted when empty", v, "", nil)
v = simple{
Bool: true,
String: " ",
Slice: []int{2, 3, 4},
Map: map[string]string{"foo": "bar"},
}
expected := `bool = true
string = " "
slice = [2, 3, 4]
[map]
foo = "bar"
`
encodeExpected(t, "fields with omitempty are not omitted when non-empty",
v, expected, nil)
}
func TestEncodeWithOmitZero(t *testing.T) {
type simple struct {
Number int `toml:"number,omitzero"`
Real float64 `toml:"real,omitzero"`
Unsigned uint `toml:"unsigned,omitzero"`
}
value := simple{0, 0.0, uint(0)}
expected := ""
encodeExpected(t, "simple with omitzero, all zero", value, expected, nil)
value.Number = 10
value.Real = 20
value.Unsigned = 5
expected = `number = 10
real = 20.0
unsigned = 5
`
encodeExpected(t, "simple with omitzero, non-zero", value, expected, nil)
}
func TestEncodeOmitemptyWithEmptyName(t *testing.T) {
type simple struct {
S []int `toml:",omitempty"`
}
v := simple{[]int{1, 2, 3}}
expected := "S = [1, 2, 3]\n"
encodeExpected(t, "simple with omitempty, no name, non-empty field",
v, expected, nil)
}
func TestEncodeAnonymousStruct(t *testing.T) {
type Inner struct{ N int }
type Outer0 struct{ Inner }
type Outer1 struct {
Inner `toml:"inner"`
}
v0 := Outer0{Inner{3}}
expected := "N = 3\n"
encodeExpected(t, "embedded anonymous untagged struct", v0, expected, nil)
v1 := Outer1{Inner{3}}
expected = "[inner]\n N = 3\n"
encodeExpected(t, "embedded anonymous tagged struct", v1, expected, nil)
}
func TestEncodeAnonymousStructPointerField(t *testing.T) {
type Inner struct{ N int }
type Outer0 struct{ *Inner }
type Outer1 struct {
*Inner `toml:"inner"`
}
v0 := Outer0{}
expected := ""
encodeExpected(t, "nil anonymous untagged struct pointer field", v0, expected, nil)
v0 = Outer0{&Inner{3}}
expected = "N = 3\n"
encodeExpected(t, "non-nil anonymous untagged struct pointer field", v0, expected, nil)
v1 := Outer1{}
expected = ""
encodeExpected(t, "nil anonymous tagged struct pointer field", v1, expected, nil)
v1 = Outer1{&Inner{3}}
expected = "[inner]\n N = 3\n"
encodeExpected(t, "non-nil anonymous tagged struct pointer field", v1, expected, nil)
}
func TestEncodeIgnoredFields(t *testing.T) {
type simple struct {
Number int `toml:"-"`
}
value := simple{}
expected := ""
encodeExpected(t, "ignored field", value, expected, nil)
}
func encodeExpected(
t *testing.T, label string, val interface{}, wantStr string, wantErr error,
) {
var buf bytes.Buffer
enc := NewEncoder(&buf)
err := enc.Encode(val)
if err != wantErr {
if wantErr != nil {
if wantErr == errAnything && err != nil {
return
}
t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err)
} else {
t.Errorf("%s: Encode failed: %s", label, err)
}
}
if err != nil {
return
}
if got := buf.String(); wantStr != got {
t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n",
label, wantStr, got)
}
}
func ExampleEncoder_Encode() {
date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC")
var config = map[string]interface{}{
"date": date,
"counts": []int{1, 1, 2, 3, 5, 8},
"hash": map[string]string{
"key1": "val1",
"key2": "val2",
},
}
buf := new(bytes.Buffer)
if err := NewEncoder(buf).Encode(config); err != nil {
log.Fatal(err)
}
fmt.Println(buf.String())
// Output:
// counts = [1, 1, 2, 3, 5, 8]
// date = 2010-03-14T18:00:00Z
//
// [hash]
// key1 = "val1"
// key2 = "val2"
}

19
vendor/github.com/BurntSushi/toml/encoding_types.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler

18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

953
vendor/github.com/BurntSushi/toml/lex.go generated vendored Normal file
View File

@ -0,0 +1,953 @@
package toml
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
type itemType int
const (
itemError itemType = iota
itemNIL // used in the parser to indicate no type
itemEOF
itemText
itemString
itemRawString
itemMultilineString
itemRawMultilineString
itemBool
itemInteger
itemFloat
itemDatetime
itemArray // the start of an array
itemArrayEnd
itemTableStart
itemTableEnd
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
)
const (
eof = 0
comma = ','
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
)
type stateFn func(lx *lexer) stateFn
type lexer struct {
input string
start int
pos int
line int
state stateFn
items chan item
// Allow for backing up up to three runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
// nested arrays. The last state on the stack is used after a value has
// been lexed. Similarly for comments.
stack []stateFn
}
type item struct {
typ itemType
val string
line int
}
func (lx *lexer) nextItem() item {
for {
select {
case item := <-lx.items:
return item
default:
lx.state = lx.state(lx)
}
}
}
func lex(input string) *lexer {
lx := &lexer{
input: input,
state: lexTop,
line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
}
return lx
}
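// Illustrative sketch (not part of the upstream file): driving the lexer by
// hand. nextItem runs the state machine until an item becomes available.
func exampleLex() {
	lx := lex("answer = 42\n")
	for {
		it := lx.nextItem()
		fmt.Println(it) // e.g. (KeyStart, ), (Text, answer), (Integer, 42), (EOF, )
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
	}
}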
func (lx *lexer) push(state stateFn) {
lx.stack = append(lx.stack, state)
}
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
return last
}
func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
func (lx *lexer) emit(typ itemType) {
lx.items <- item{typ, lx.current(), lx.line}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
lx.start = lx.pos
}
func (lx *lexer) next() (r rune) {
if lx.atEOF {
panic("next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.atEOF = true
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 {
lx.nprev++
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.prevWidths[0] = w
lx.pos += w
return r
}
// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
lx.start = lx.pos
}
// backup steps back one rune. Can be called up to three times between calls
// to next, since the lexer buffers the widths of the last three runes.
func (lx *lexer) backup() {
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
panic("backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
}
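// Illustrative sketch (not part of the upstream file): the three-slot
// prevWidths buffer is exactly what lets a speculative `"""` read be fully
// un-read, as lexMultilineString does with three consecutive backups.
func exampleTripleBackup() {
	lx := lex("abc")
	lx.next() // 'a'
	lx.next() // 'b'
	lx.next() // 'c'
	lx.backup()
	lx.backup()
	lx.backup() // back at the start; a fourth backup would panic
}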
// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
if lx.next() == valid {
return true
}
lx.backup()
return false
}
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
r := lx.next()
lx.backup()
return r
}
// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
for {
r := lx.next()
if pred(r) {
continue
}
lx.backup()
lx.ignore()
return
}
}
// errorf stops all lexing by emitting an error item and returning nil.
// Note that special characters in the formatted values (newlines, tabs,
// etc.) appear escaped in the error message.
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
fmt.Sprintf(format, values...),
lx.line,
}
return nil
}
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
lx.push(lexTop)
return lexCommentStart
case tableStart:
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
return nil
}
// At this point, the only valid item can be a key, so we back up
// and let the key lexer do the rest.
lx.backup()
lx.push(lexTopEnd)
return lexKeyStart
}
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
return lexTopEnd
case isNL(r):
lx.ignore()
return lexTop
case r == eof:
lx.emit(itemEOF)
return nil
}
return lx.errorf("expected a top-level item to end with a newline, "+
"comment, or EOF, but got %q instead", r)
}
// lexTableStart lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables,
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
if lx.peek() == arrayTableStart {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
} else {
lx.emit(itemTableStart)
lx.push(lexTableEnd)
}
return lexTableNameStart
}
func lexTableEnd(lx *lexer) stateFn {
lx.emit(itemTableEnd)
return lexTopEnd
}
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("expected end of table array name delimiter %q, "+
"but got %q instead", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
}
func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("unexpected end of table name " +
"(table names cannot be empty)")
case r == tableSep:
return lx.errorf("unexpected table separator " +
"(table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
return lexValue // reuse string lexing
default:
return lexBareTableName
}
}
// lexBareTableName lexes the name of a table. It assumes that the first
// valid character for the table name has not yet been consumed.
func lexBareTableName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
return lexBareTableName
}
lx.backup()
lx.emit(itemText)
return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
case r == tableSep:
lx.ignore()
return lexTableNameStart
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("expected '.' or ']' to end table name, "+
"but got %q instead", r)
}
}
// lexKeyStart skips leading whitespace and begins lexing a key, which may be
// bare or quoted.
func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("unexpected key separator %q", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.emit(itemKeyStart)
lx.push(lexKeyEnd)
return lexValue // reuse string lexing
default:
lx.ignore()
lx.emit(itemKeyStart)
return lexBareKey
}
}
// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareKey
case isWhitespace(r):
lx.backup()
lx.emit(itemText)
return lexKeyEnd
case r == keySep:
lx.backup()
lx.emit(itemText)
return lexKeyEnd
default:
return lx.errorf("bare keys cannot contain %q", r)
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case r == keySep:
return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("expected key separator %q, but got %q instead",
keySep, r)
}
}
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT newlines.
// In array syntax, the array states are responsible for ignoring newlines.
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexValue)
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
}
switch r {
case arrayStart:
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case inlineTableStart:
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
lx.ignore() // Ignore """
return lexMultilineString
}
lx.backup()
}
lx.ignore() // ignore the '"'
return lexString
case rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
lx.ignore() // Ignore '''
return lexMultilineRawString
}
lx.backup()
}
lx.ignore() // ignore the "'"
return lexRawString
case '+', '-':
return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'")
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
// user wrote something like
// x = foo
// (i.e. not 'true' or 'false' but is something else word-like.)
lx.backup()
return lexBool
}
return lx.errorf("expected value but found %q instead", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
return lexArrayEnd
}
lx.backup()
lx.push(lexArrayValueEnd)
return lexValue
}
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf(
"expected a comma or array terminator %q, but got %q instead",
arrayEnd, r,
)
}
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
return lexInlineTableEnd
}
lx.backup()
lx.push(lexInlineTableValueEnd)
return lexKeyStart
}
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
}
return lx.errorf("expected a comma or an inline table terminator %q, "+
"but got %q instead", inlineTableEnd, r)
}
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemInlineTableEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == '\\':
lx.push(lexString)
return lexStringEscape
case r == stringEnd:
lx.backup()
lx.emit(itemString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexString
}
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case '\\':
return lexMultilineStringEscape
case stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
return lexMultilineString
}
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
}
func lexStringEscape(lx *lexer) stateFn {
r := lx.next()
switch r {
	case 'b', 't', 'n', 'f', 'r', '"', '\\':
		return lx.pop()
case 'u':
return lexShortUnicodeEscape
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("invalid escape character %q; only the following "+
"escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected four hexadecimal digits after '\u', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '_':
return lexNumber
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
// lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '-':
return lexDatetime
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexDatetime
}
switch r {
case '-', 'T', ':', '.', 'Z':
return lexDatetime
}
lx.backup()
lx.emit(itemDatetime)
return lx.pop()
}
// lexNumberStart consumes either an integer or a float. It assumes that a sign
// has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
// We MUST see a digit. Even floats have to start with a digit.
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumber
}
switch r {
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexFloat
}
switch r {
case '_', '.', '-', '+', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemFloat)
return lx.pop()
}
// lexBool consumes a bool string: 'true' or 'false'.
func lexBool(lx *lexer) stateFn {
var rs []rune
for {
r := lx.next()
if !unicode.IsLetter(r) {
lx.backup()
break
}
rs = append(rs, r)
}
s := string(rs)
switch s {
case "true", "false":
lx.emit(itemBool)
return lx.pop()
}
return lx.errorf("expected value but found %q instead", s)
}
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemCommentStart)
return lexComment
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()
if isNL(r) || r == eof {
lx.emit(itemText)
return lx.pop()
}
lx.next()
return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
return func(lx *lexer) stateFn {
lx.ignore()
return nextState
}
}
// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
return r == '\t' || r == ' '
}
func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
func isHexadecimal(r rune) bool {
return (r >= '0' && r <= '9') ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' ||
r == '-'
}
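// Illustrative sketch (not part of the upstream file): bare keys admit only
// ASCII letters, digits, '_' and '-'.
func exampleBareKey() {
	_ = isBareKeyChar('a') // true
	_ = isBareKeyChar('-') // true
	_ = isBareKeyChar('.') // false: '.' separates table name parts instead
}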
func (itype itemType) String() string {
switch itype {
case itemError:
return "Error"
case itemNIL:
return "NIL"
case itemEOF:
return "EOF"
case itemText:
return "Text"
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
case itemInteger:
return "Integer"
case itemFloat:
return "Float"
case itemDatetime:
return "DateTime"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
case itemKeyStart:
return "KeyStart"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
	case itemCommentStart:
		return "CommentStart"
	case itemArrayTableStart:
		return "ArrayTableStart"
	case itemArrayTableEnd:
		return "ArrayTableEnd"
	case itemInlineTableStart:
		return "InlineTableStart"
	case itemInlineTableEnd:
		return "InlineTableEnd"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
func (item item) String() string {
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}

592
vendor/github.com/BurntSushi/toml/parse.go generated vendored Normal file
View File

@ -0,0 +1,592 @@
package toml
import (
"fmt"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer
// A list of keys in the order that they appear in the TOML data.
ordered []Key
// the full key for the current hash in scope
context Key
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
}
type parseError string
func (pe parseError) Error() string {
return string(pe)
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(parseError); ok {
return
}
panic(r)
}
}()
p = &parser{
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]bool),
}
for {
item := p.next()
if item.typ == itemEOF {
break
}
p.topLevel(item)
}
return p, nil
}
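// Illustrative sketch (not part of the upstream file): a successful parse
// leaves behind the decoded mapping, per-key TOML types, and key order.
func exampleParse() {
	p, err := parse("[server]\nport = 8080\n")
	if err != nil {
		return
	}
	fmt.Println(p.mapping["server"])                    // map[port:8080]
	fmt.Println(p.types["server.port"], len(p.ordered)) // Integer 2
}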
func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
p.approxLine, p.current(), fmt.Sprintf(format, v...))
panic(parseError(msg))
}
func (p *parser) next() item {
it := p.lx.nextItem()
if it.typ == itemError {
p.panicf("%s", it.val)
}
return it
}
func (p *parser) bug(format string, v ...interface{}) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
func (p *parser) expect(typ itemType) item {
it := p.next()
p.assertEqual(typ, it.typ)
return it
}
func (p *parser) assertEqual(expected, got itemType) {
if expected != got {
p.bug("Expected '%s' but got '%s'.", expected, got)
}
}
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart:
p.approxLine = item.line
p.expect(itemText)
case itemTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemTableEnd, kg.typ)
p.establishContext(key, false)
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
case itemArrayTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemArrayTableEnd, kg.typ)
p.establishContext(key, true)
p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key)
case itemKeyStart:
kname := p.next()
p.approxLine = kname.line
p.currentKey = p.keyString(kname)
val, typ := p.value(p.next())
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
}
}
// Gets a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
}
}
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
var t time.Time
var ok bool
var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
array := make([]interface{}, 0)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
		for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
			if it.typ == itemCommentStart {
				// Check for comments first: the key-start check below would
				// otherwise report a bug before this branch could ever run.
				p.expect(itemText)
				continue
			}
			if it.typ != itemKeyStart {
				p.bug("Expected key start but instead found %q, around line %d",
					it.val, p.approxLine)
			}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
accept = false
continue
}
accept = true
}
return accept
}
// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
period := false
for _, r := range s {
if period && !isDigit(r) {
return false
}
period = r == '.'
}
return !period
}
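// Illustrative sketch (not part of the upstream file): the two validators
// that gate numeric literals before strconv ever sees them.
func exampleNumValidators() {
	_ = numUnderscoresOK("1_000") // true: every '_' sits between digits
	_ = numUnderscoresOK("_100")  // false: leading underscore
	_ = numPeriodsOK("3.14")      // true
	_ = numPeriodsOK("3.")        // false: '.' must be followed by a digit
}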
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
hashContext := p.mapping
keyContext := make(Key, 0)
// We only need implicit hashes for key[0:-1]
for _, k := range key[0 : len(key)-1] {
_, ok = hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
hashContext[k] = make(map[string]interface{})
}
// If the hash context is actually an array of tables, then set
// the hash context to the last element in that array.
//
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
case []map[string]interface{}:
hashContext = t[len(t)-1]
case map[string]interface{}:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
}
}
p.context = keyContext
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
p.panicf("Key '%s' was already created and cannot be used as "+
"an array.", keyContext)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
}
p.context = append(p.context, key[len(key)-1])
}
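// Illustrative sketch (not part of the upstream file): for the header
// [a.b.c], the intermediate tables are created implicitly, so a later
// explicit [a] or [a.b] is still legal.
func exampleImplicitContext() {
	if p, err := parse("[a.b.c]\nx = 1\n"); err == nil {
		_ = p.isImplicit(Key{"a"})           // true: created on the way down
		_ = p.isImplicit(Key{"a", "b", "c"}) // false: explicitly established
	}
}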
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, accounting for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{}
var ok bool
hash := p.mapping
keyContext := make(Key, 0)
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
case []map[string]interface{}:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
case map[string]interface{}:
hash = t
default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+
"it has '%T' instead.", tmpHash)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have
// to raise an error since duplicate keys are disallowed. However,
// it's possible that a key was previously defined implicitly. In this
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
//
// But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.)
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
}
// Otherwise, we have a concrete key trying to override a previous
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
hash[key] = value
}
// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context {
keyContext = append(keyContext, k)
}
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
p.types[keyContext.String()] = typ
}
// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
p.implicits[key.String()] = true
}
// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
p.implicits[key.String()] = false
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
}
// current returns the full key name of the current context.
func (p *parser) current() string {
if len(p.currentKey) == 0 {
return p.context.String()
}
if len(p.context) == 0 {
return p.currentKey
}
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}
func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' {
return s
}
return s[1:]
}
func stripEscapedWhitespace(s string) string {
esc := strings.Split(s, "\\\n")
if len(esc) > 1 {
for i := 1; i < len(esc); i++ {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
}
}
return strings.Join(esc, "")
}
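// Illustrative sketch (not part of the upstream file): the two helpers that
// normalize multiline string bodies before escapes are replaced.
func exampleMultilineTrimming() {
	_ = stripFirstNewline("\nhello")       // "hello": leading newline dropped
	_ = stripEscapedWhitespace("a\\\n  b") // "ab": a trailing backslash eats the newline and indent
}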
func (p *parser) replaceEscapes(str string) string {
var replaced []rune
s := []byte(str)
r := 0
for r < len(s) {
if s[r] != '\\' {
c, size := utf8.DecodeRune(s[r:])
r += size
replaced = append(replaced, c)
continue
}
r += 1
if r >= len(s) {
p.bug("Escape sequence at end of string.")
return ""
}
switch s[r] {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
case 't':
replaced = append(replaced, rune(0x0009))
r += 1
case 'n':
replaced = append(replaced, rune(0x000A))
r += 1
case 'f':
replaced = append(replaced, rune(0x000C))
r += 1
case 'r':
replaced = append(replaced, rune(0x000D))
r += 1
case '"':
replaced = append(replaced, rune(0x0022))
r += 1
case '\\':
replaced = append(replaced, rune(0x005C))
r += 1
case 'u':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
replaced = append(replaced, escaped)
r += 5
		case 'U':
			// At this point, we know we have a Unicode escape of the form
			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
			// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
replaced = append(replaced, escaped)
r += 9
}
}
return string(replaced)
}
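// Illustrative sketch (not part of the upstream file): replaceEscapes on a
// well-formed input; a zero-value parser suffices because the bug paths are
// never reached for valid escapes.
func exampleReplaceEscapes() {
	p := new(parser)
	_ = p.replaceEscapes(`\tfoo\u00E9`) // "\tfooé"
}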
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
}
if !utf8.ValidRune(rune(hex)) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}

1
vendor/github.com/BurntSushi/toml/session.vim generated vendored Normal file
View File

@ -0,0 +1 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

91
vendor/github.com/BurntSushi/toml/type_check.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
package toml
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be moving
// toward adding real composite types.
type tomlType interface {
typeString() string
}
// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
if t1 == nil || t2 == nil {
return false
}
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
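// Illustrative sketch (not part of the upstream file): typeEqual treats a
// nil type as unequal to everything, including another nil.
func exampleTypeEqual() {
	_ = typeEqual(tomlInteger, tomlInteger) // true
	_ = typeEqual(tomlInteger, tomlString)  // false
	_ = typeEqual(nil, nil)                 // false: nil never compares equal
	_ = typeIsHash(tomlArrayHash)           // true: arrays of tables count as hashes
}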
type tomlBaseType string
func (btype tomlBaseType) typeString() string {
return string(btype)
}
func (btype tomlBaseType) String() string {
return btype.typeString()
}
var (
tomlInteger tomlBaseType = "Integer"
tomlFloat tomlBaseType = "Float"
tomlDatetime tomlBaseType = "Datetime"
tomlString tomlBaseType = "String"
tomlBool tomlBaseType = "Bool"
tomlArray tomlBaseType = "Array"
tomlHash tomlBaseType = "Hash"
tomlArrayHash tomlBaseType = "ArrayHash"
)
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
switch lexItem.typ {
case itemInteger:
return tomlInteger
case itemFloat:
return tomlFloat
case itemDatetime:
return tomlDatetime
case itemString:
return tomlString
case itemMultilineString:
return tomlString
case itemRawString:
return tomlString
case itemRawMultilineString:
return tomlString
case itemBool:
return tomlBool
}
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}
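// Illustrative sketch (not part of the upstream file): homogeneous element
// types collapse to tomlArray; mixed ones would panic through p.panicf.
func exampleTypeOfArray() tomlType {
	p := new(parser)
	return p.typeOfArray([]tomlType{tomlInteger, tomlInteger}) // tomlArray
}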

242
vendor/github.com/BurntSushi/toml/type_fields.go generated vendored Normal file
View File

@ -0,0 +1,242 @@
package toml
// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.
import (
"reflect"
"sort"
"sync"
)
// A field represents a single field found in a struct.
type field struct {
name string // the name of the field (`toml` tag included)
tag bool // whether field has a `toml` tag
index []int // represents the depth of an anonymous field
typ reflect.Type // the type of the field
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
opts := getOptions(sf.Tag)
if opts.skip {
continue
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := opts.name != ""
name := opts.name
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
f := field{name: ft.Name(), index: index, typ: ft}
next = append(next, f)
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with TOML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}

16
vendor/github.com/aristanetworks/goarista/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,16 @@
language: go
go:
- 1.9
- tip
before_install:
- go get -v github.com/golang/lint/golint
- go get -v -t -d ./...
after_success:
- make coverdata
- bash <(curl -s https://codecov.io/bash)
script:
- make -j4 check GOTEST_FLAGS=-v
notifications:
slack:
secure: MO/3LqbyALbi9vAY3pZetp/LfRuKEPAYEUya7XKmTWA3OFHYkTGqJWNosVkFJd6eSKwnc3HP4jlKADEBNVxADHzcA3uMPUQi1mIcNk/Ps1WWMNDv1liE2XOoOmHSHZ/8ksk6TNq83x+d17ZffYq8KAH6iKNKvllO1JzQPgJJdf+cNXQQlg6uPSe+ggMpjqVLkKcHqA4L3/BWo6fNcyvkqaN3uXcEzYPi7Nb2q9tl0ja6ToyZV4H6SinwitZmpedN3RkBcm4fKmGyw5ikzH93ycA5SvWrnXTh1dJvq6DU0FV7iwI6oqPTbAUc3FE5g7aEkK0qVR21s2j+KNaOLnuX10ZGQFwj2r3SW2REHq4j+qqFla/2EmSFZJt3GXYS+plmGCxqCgyjSw6tTi7LaGZ/mWBJEA9/EaXG1NkwlQYx5tdUMeGj77OczjXClynpb2hJ7MM2b32Rnp0JmNaXAh01SmClo+8nDWuksAsIdPtWsbF0/XHmEJiqpu8ojvVXOQIbPt43bjG7PS1t5jaRAU/N1n56SiCGgCSGd3Ui5eX5vmgWdpZMl8NG05G4LFsgmkdphRT5fru0C2PrhNZYRDGWs63XKapBxsvfqGzdHxTtYuaDjHjrI+9w0BC/8kEzSWoPmabQ5ci4wf4DeplcIay4tDMgMSo8pGAf52vrne4rmUo=
on_success: change

25
vendor/github.com/aristanetworks/goarista/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,25 @@
All contributors are required to sign a "Contributor License Agreement" at
<TBD>
The following organizations and people have contributed code to this library.
(Please keep both lists sorted alphabetically.)
Arista Networks, Inc.
Benoit Sigoure
Fabrice Rabaute
The list of individual contributors for code currently in HEAD can be obtained
at any time with the following script:
find . -type f \
| while read i; do \
git blame -t $i 2>/dev/null; \
done \
| sed 's/^[0-9a-f]\{8\} [^(]*(\([^)]*\) [-+0-9 ]\{14,\}).*/\1/;s/ *$//' \
| awk '{a[$0]++; t++} END{for(n in a) print n}' \
| sort

177
vendor/github.com/aristanetworks/goarista/COPYING generated vendored Normal file
View File

@ -0,0 +1,177 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

14
vendor/github.com/aristanetworks/goarista/Dockerfile generated vendored Normal file
View File

@ -0,0 +1,14 @@
# Copyright (c) 2016 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.
# TODO: move this to cmd/ockafka (https://github.com/docker/hub-feedback/issues/292)
FROM golang:1.7.3
RUN mkdir -p /go/src/github.com/aristanetworks/goarista/cmd
WORKDIR /go/src/github.com/aristanetworks/goarista
COPY ./ .
RUN go get -d ./cmd/ockafka/... \
&& go install ./cmd/ockafka
ENTRYPOINT ["/go/bin/ockafka"]

59
vendor/github.com/aristanetworks/goarista/Makefile generated vendored Normal file
View File

@ -0,0 +1,59 @@
# Copyright (c) 2015 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.
GO := go
TEST_TIMEOUT := 30s
GOTEST_FLAGS :=
DEFAULT_GOPATH := $${GOPATH%%:*}
GOPATH_BIN := $(DEFAULT_GOPATH)/bin
GOPATH_PKG := $(DEFAULT_GOPATH)/pkg
GOLINT := $(GOPATH_BIN)/golint
GOFOLDERS := find . -type d ! -path "./.git/*"
all: install
install:
$(GO) install ./...
check: vet test fmtcheck lint
COVER_PKGS := key test
COVER_MODE := count
coverdata:
echo 'mode: $(COVER_MODE)' >coverage.out
for dir in $(COVER_PKGS); do \
$(GO) test -covermode=$(COVER_MODE) -coverprofile=cov.out-t ./$$dir || exit; \
tail -n +2 cov.out-t >> coverage.out && \
rm cov.out-t; \
done;
coverage: coverdata
$(GO) tool cover -html=coverage.out
rm -f coverage.out
fmtcheck:
errors=`gofmt -l .`; if test -n "$$errors"; then echo Check these files for style errors:; echo "$$errors"; exit 1; fi
find . -name '*.go' ! -name '*.pb.go' -exec ./check_line_len.awk {} +
./check_copyright_notice.sh
vet:
$(GO) vet ./...
lint:
lint=`$(GOFOLDERS) | xargs -L 1 $(GOLINT) | fgrep -v .pb.go`; if test -n "$$lint"; then echo "$$lint"; exit 1; fi
# The above is ugly, but unfortunately golint doesn't exit 1 when it finds
# lint. See https://github.com/golang/lint/issues/65
test:
$(GO) test $(GOTEST_FLAGS) -timeout=$(TEST_TIMEOUT) ./...
docker:
docker build -f cmd/occlient/Dockerfile .
clean:
rm -rf $(GOPATH_PKG)/*/github.com/aristanetworks/goarista
$(GO) clean ./...
.PHONY: all check coverage coverdata docker fmtcheck install lint test vet

68
vendor/github.com/aristanetworks/goarista/README.md generated vendored Normal file
View File

@ -0,0 +1,68 @@
# Arista Go library [![Build Status](https://travis-ci.org/aristanetworks/goarista.svg?branch=master)](https://travis-ci.org/aristanetworks/goarista) [![codecov.io](http://codecov.io/github/aristanetworks/goarista/coverage.svg?branch=master)](http://codecov.io/github/aristanetworks/goarista?branch=master) [![GoDoc](https://godoc.org/github.com/aristanetworks/goarista?status.png)](https://godoc.org/github.com/aristanetworks/goarista) [![Go Report Card](https://goreportcard.com/badge/github.com/aristanetworks/goarista)](https://goreportcard.com/report/github.com/aristanetworks/goarista)
## areflect
Helper functions to work with the `reflect` package. Contains
`ForceExport()`, which bypasses the check in `reflect.Value` that
prevents accessing unexported attributes.
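For illustration, a minimal sketch of reading an unexported field this way (mirroring the package's own test, and assuming the `ForceExport(reflect.Value) reflect.Value` signature from `areflect/force.go`):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/aristanetworks/goarista/areflect"
)

type counter struct {
	n uint32 // unexported; reflect normally refuses to Interface() it
}

func main() {
	v := reflect.ValueOf(counter{n: 42})
	// ForceExport clears the read-only flag so Interface() doesn't panic.
	n := areflect.ForceExport(v.FieldByName("n")).Interface()
	fmt.Println(n) // 42
}
```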
## monotime
Provides access to a fast monotonic clock source, to fill in the gap in the
[Go standard library, which lacks one](https://github.com/golang/go/issues/12914).
Don't use `time.Now()` in code that needs to time things or otherwise assumes
that time passes at a constant rate; instead, use `monotime.Now()`.
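A rough sketch of timing a section of code (assuming `monotime.Now()` returns a monotonic reading in nanoseconds as a `uint64`):

```go
package main

import (
	"fmt"
	"time"

	"github.com/aristanetworks/goarista/monotime"
)

func main() {
	start := monotime.Now() // monotonic nanoseconds (assumed uint64)
	time.Sleep(10 * time.Millisecond)
	elapsed := time.Duration(monotime.Now() - start)
	fmt.Println("elapsed:", elapsed) // unaffected by wall-clock jumps
}
```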
## cmd
See the [cmd](cmd) directory.
## dscp
Provides `ListenTCPWithTOS()`, a replacement for `net.ListenTCP()`
that allows specifying the ToS (Type of Service) byte, so that DSCP / ECN /
class-of-service flags can be set on incoming connections. Requires `go1.9`.
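A minimal sketch, assuming a `ListenTCPWithTOS(*net.TCPAddr, byte) (*net.TCPListener, error)` signature:

```go
package main

import (
	"log"
	"net"

	"github.com/aristanetworks/goarista/dscp"
)

func main() {
	addr := &net.TCPAddr{IP: net.IPv4zero, Port: 6042}
	// 0xB8 is DSCP EF (Expedited Forwarding) shifted into the ToS byte.
	ln, err := dscp.ListenTCPWithTOS(addr, 0xB8)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	// Accept connections as with any *net.TCPListener...
}
```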
## key
Provides a common type used across various Arista projects, named `key.Key`,
which works around the fact that Go doesn't allow using a non-hashable type
as a key to a `map`, and we sometimes need to use
a `map[string]interface{}` (or something containing one) as a key to maps.
As a result, we frequently use `map[key.Key]interface{}` instead of just
`map[interface{}]interface{}` when we need a generic key-value collection.
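For example, a sketch of using a map-valued key (assuming `key.New` wraps an arbitrary value into a `key.Key`, and that the library makes equal composite keys hash equal):

```go
package main

import (
	"fmt"

	"github.com/aristanetworks/goarista/key"
)

func main() {
	// A map (normally unhashable) used as a map key via key.Key.
	k := key.New(map[string]interface{}{"intf": "Ethernet1"})
	counters := map[key.Key]interface{}{k: uint64(1234)}
	// An equal composite key finds the same entry.
	fmt.Println(counters[key.New(map[string]interface{}{"intf": "Ethernet1"})])
}
```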
## path
Provides a common type used across various Arista projects, named `path.Path`,
which is the representation of a path broken down into individual elements.
Each element is a `key.Key`. The type `path.Map` may be used for mapping paths
to values. It allows for some fuzzy matching.
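A sketch of building a path (assuming a `path.New` constructor and that `path.Path` is a slice of `key.Key` elements):

```go
package main

import (
	"fmt"

	"github.com/aristanetworks/goarista/path"
)

func main() {
	// Each element of the path becomes a key.Key.
	p := path.New("interfaces", "Ethernet1", "counters")
	for _, elem := range p {
		fmt.Println(elem) // interfaces, Ethernet1, counters
	}
}
```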
## lanz
A client for [LANZ](https://eos.arista.com/latency-analyzer-lanz-architectures-and-configuration/)
streaming servers. It connects to a LANZ streaming server,
listens for notifications, decodes them and sends the LANZ protobuf on the
provided channel.
## monitor
A library to help expose monitoring metrics on top of the
[`expvar`](https://golang.org/pkg/expvar/) infrastructure.
## netns
`netns.Do(namespace, cb)` provides a handy mechanism to execute the given
callback `cb` in the given [network namespace](https://lwn.net/Articles/580893/).
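A sketch, assuming the callback has a `func() error` signature (the namespace name here is hypothetical):

```go
package main

import (
	"log"
	"net"

	"github.com/aristanetworks/goarista/netns"
)

func main() {
	// Dial from inside the "ns-mgmt" network namespace (hypothetical name).
	err := netns.Do("ns-mgmt", func() error {
		conn, err := net.Dial("tcp", "192.0.2.1:6042")
		if err != nil {
			return err
		}
		return conn.Close()
	})
	if err != nil {
		log.Fatal(err)
	}
}
```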
## pathmap
DEPRECATED; use `path.Map` instead.
## test
This is a [Go](http://golang.org/) library to help in writing unit tests.
## Examples
TBD

View File

@ -0,0 +1,38 @@
// Copyright (c) 2014 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// Package areflect provides utilities to help with reflection.
package areflect
import (
"reflect"
"unsafe"
)
// ForceExport returns a new reflect.Value that is identical to the one passed
// in argument except that it's considered as an exported symbol even if in
// reality it isn't.
//
// The `reflect' package intentionally makes it impossible to access the value
// of an unexported attribute. The implementation of reflect.DeepEqual() cheats
// as it bypasses this check. Unfortunately, we can't use the same cheat, which
// prevents us from re-implementing DeepEqual properly or implementing some other
// reflection-based tools. So this is our cheat on top of theirs. It makes
// the given reflect.Value appear as if it was exported.
//
// This function requires go1.6 or newer.
func ForceExport(v reflect.Value) reflect.Value {
// constants from reflect/value.go
const flagStickyRO uintptr = 1 << 5
const flagEmbedRO uintptr = 1 << 6 // new in go1.6 (was flagIndir before)
const flagRO uintptr = flagStickyRO | flagEmbedRO
ptr := unsafe.Pointer(&v)
rv := (*struct {
typ unsafe.Pointer // a *reflect.rtype (reflect.Type)
ptr unsafe.Pointer // The value wrapped by this reflect.Value
flag uintptr
})(ptr)
rv.flag &= ^flagRO // Unset the flag so this value appears to be exported.
return v
}

View File

@ -0,0 +1,36 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package areflect
import (
"reflect"
"testing"
)
type embedme struct {
}
type somestruct struct {
a uint32
embedme
}
func TestForcePublic(t *testing.T) {
c := somestruct{a: 42}
v := reflect.ValueOf(c)
// Without the call to ForceExport(), the following line would crash with
// "panic: reflect.Value.Interface: cannot return value obtained from
// unexported field or method".
a := ForceExport(v.FieldByName("a")).Interface()
if i, ok := a.(uint32); !ok {
t.Fatalf("Should have gotten a uint32 but got a %T", a)
} else if i != 42 {
t.Fatalf("Should have gotten 42 but got a %d", i)
}
e := ForceExport(v.FieldByName("embedme")).Interface()
if _, ok := e.(embedme); !ok {
t.Fatalf("Should have gotten a embedme but got a %T", e)
}
}

View File

@ -0,0 +1,19 @@
#!/bin/sh
# Copyright (c) 2017 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.
# egrep that comes with our Linux distro doesn't like \d, so use [0-9]
notice='Copyright \(c\) 20[0-9][0-9] Arista Networks, Inc.'
files=`git diff-tree --no-commit-id --name-only --diff-filter=ACMR -r HEAD | \
egrep '\.(go|proto|py|sh)$' | grep -v '\.pb\.go$'`
status=0
for file in $files; do
if ! egrep -q "$notice" $file; then
echo "$file: missing or incorrect copyright notice"
status=1
fi
done
exit $status

25
vendor/github.com/aristanetworks/goarista/check_line_len.awk generated vendored Executable file
View File

@ -0,0 +1,25 @@
#!/usr/bin/awk -f
# Copyright (c) 2015 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.
BEGIN {
max = 100;
}
# Expand tabs to 4 spaces.
{
gsub(/\t/, "    ");
}
length() > max {
errors++;
print FILENAME ":" FNR ": Line too long (" length() "/" max ")";
}
END {
if (errors >= 125) {
errors = 125;
}
exit errors;
}

View File

@ -0,0 +1,16 @@
# OpenConfig clients
The `oc*` commands are clients for the [OpenConfig](http://openconfig.net) gRPC interface.
# importsort
`importsort` is a utility for sorting and sectioning import blocks in go code.
# Running
After installing [Go](https://golang.org/dl/) and setting the [GOPATH](https://golang.org/doc/code.html#GOPATH) environment variable to the path to your workspace, you can just run:
```
go get github.com/aristanetworks/goarista/cmd/<cmd>
$GOPATH/bin/<cmd>
```

View File

@ -0,0 +1,149 @@
# gnmi
`gnmi` is a command-line client for interacting with a
[gNMI service](https://github.com/openconfig/reference/tree/master/rpc/gnmi).
# Installation
After installing [Go](https://golang.org/dl/) run:
```
go get github.com/aristanetworks/goarista/cmd/gnmi
```
This will install the `gnmi` binary in the `bin` directory
under [GOPATH](https://golang.org/doc/code.html#GOPATH).
# Usage
## Options
* `-addr ADDR:PORT`
Address of the gNMI endpoint (REQUIRED)
* `-username USERNAME`
Username to authenticate with
* `-password PASSWORD`
Password to authenticate with
* `-tls`
Enable TLS
* `-cafile PATH`
Path to server TLS certificate file
* `-certfile PATH`
Path to client TLS certificate file
* `-keyfile PATH`
Path to client TLS private key file
## Operations
`gnmi` supports the following operations: `capabilities`, `get`,
`subscribe`, `update`, `replace`, and `delete`.
### capabilities
`capabilities` prints the result of calling the
[Capabilities gNMI RPC](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#32-capability-discovery).
Example:
```
$ gnmi [OPTIONS] capabilities
```
### get
`get` requires a path and calls the
[Get gNMI RPC](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths).
Example:
Get all configuration in the default network instance:
```
$ gnmi [OPTIONS] get '/network-instances/network-instance[name=default]'
```
### subscribe
`subscribe` requires a path and calls the
[Subscribe gNMI RPC](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35-subscribing-to-telemetry-updates).
This command will continuously print out results until signalled to
exit, for example by typing `Ctrl-C`.
Example:
Subscribe to interface counters:
```
$ gnmi [OPTIONS] subscribe '/interfaces/interface[name=*]/state/counters'
```
### update/replace/delete
`update`, `replace`, and `delete` are used to
[modify the configuration of a gNMI endpoint](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#34-modifying-state).
All of these operations take a path that must specify a single node
element. In other words, all list members must be fully specified.
`delete` takes a path and will delete that path.
Example:
Delete BGP configuration in the default network instance:
```
$ gnmi [OPTIONS] delete '/network-instances/network-instance[name=default]/protocols/protocol[name=BGP][identifier=BGP]/'
```
`update` and `replace` both take a path and a value in JSON
format. See
[here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#344-modes-of-update-replace-versus-update)
for documentation on the differences between `update` and `replace`.
Examples:
Disable interface Ethernet3/42:
```
gnmi [OPTIONS] update '/interfaces/interface[name=Ethernet3/42]/config/enabled' 'false'
```
Replace the BGP global configuration:
```
gnmi [OPTIONS] replace '/network-instances/network-instance[name=default]/protocols/protocol[name=BGP][identifier=BGP]/bgp/global' '{"config":{"as": 1234, "router-id": "1.2.3.4"}}'
```
Note: String values must be quoted. For example, setting the hostname to `"tor13"`:
```
gnmi [OPTIONS] update '/system/config/hostname' '"tor13"'
```
### CLI requests
`gnmi` offers the ability to send CLI text inside an `update` or
`replace` operation. This is achieved by doing an `update` or
`replace` and using `"cli"` as the path and a set of configure-mode
CLI commands separated by `\n`.
Example:
Configure the idle-timeout on SSH connections
```
gnmi [OPTIONS] update 'cli' 'management ssh
idle-timeout 300'
```
## Paths
Paths in `gnmi` use a simplified xpath style. Path elements are
separated by `/`. Selectors may be used on lists to select certain
members. Selectors are of the form `[key-leaf=value]`. All members of a
list may be selected by not specifying any selectors, or by using a
`*` as the value in a selector. The following are equivalent:
* `/interfaces/interface`
* `/interfaces/interface[name=*]`
All characters, including `/`, are allowed inside a selector value. The
character `]` must be escaped; for example, `[key=[\]]` selects the
element in the list whose `key` leaf has the value `[]`.
See more examples of paths in the examples above.
See
[here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths)
for more information.

View File

@ -0,0 +1,128 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"context"
"flag"
"fmt"
"os"
"github.com/aristanetworks/goarista/gnmi"
"github.com/aristanetworks/glog"
pb "github.com/openconfig/gnmi/proto/gnmi"
)
// TODO: Make this more clear
var help = `Usage of gnmi:
gnmi -addr ADDRESS:PORT [options...]
capabilities
get PATH+
subscribe PATH+
((update|replace PATH JSON)|(delete PATH))+
`
func exitWithError(s string) {
flag.Usage()
fmt.Fprintln(os.Stderr, s)
os.Exit(1)
}
func main() {
cfg := &gnmi.Config{}
flag.StringVar(&cfg.Addr, "addr", "", "Address of gNMI gRPC server")
flag.StringVar(&cfg.CAFile, "cafile", "", "Path to server TLS certificate file")
flag.StringVar(&cfg.CertFile, "certfile", "", "Path to client TLS certificate file")
flag.StringVar(&cfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
flag.StringVar(&cfg.Password, "password", "", "Password to authenticate with")
flag.StringVar(&cfg.Username, "username", "", "Username to authenticate with")
flag.BoolVar(&cfg.TLS, "tls", false, "Enable TLS")
flag.Usage = func() {
fmt.Fprintln(os.Stderr, help)
flag.PrintDefaults()
}
flag.Parse()
if cfg.Addr == "" {
exitWithError("error: address not specified")
}
args := flag.Args()
ctx := gnmi.NewContext(context.Background(), cfg)
client := gnmi.Dial(cfg)
var setOps []*gnmi.Operation
for i := 0; i < len(args); i++ {
switch args[i] {
case "capabilities":
if len(setOps) != 0 {
exitWithError("error: 'capabilities' not allowed after 'merge|replace|delete'")
}
err := gnmi.Capabilities(ctx, client)
if err != nil {
glog.Fatal(err)
}
return
case "get":
if len(setOps) != 0 {
exitWithError("error: 'get' not allowed after 'merge|replace|delete'")
}
err := gnmi.Get(ctx, client, gnmi.SplitPaths(args[i+1:]))
if err != nil {
glog.Fatal(err)
}
return
case "subscribe":
if len(setOps) != 0 {
exitWithError("error: 'subscribe' not allowed after 'merge|replace|delete'")
}
respChan := make(chan *pb.SubscribeResponse)
errChan := make(chan error)
defer close(respChan)
defer close(errChan)
go gnmi.Subscribe(ctx, client, gnmi.SplitPaths(args[i+1:]), respChan, errChan)
for {
select {
case resp := <-respChan:
if err := gnmi.LogSubscribeResponse(resp); err != nil {
exitWithError(err.Error())
}
case err := <-errChan:
exitWithError(err.Error())
}
}
case "update", "replace", "delete":
if len(args) == i+1 {
exitWithError("error: missing path")
}
op := &gnmi.Operation{
Type: args[i],
}
i++
op.Path = gnmi.SplitPath(args[i])
if op.Type != "delete" {
if len(args) == i+1 {
exitWithError("error: missing JSON")
}
i++
op.Val = args[i]
}
setOps = append(setOps, op)
default:
exitWithError(fmt.Sprintf("error: unknown operation %q", args[i]))
}
}
if len(setOps) == 0 {
flag.Usage()
os.Exit(1)
}
err := gnmi.Set(ctx, client, setOps)
if err != nil {
glog.Fatal(err)
}
}

View File

@ -0,0 +1,245 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"go/build"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"golang.org/x/tools/go/vcs"
)
// Implementation taken from "isStandardImportPath" in go's source.
func isStdLibPath(path string) bool {
i := strings.Index(path, "/")
if i < 0 {
i = len(path)
}
elem := path[:i]
return !strings.Contains(elem, ".")
}
// sortImports takes in an "import" body and returns it sorted
func sortImports(in []byte, sections []string) []byte {
type importLine struct {
index int // index into inLines
path string // import path used for sorting
}
// imports holds all the import lines, separated by section. The
// first section is for stdlib imports, the following sections
// hold the user specified sections, the final section is for
// everything else.
imports := make([][]importLine, len(sections)+2)
addImport := func(section, index int, importPath string) {
imports[section] = append(imports[section], importLine{index, importPath})
}
stdlib := 0
offset := 1
other := len(imports) - 1
inLines := bytes.Split(in, []byte{'\n'})
for i, line := range inLines {
if len(line) == 0 {
continue
}
start := bytes.IndexByte(line, '"')
if start == -1 {
continue
}
if comment := bytes.Index(line, []byte("//")); comment > -1 && comment < start {
continue
}
start++ // skip '"'
end := bytes.IndexByte(line[start:], '"') + start
s := string(line[start:end])
found := false
for j, sect := range sections {
if strings.HasPrefix(s, sect) && (len(sect) == len(s) || s[len(sect)] == '/') {
addImport(j+offset, i, s)
found = true
break
}
}
if found {
continue
}
if isStdLibPath(s) {
addImport(stdlib, i, s)
} else {
addImport(other, i, s)
}
}
out := make([]byte, 0, len(in)+2)
needSeparator := false
for _, section := range imports {
if len(section) == 0 {
continue
}
if needSeparator {
out = append(out, '\n')
}
sort.Slice(section, func(a, b int) bool {
return section[a].path < section[b].path
})
for _, s := range section {
out = append(out, inLines[s.index]...)
out = append(out, '\n')
}
needSeparator = true
}
return out
}
func genFile(in []byte, sections []string) ([]byte, error) {
out := make([]byte, 0, len(in)+3) // Add some fudge to avoid re-allocation
for {
const importLine = "\nimport (\n"
const importLineLen = len(importLine)
importStart := bytes.Index(in, []byte(importLine))
if importStart == -1 {
break
}
// Save to `out` everything up to and including "import (\n"
out = append(out, in[:importStart+importLineLen]...)
in = in[importStart+importLineLen:]
importLen := bytes.Index(in, []byte("\n)\n"))
if importLen == -1 {
return nil, errors.New(`parsing error: missing ")"`)
}
// Sort body of "import" and write it to `out`
out = append(out, sortImports(in[:importLen], sections)...)
out = append(out, []byte(")")...)
in = in[importLen+2:]
}
// Write everything leftover to out
out = append(out, in...)
return out, nil
}
// returns true if the file changed
func processFile(filename string, writeFile, listDiffFiles bool, sections []string) (bool, error) {
in, err := ioutil.ReadFile(filename)
if err != nil {
return false, err
}
out, err := genFile(in, sections)
if err != nil {
return false, err
}
equal := bytes.Equal(in, out)
if listDiffFiles {
return !equal, nil
}
if !writeFile {
os.Stdout.Write(out)
return !equal, nil
}
if equal {
return false, nil
}
temp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
if err != nil {
return false, err
}
defer os.RemoveAll(temp.Name())
s, err := os.Stat(filename)
if err != nil {
return false, err
}
if _, err = temp.Write(out); err != nil {
return false, err
}
if err := temp.Close(); err != nil {
return false, err
}
if err := os.Chmod(temp.Name(), s.Mode()); err != nil {
return false, err
}
if err := os.Rename(temp.Name(), filename); err != nil {
return false, err
}
return true, nil
}
// maps directory to vcsRoot
var vcsRootCache = make(map[string]string)
func vcsRootImportPath(f string) (string, error) {
path, err := filepath.Abs(f)
if err != nil {
return "", err
}
dir := filepath.Dir(path)
if root, ok := vcsRootCache[dir]; ok {
return root, nil
}
gopath := build.Default.GOPATH
var root string
_, root, err = vcs.FromDir(dir, filepath.Join(gopath, "src"))
if err != nil {
return "", err
}
vcsRootCache[dir] = root
return root, nil
}
func main() {
writeFile := flag.Bool("w", false, "write result to file instead of stdout")
listDiffFiles := flag.Bool("l", false, "list files whose formatting differs from importsort")
var sections multistring
flag.Var(&sections, "s", "package `prefix` to define an import section,"+
` ex: "cvshub.com/company". May be specified multiple times.`+
" If not specified the repository root is used.")
flag.Parse()
checkVCSRoot := sections == nil
for _, f := range flag.Args() {
if checkVCSRoot {
root, err := vcsRootImportPath(f)
if err != nil {
fmt.Fprintf(os.Stderr, "error determining VCS root for file %q: %s\n", f, err)
continue
}
sections = multistring{root}
}
diff, err := processFile(f, *writeFile, *listDiffFiles, sections)
if err != nil {
fmt.Fprintf(os.Stderr, "error while proccessing file %q: %s", f, err)
continue
}
if *listDiffFiles && diff {
fmt.Println(f)
}
}
}
type multistring []string
func (m *multistring) String() string {
return strings.Join(*m, ", ")
}
func (m *multistring) Set(s string) error {
*m = append(*m, s)
return nil
}

View File

@ -0,0 +1,40 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"bytes"
"io/ioutil"
"testing"
)
const (
goldFile = "testdata/test.go.gold"
inFile = "testdata/test.go.in"
)
func TestImportSort(t *testing.T) {
in, err := ioutil.ReadFile(inFile)
if err != nil {
t.Fatal(err)
}
gold, err := ioutil.ReadFile(goldFile)
if err != nil {
t.Fatal(err)
}
sections := []string{"foobar", "cvshub.com/foobar"}
if out, err := genFile(gold, sections); err != nil {
t.Fatal(err)
} else if !bytes.Equal(out, gold) {
t.Errorf("importsort on %s file produced a change", goldFile)
t.Log(string(out))
}
if out, err := genFile(in, sections); err != nil {
t.Fatal(err)
} else if !bytes.Equal(out, gold) {
t.Errorf("importsort on %s different than gold", inFile)
t.Log(string(out))
}
}

View File

@ -0,0 +1,52 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"bytes"
"strings"
"foobar"
"foobar/baz"
"cvshub.com/foobar/import"
)
import (
"bytes"
"strings"
"foobar"
"foobar/baz"
"cvshub.com/foobar/import"
"cvshub.com/other/import"
)
func foobar() {}
import (
z "bytes"
"strings"
"foobar"
_ "foobar/baz" // in line comment
. "foobar/qux" // in line comment
"cvshub.com/foobar/import"
)
import (
"bytes"
"cvshub.com/foobar/import"
)
import (
"cvshub.com/foobar/import"
)
func main() {
foobar()
}

View File

@ -0,0 +1,47 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"bytes"
"strings"
"foobar"
"foobar/baz"
"cvshub.com/foobar/import"
)
import (
"bytes"
"strings"
"foobar"
"foobar/baz"
"cvshub.com/foobar/import"
"cvshub.com/other/import"
)
func foobar() {}
import (
// Comment going away
"cvshub.com/foobar/import"
"strings"
_ "foobar/baz" // in line comment
"foobar"
z "bytes"
. "foobar/qux" // in line comment
)
import (
"cvshub.com/foobar/import"
"bytes"
)
import (
"cvshub.com/foobar/import"
)
func main() {
foobar()
}

View File

@ -0,0 +1,3 @@
# occli
# DEPRECATED
Please use [gnmi](../gnmi) instead.

View File

@ -0,0 +1,29 @@
# ockafka
Client for the gRPC OpenConfig service for subscribing to the configuration and
state of a network device and feeding the stream to Kafka.
## Sample usage
Subscribe to all updates on the Arista device at `10.0.1.2` and stream to a local
Kafka instance:
```
ockafka -addrs 10.0.1.2
```
Subscribe to temperature sensors from 2 switches and stream to a remote Kafka instance:
```
ockafka -addrs 10.0.1.2,10.0.1.3 -kafkaaddrs kafka:9092 -subscribe /Sysdb/environment/temperature/status/tempSensor
```
Start in a container:
```
docker run aristanetworks/ockafka -addrs 10.0.1.1 -kafkaaddrs kafka:9092
```
## Kafka/Elastic integration demo
The following video demoes integration with Kafka and Elastic using [this Logstash instance](https://github.com/aristanetworks/docker-logstash):
[![video preview](http://img.youtube.com/vi/WsyFmxMwXYQ/0.jpg)](https://youtu.be/WsyFmxMwXYQ)

View File

@ -0,0 +1,69 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// The ockafka tool is a client for the gRPC OpenConfig service that subscribes to
// the configuration and state of a network device and streams the updates to Kafka.
package main
import (
"flag"
"fmt"
"strings"
"sync"
"github.com/aristanetworks/goarista/kafka"
"github.com/aristanetworks/goarista/kafka/openconfig"
"github.com/aristanetworks/goarista/kafka/producer"
"github.com/aristanetworks/goarista/openconfig/client"
"github.com/Shopify/sarama"
"github.com/aristanetworks/glog"
"github.com/golang/protobuf/proto"
)
var keysFlag = flag.String("kafkakeys", "",
"Keys for kafka messages (comma-separated, default: the value of -addrs")
func newProducer(addresses []string, topic, key, dataset string) (producer.Producer, error) {
glog.Infof("Connected to Kafka brokers at %s", addresses)
encodedKey := sarama.StringEncoder(key)
p, err := producer.New(openconfig.NewEncoder(topic, encodedKey, dataset), addresses, nil)
if err != nil {
return nil, fmt.Errorf("Failed to create Kafka producer: %s", err)
}
return p, nil
}
func main() {
username, password, subscriptions, grpcAddrs, opts := client.ParseFlags()
if *keysFlag == "" {
*keysFlag = strings.Join(grpcAddrs, ",")
}
keys := strings.Split(*keysFlag, ",")
if len(grpcAddrs) != len(keys) {
glog.Fatal("Please provide the same number of addresses and Kafka keys")
}
addresses := strings.Split(*kafka.Addresses, ",")
wg := new(sync.WaitGroup)
for i, grpcAddr := range grpcAddrs {
key := keys[i]
p, err := newProducer(addresses, *kafka.Topic, key, grpcAddr)
if err != nil {
glog.Fatal(err)
} else {
glog.Infof("Initialized Kafka producer for %s", grpcAddr)
}
publish := func(addr string, message proto.Message) {
p.Write(message)
}
wg.Add(1)
p.Start()
defer p.Stop()
c := client.New(username, password, grpcAddr, opts)
go c.Subscribe(wg, subscriptions, publish)
}
wg.Wait()
}

View File

@ -0,0 +1,37 @@
# ocprometheus
This is a client for the OpenConfig gRPC interface that exposes telemetry to
Prometheus. Numerical and boolean values (booleans are converted to 1 for true
and 0 for false) are supported. Non-numerical data isn't supported by Prometheus and is silently
dropped. Arrays (even with numeric values) are not yet supported.
This tool requires a config file to specify how to map the path of the
notifications coming out of the OpenConfig gRPC interface onto Prometheus
metric names, and how to extract labels from the path. For example, the
following rule, excerpted from `sampleconfig.yml`:
```yaml
metrics:
- name: tempSensor
path: /Sysdb/environment/temperature/status/tempSensor/(?P<sensor>.+)/(?P<type>(?:maxT|t)emperature)/value
help: Temperature and Maximum Temperature
...
```
Applied to an update for the path
`/Sysdb/environment/temperature/status/tempSensor/TempSensor1/temperature/value`,
this rule will lead to the metric name `tempSensor` and the labels `sensor=TempSensor1` and `type=temperature`.
Basically, named groups are used to extract (optional) labels.
Unnamed groups will be given label names like "unnamedLabelX" (where X is the group's position).
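The extraction itself is plain Go `regexp` named-group matching; a standalone sketch of how the rule above yields those labels:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`/Sysdb/environment/temperature/status/tempSensor/` +
		`(?P<sensor>.+)/(?P<type>(?:maxT|t)emperature)/value`)
	path := "/Sysdb/environment/temperature/status/tempSensor/TempSensor1/temperature/value"
	groups := re.FindStringSubmatch(path)
	// SubexpNames()[0] is always ""; the rest align with the capture groups.
	for i, name := range re.SubexpNames()[1:] {
		fmt.Printf("%s=%s\n", name, groups[i+1]) // sensor=TempSensor1, type=temperature
	}
}
```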
The timestamps from the notifications are not preserved since Prometheus uses a pull model and
doesn't (yet) support exporter-specified timestamps.
Prometheus 2.0 will probably support timestamps.
## Usage
See the `-help` output, but here's an example to push all the metrics defined
in the sample config file:
```
ocprometheus -addrs <switch-hostname>:6042 -config sampleconfig.yml
```

View File

@ -0,0 +1,156 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"encoding/json"
"strings"
"sync"
"github.com/aristanetworks/glog"
"github.com/golang/protobuf/proto"
"github.com/openconfig/reference/rpc/openconfig"
"github.com/prometheus/client_golang/prometheus"
)
// A metric source.
type source struct {
addr string
path string
}
// Since the labels are fixed per-path and per-device we can cache them here,
// to avoid recomputing them.
type labelledMetric struct {
metric prometheus.Metric
labels []string
}
type collector struct {
// Protects access to metrics map
m sync.Mutex
metrics map[source]*labelledMetric
config *Config
}
func newCollector(config *Config) *collector {
return &collector{
metrics: make(map[source]*labelledMetric),
config: config,
}
}
// Process a notification and update or create the corresponding metrics.
func (c *collector) update(addr string, message proto.Message) {
resp, ok := message.(*openconfig.SubscribeResponse)
if !ok {
glog.Errorf("Unexpected type of message: %T", message)
return
}
notif := resp.GetUpdate()
if notif == nil {
return
}
device := strings.Split(addr, ":")[0]
prefix := "/" + strings.Join(notif.Prefix.Element, "/")
// Process deletes first
for _, del := range notif.Delete {
path := prefix + "/" + strings.Join(del.Element, "/")
key := source{addr: device, path: path}
c.m.Lock()
delete(c.metrics, key)
c.m.Unlock()
}
// Process updates next
for _, update := range notif.Update {
// We only use JSON encoded values
if update.Value == nil || update.Value.Type != openconfig.Type_JSON {
glog.V(9).Infof("Ignoring incompatible update value in %s", update)
continue
}
path := prefix + "/" + strings.Join(update.Path.Element, "/")
value, suffix, ok := parseValue(update)
if !ok {
continue
}
if suffix != "" {
path += "/" + suffix
}
src := source{addr: device, path: path}
c.m.Lock()
// Use the cached labels and descriptor if available
if m, ok := c.metrics[src]; ok {
m.metric = prometheus.MustNewConstMetric(m.metric.Desc(), prometheus.GaugeValue, value,
m.labels...)
c.m.Unlock()
continue
}
c.m.Unlock()
// Get the descriptor and labels for this source
desc, labelValues := c.config.getDescAndLabels(src)
if desc == nil {
glog.V(8).Infof("Ignoring unmatched update at %s:%s: %+v", device, path, update.Value)
continue
}
c.m.Lock()
// Save the metric and labels in the cache
metric := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, value, labelValues...)
c.metrics[src] = &labelledMetric{
metric: metric,
labels: labelValues,
}
c.m.Unlock()
}
}
func parseValue(update *openconfig.Update) (float64, string, bool) {
// All metrics in Prometheus are floats, so only try to unmarshal as float64.
var intf interface{}
if err := json.Unmarshal(update.Value.Value, &intf); err != nil {
glog.Errorf("Can't parse value in update %v: %v", update, err)
return 0, "", false
}
switch value := intf.(type) {
case float64:
return value, "", true
case map[string]interface{}:
if vIntf, ok := value["value"]; ok {
if val, ok := vIntf.(float64); ok {
return val, "value", true
}
}
case bool:
if value {
return 1, "", true
}
return 0, "", true
default:
glog.V(9).Infof("Ignorig non-numeric update: %v", update)
}
return 0, "", false
}
// Describe implements prometheus.Collector interface
func (c *collector) Describe(ch chan<- *prometheus.Desc) {
c.config.getAllDescs(ch)
}
// Collect implements prometheus.Collector interface
func (c *collector) Collect(ch chan<- prometheus.Metric) {
c.m.Lock()
for _, m := range c.metrics {
ch <- m.metric
}
c.m.Unlock()
}

View File

@ -0,0 +1,219 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"testing"
"github.com/aristanetworks/goarista/test"
"github.com/openconfig/reference/rpc/openconfig"
"github.com/prometheus/client_golang/prometheus"
)
func makeMetrics(cfg *Config, expValues map[source]float64) map[source]*labelledMetric {
expMetrics := map[source]*labelledMetric{}
for k, v := range expValues {
desc, labels := cfg.getDescAndLabels(k)
if desc == nil || labels == nil {
panic("cfg.getDescAndLabels returned nil")
}
expMetrics[k] = &labelledMetric{
metric: prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, labels...),
labels: labels,
}
}
return expMetrics
}
func makeResponse(notif *openconfig.Notification) *openconfig.SubscribeResponse {
return &openconfig.SubscribeResponse{
Response: &openconfig.SubscribeResponse_Update{Update: notif},
}
}
func TestUpdate(t *testing.T) {
config := []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
'*':
lab1: val3
lab2: val4
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
- /Sysdb/bridging/igmpsnooping/forwarding/forwarding/status
metrics:
- name: intfCounter
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/status/fan/speed/value
help: Fan Speed
- name: igmpSnoopingInf
path: /Sysdb/igmpsnooping/vlanStatus/(?P<vlan>.+)/ethGroup/(?P<mac>.+)/intf/(?P<intf>.+)
help: IGMP snooping status`)
cfg, err := parseConfig(config)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
coll := newCollector(cfg)
notif := &openconfig.Notification{
Prefix: &openconfig.Path{Element: []string{"Sysdb"}},
Update: []*openconfig.Update{
{
Path: &openconfig.Path{
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
},
Value: &openconfig.Value{
Type: openconfig.Type_JSON,
Value: []byte("42"),
},
},
{
Path: &openconfig.Path{
Element: []string{"environment", "cooling", "status", "fan", "speed"},
},
Value: &openconfig.Value{
Type: openconfig.Type_JSON,
Value: []byte("{\"value\": 45}"),
},
},
{
Path: &openconfig.Path{
Element: []string{"igmpsnooping", "vlanStatus", "2050", "ethGroup",
"01:00:5e:01:01:01", "intf", "Cpu"},
},
Value: &openconfig.Value{
Type: openconfig.Type_JSON,
Value: []byte("true"),
},
},
},
}
expValues := map[source]float64{
source{
addr: "10.1.1.1",
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
}: 42,
source{
addr: "10.1.1.1",
path: "/Sysdb/environment/cooling/status/fan/speed/value",
}: 45,
source{
addr: "10.1.1.1",
path: "/Sysdb/igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:01/intf/Cpu",
}: 1,
}
coll.update("10.1.1.1:6042", makeResponse(notif))
expMetrics := makeMetrics(cfg, expValues)
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
// Update one value, and one path which is not a metric
notif = &openconfig.Notification{
Prefix: &openconfig.Path{Element: []string{"Sysdb"}},
Update: []*openconfig.Update{
{
Path: &openconfig.Path{
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
},
Value: &openconfig.Value{
Type: openconfig.Type_JSON,
Value: []byte("52"),
},
},
{
Path: &openconfig.Path{
Element: []string{"environment", "doesntexist", "status"},
},
Value: &openconfig.Value{
Type: openconfig.Type_JSON,
Value: []byte("{\"value\": 45}"),
},
},
},
}
src := source{
addr: "10.1.1.1",
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
}
expValues[src] = 52
coll.update("10.1.1.1:6042", makeResponse(notif))
expMetrics = makeMetrics(cfg, expValues)
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
// Same path, different device
notif = &openconfig.Notification{
Prefix: &openconfig.Path{Element: []string{"Sysdb"}},
Update: []*openconfig.Update{
{
Path: &openconfig.Path{
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
},
Value: &openconfig.Value{
Type: openconfig.Type_JSON,
Value: []byte("42"),
},
},
},
}
src.addr = "10.1.1.2"
expValues[src] = 42
coll.update("10.1.1.2:6042", makeResponse(notif))
expMetrics = makeMetrics(cfg, expValues)
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
// Delete a path
notif = &openconfig.Notification{
Prefix: &openconfig.Path{Element: []string{"Sysdb"}},
Delete: []*openconfig.Path{
{
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
},
},
}
src.addr = "10.1.1.1"
delete(expValues, src)
coll.update("10.1.1.1:6042", makeResponse(notif))
expMetrics = makeMetrics(cfg, expValues)
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
// Non-numeric update
notif = &openconfig.Notification{
Prefix: &openconfig.Path{Element: []string{"Sysdb"}},
Update: []*openconfig.Update{
{
Path: &openconfig.Path{
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
},
Value: &openconfig.Value{
Type: openconfig.Type_JSON,
Value: []byte("\"test\""),
},
},
},
}
coll.update("10.1.1.1:6042", makeResponse(notif))
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
}

View File

@ -0,0 +1,119 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"fmt"
"regexp"
"strconv"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/yaml.v2"
)
// Config is the representation of ocprometheus's YAML config file.
type Config struct {
// Per-device labels.
DeviceLabels map[string]prometheus.Labels
// Prefixes to subscribe to.
Subscriptions []string
// Metrics to collect and how to munge them.
Metrics []*MetricDef
}
// MetricDef is the representation of a metric definition in the config file.
type MetricDef struct {
// Path is a regexp to match on the Update's full path.
// The regexp must be a prefix match.
// The regexp can define named capture groups to use as labels.
Path string
// Path compiled as a regexp.
re *regexp.Regexp
// Metric name.
Name string
// Metric help string.
Help string
// This map contains the metric descriptors for this metric for each device.
devDesc map[string]*prometheus.Desc
// This is the default metric descriptor for devices that don't have explicit descs.
desc *prometheus.Desc
}
// Parses the config and creates the descriptors for each path and device.
func parseConfig(cfg []byte) (*Config, error) {
config := &Config{
DeviceLabels: make(map[string]prometheus.Labels),
}
if err := yaml.Unmarshal(cfg, config); err != nil {
return nil, fmt.Errorf("Failed to parse config: %v", err)
}
for _, def := range config.Metrics {
def.re = regexp.MustCompile(def.Path)
// Extract label names
reNames := def.re.SubexpNames()[1:]
labelNames := make([]string, len(reNames))
for i, n := range reNames {
labelNames[i] = n
if n == "" {
labelNames[i] = "unnamedLabel" + strconv.Itoa(i+1)
}
}
// Create a default descriptor only if there aren't any per-device labels,
// or if it's explicitly declared
if len(config.DeviceLabels) == 0 || len(config.DeviceLabels["*"]) > 0 {
def.desc = prometheus.NewDesc(def.Name, def.Help, labelNames, config.DeviceLabels["*"])
}
// Add per-device descriptors
def.devDesc = make(map[string]*prometheus.Desc)
for device, labels := range config.DeviceLabels {
if device == "*" {
continue
}
def.devDesc[device] = prometheus.NewDesc(def.Name, def.Help, labelNames, labels)
}
}
return config, nil
}
// Returns the descriptor corresponding to the device and path, and labels extracted from the path.
// If the device and path don't match any metric, it returns nil.
func (c *Config) getDescAndLabels(s source) (*prometheus.Desc, []string) {
for _, def := range c.Metrics {
if groups := def.re.FindStringSubmatch(s.path); groups != nil {
if desc, ok := def.devDesc[s.addr]; ok {
return desc, groups[1:]
}
return def.desc, groups[1:]
}
}
return nil, nil
}
// Sends all the descriptors to the channel.
func (c *Config) getAllDescs(ch chan<- *prometheus.Desc) {
for _, def := range c.Metrics {
// Default descriptor might not be present
if def.desc != nil {
ch <- def.desc
}
for _, desc := range def.devDesc {
ch <- desc
}
}
}

View File

@ -0,0 +1,417 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"regexp"
"testing"
"github.com/aristanetworks/goarista/test"
"github.com/prometheus/client_golang/prometheus"
)
func TestParseConfig(t *testing.T) {
tCases := []struct {
input []byte
config Config
}{
{
input: []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
'*':
lab1: val3
lab2: val4
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/fan/speed/value
help: Fan Speed`),
config: Config{
DeviceLabels: map[string]prometheus.Labels{
"10.1.1.1": {
"lab1": "val1",
"lab2": "val2",
},
"*": {
"lab1": "val3",
"lab2": "val4",
},
},
Subscriptions: []string{
"/Sysdb/environment/cooling/status",
"/Sysdb/environment/power/status",
},
Metrics: []*MetricDef{
{
Path: "/Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
re: regexp.MustCompile(
"/Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
Name: "intfCounter",
Help: "Per-Interface Bytes/Errors/Discards Counters",
devDesc: map[string]*prometheus.Desc{
"10.1.1.1": prometheus.NewDesc("intfCounter",
"Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
desc: prometheus.NewDesc("intfCounter",
"Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
},
{
Path: "/Sysdb/environment/cooling/fan/speed/value",
re: regexp.MustCompile("/Sysdb/environment/cooling/fan/speed/value"),
Name: "fanSpeed",
Help: "Fan Speed",
devDesc: map[string]*prometheus.Desc{
"10.1.1.1": prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
desc: prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
},
},
},
},
{
input: []byte(`
devicelabels:
'*':
lab1: val3
lab2: val4
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/fan/speed/value
help: Fan Speed`),
config: Config{
DeviceLabels: map[string]prometheus.Labels{
"*": {
"lab1": "val3",
"lab2": "val4",
},
},
Subscriptions: []string{
"/Sysdb/environment/cooling/status",
"/Sysdb/environment/power/status",
},
Metrics: []*MetricDef{
{
Path: "/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
re: regexp.MustCompile(
"/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
Name: "intfCounter",
Help: "Per-Interface Bytes/Errors/Discards Counters",
devDesc: map[string]*prometheus.Desc{},
desc: prometheus.NewDesc("intfCounter",
"Per-Interface Bytes/Errors/Discards Counters",
[]string{"intf"},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
},
{
Path: "/Sysdb/environment/cooling/fan/speed/value",
re: regexp.MustCompile("/Sysdb/environment/cooling/fan/speed/value"),
Name: "fanSpeed",
Help: "Fan Speed",
devDesc: map[string]*prometheus.Desc{},
desc: prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
},
},
},
},
{
input: []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/fan/speed/value
help: Fan Speed`),
config: Config{
DeviceLabels: map[string]prometheus.Labels{
"10.1.1.1": {
"lab1": "val1",
"lab2": "val2",
},
},
Subscriptions: []string{
"/Sysdb/environment/cooling/status",
"/Sysdb/environment/power/status",
},
Metrics: []*MetricDef{
{
Path: "/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
re: regexp.MustCompile(
"/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
Name: "intfCounter",
Help: "Per-Interface Bytes/Errors/Discards Counters",
devDesc: map[string]*prometheus.Desc{
"10.1.1.1": prometheus.NewDesc("intfCounter",
"Per-Interface Bytes/Errors/Discards Counters",
[]string{"intf"},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
},
{
Path: "/Sysdb/environment/cooling/fan/speed/value",
re: regexp.MustCompile("/Sysdb/environment/cooling/fan/speed/value"),
Name: "fanSpeed",
Help: "Fan Speed",
devDesc: map[string]*prometheus.Desc{
"10.1.1.1": prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
},
},
},
},
{
input: []byte(`
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/fan/speed/value
help: Fan Speed`),
config: Config{
DeviceLabels: map[string]prometheus.Labels{},
Subscriptions: []string{
"/Sysdb/environment/cooling/status",
"/Sysdb/environment/power/status",
},
Metrics: []*MetricDef{
{
Path: "/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
re: regexp.MustCompile(
"/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
Name: "intfCounter",
Help: "Per-Interface Bytes/Errors/Discards Counters",
devDesc: map[string]*prometheus.Desc{},
desc: prometheus.NewDesc("intfCounter",
"Per-Interface Bytes/Errors/Discards Counters",
[]string{"intf"}, prometheus.Labels{}),
},
{
Path: "/Sysdb/environment/cooling/fan/speed/value",
re: regexp.MustCompile("/Sysdb/environment/cooling/fan/speed/value"),
Name: "fanSpeed",
Help: "Fan Speed",
devDesc: map[string]*prometheus.Desc{},
desc: prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{}),
},
},
},
},
}
for i, c := range tCases {
cfg, err := parseConfig(c.input)
if err != nil {
t.Errorf("Unexpected error in case %d: %v", i+1, err)
continue
}
if !test.DeepEqual(*cfg, c.config) {
t.Errorf("Test case %d: mismatch %v", i+1, test.Diff(*cfg, c.config))
}
}
}
func TestGetDescAndLabels(t *testing.T) {
config := []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
'*':
lab1: val3
lab2: val4
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/status/fan/speed/value
help: Fan Speed`)
cfg, err := parseConfig(config)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
tCases := []struct {
src source
desc *prometheus.Desc
labels []string
}{
{
src: source{
addr: "10.1.1.1",
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
},
desc: prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
labels: []string{"lag", "Ethernet1"},
},
{
src: source{
addr: "10.2.2.2",
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
},
desc: prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
labels: []string{"lag", "Ethernet1"},
},
{
src: source{
addr: "10.2.2.2",
path: "/Sysdb/environment/cooling/status/fan/speed/value",
},
desc: prometheus.NewDesc("fanSpeed", "Fan Speed",
[]string{},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
labels: []string{},
},
{
src: source{
addr: "10.2.2.2",
path: "/Sysdb/environment/nonexistent",
},
desc: nil,
labels: nil,
},
}
for i, c := range tCases {
desc, labels := cfg.getDescAndLabels(c.src)
if !test.DeepEqual(desc, c.desc) {
t.Errorf("Test case %d: desc mismatch %v", i+1, test.Diff(desc, c.desc))
}
if !test.DeepEqual(labels, c.labels) {
t.Errorf("Test case %d: labels mismatch %v", i+1, test.Diff(labels, c.labels))
}
}
}
func TestGetAllDescs(t *testing.T) {
tCases := []struct {
config []byte
descs []*prometheus.Desc
}{
{
config: []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
'*':
lab1: val3
lab2: val4
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/status/fan/speed/value
help: Fan Speed`),
descs: []*prometheus.Desc{
prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
},
{
config: []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/status/fan/speed/value
help: Fan Speed`),
descs: []*prometheus.Desc{
prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
},
}
for i, c := range tCases {
cfg, err := parseConfig(c.config)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
ch := make(chan *prometheus.Desc, 10)
cfg.getAllDescs(ch)
j := 0
for d := range ch {
if !test.DeepEqual(c.descs[j], d) {
t.Errorf("Test case %d: desc %d mismatch %v", i+1, j+1, test.Diff(c.descs[j], d))
}
j++
if j == len(c.descs) {
break
}
}
select {
case <-ch:
t.Errorf("Test case %d: too many descs", i+1)
default:
}
}
}


@ -0,0 +1,60 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// The ocprometheus implements a Prometheus exporter for OpenConfig telemetry data.
package main
import (
"flag"
"io/ioutil"
"net/http"
"sync"
"github.com/aristanetworks/goarista/openconfig/client"
"github.com/aristanetworks/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
func main() {
listenaddr := flag.String("listenaddr", ":8080", "Address on which to expose the metrics")
url := flag.String("url", "/metrics", "URL where to expose the metrics")
configFlag := flag.String("config", "",
"Config to turn OpenConfig telemetry into Prometheus metrics")
username, password, subscriptions, addrs, opts := client.ParseFlags()
if *configFlag == "" {
glog.Fatal("You need specify a config file using -config flag")
}
cfg, err := ioutil.ReadFile(*configFlag)
if err != nil {
glog.Fatalf("Can't read config file %q: %v", *configFlag, err)
}
config, err := parseConfig(cfg)
if err != nil {
glog.Fatal(err)
}
// Ignore the default "subscribe-to-everything" subscription of the
// -subscribe flag.
if subscriptions[0] == "" {
subscriptions = subscriptions[1:]
}
// Add the subscriptions from the config file.
subscriptions = append(subscriptions, config.Subscriptions...)
coll := newCollector(config)
prometheus.MustRegister(coll)
wg := new(sync.WaitGroup)
for _, addr := range addrs {
wg.Add(1)
c := client.New(username, password, addr, opts)
go c.Subscribe(wg, subscriptions, coll.update)
}
http.Handle(*url, promhttp.Handler())
glog.Fatal(http.ListenAndServe(*listenaddr, nil))
}


@ -0,0 +1,55 @@
# Per-device labels. Optional.
# Exactly the same set of labels must be specified for each device.
# If the device address is '*', the labels apply to all devices not listed explicitly.
# If any explicit device is listed below, then you need to list every device you subscribe to,
# or have a wildcard entry; otherwise, updates from unlisted devices will be ignored.
deviceLabels:
10.1.1.1:
lab1: val1
lab2: val2
'*':
lab1: val3
lab2: val4
# Subscriptions to OpenConfig paths.
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
- /Sysdb/environment/temperature/status
- /Sysdb/interface/counter/eth/lag
- /Sysdb/interface/counter/eth/slice/phy
# Prometheus metrics configuration.
# If you use named capture groups in the path, they will be extracted into labels with the same name.
# All fields are mandatory.
metrics:
- name: intfCounter
path: /Sysdb/interface/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(?P<type>(Octets|Errors|Discards))
help: Per-Interface Bytes/Errors/Discards Counters
- name: intfPktCounter
path: /Sysdb/interface/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))Pkt
help: Per-Interface Unicast/Multicast/Broadcast Packet Counters
- name: intfPfcClassCounter
path: /Sysdb/interface/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/ethStatistics/(?P<direction>(?:in|out))PfcClassFrames
help: Per-Interface Input/Output PFC Frames Counters
- name: tempSensor
path: /Sysdb/environment/temperature/status/tempSensor/(?P<sensor>.+)/(?P<type>(?:maxT|t)emperature)/value
help: Temperature and Maximum Temperature
- name: tempSensorAlert
path: /Sysdb/environment/temperature/status/tempSensor/(?P<sensor>.+)/alertRaisedCount
help: Temperature Alerts Counter
- name: currentSensor
path: /Sysdb/environment/power/status/currentSensor/(?P<sensor>.+)/current/value
help: Current Levels
- name: powerSensor
path: /Sysdb/environment/power/status/powerSupply/(?P<sensor>.+)/(?P<direction>(input|output))Power/value
help: Input/Output Power Levels
- name: voltageSensor
path: /Sysdb/environment/power/status/voltageSensor/(?P<sensor>.+)/voltage/value
help: Voltage Levels
- name: railCurrentSensor
path: /Sysdb/environment/power/status/voltageSensor/(?P<sensor>.+)/current/value
help: Rail Current Levels
- name: fanSpeed
path: /Sysdb/environment/cooling/status/fan/(?P<fan>.+)/speed/value
help: Fan Speed


@ -0,0 +1,21 @@
# ocredis
This is a client for the OpenConfig gRPC interface that publishes data to
Redis. Values are stored in JSON. Every update is pushed to Redis twice:
1. as a [hash map](http://redis.io/topics/data-types-intro#hashes) update,
where the path in Redis is the path to the entity or collection (aka
container or list, in YANG speak) and the keys of the hash are the
attributes (leaf names, in YANG speak).
2. as a [`PUBLISH`](http://redis.io/commands/publish) command sent onto
   the path to the entity or collection, so that consumers can receive
   updates in a streaming fashion from Redis (the message envelope is
   sketched below).
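The envelope published on that channel is a small JSON object built by the
`redisPublish` helper in `main.go` below: a `kind` field (`updates` or
`deletes`) plus the payload. A minimal sketch of what a subscriber receives;
the payload key and value here are invented:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Same envelope that redisPublish marshals; the path key and value
	// are hypothetical.
	msg, _ := json.Marshal(map[string]interface{}{
		"kind":    "updates",
		"payload": map[string]interface{}{"temperature/value": 42},
	})
	fmt.Println(string(msg))
	// Output: {"kind":"updates","payload":{"temperature/value":42}}
}
```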
## Usage
See the `-help` output, but here's an example to push all the temperature
sensors into Redis. If you don't pass any `-subscribe` flag, _everything_
is pushed into Redis.
```
ocredis -subscribe /Sysdb/environment/temperature -addrs <switch-hostname>:6042 -redis <redis-hostname>:6379
```


@ -0,0 +1,189 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// The ocredis tool is a client for the OpenConfig gRPC interface that
// subscribes to state and pushes it to Redis, using Redis' support for hash
// maps and for publishing events that can be subscribed to.
package main
import (
"encoding/json"
"flag"
"strings"
"sync"
occlient "github.com/aristanetworks/goarista/openconfig/client"
"github.com/aristanetworks/glog"
"github.com/golang/protobuf/proto"
"github.com/openconfig/reference/rpc/openconfig"
redis "gopkg.in/redis.v4"
)
var clusterMode = flag.Bool("cluster", false, "Whether the redis server is a cluster")
var redisFlag = flag.String("redis", "",
"Comma separated list of Redis servers to push updates to")
var redisPassword = flag.String("redispass", "", "Password of redis server/cluster")
// baseClient allows us to represent both a redis.Client and redis.ClusterClient.
type baseClient interface {
Close() error
ClusterInfo() *redis.StringCmd
HDel(string, ...string) *redis.IntCmd
HMSet(string, map[string]string) *redis.StatusCmd
Ping() *redis.StatusCmd
Pipelined(func(*redis.Pipeline) error) ([]redis.Cmder, error)
Publish(string, string) *redis.IntCmd
}
var client baseClient
func main() {
username, password, subscriptions, hostAddrs, opts := occlient.ParseFlags()
if *redisFlag == "" {
glog.Fatal("Specify the address of the Redis server to write to with -redis")
}
redisAddrs := strings.Split(*redisFlag, ",")
if !*clusterMode && len(redisAddrs) > 1 {
glog.Fatal("Please pass only 1 redis address in noncluster mode or enable cluster mode")
}
if *clusterMode {
client = redis.NewClusterClient(&redis.ClusterOptions{
Addrs: redisAddrs,
Password: *redisPassword,
})
} else {
client = redis.NewClient(&redis.Options{
Addr: *redisFlag,
Password: *redisPassword,
})
}
defer client.Close()
// TODO: Figure out ways to handle being in the wrong mode:
// Connecting to cluster in non cluster mode - we get a MOVED error on the first HMSET
// Connecting to a noncluster in cluster mode - we get stuck forever
_, err := client.Ping().Result()
if err != nil {
glog.Fatal("Failed to connect to client: ", err)
}
ocPublish := func(addr string, message proto.Message) {
resp, ok := message.(*openconfig.SubscribeResponse)
if !ok {
glog.Errorf("Unexpected type of message: %T", message)
return
}
if notif := resp.GetUpdate(); notif != nil {
bufferToRedis(addr, notif)
}
}
wg := new(sync.WaitGroup)
for _, hostAddr := range hostAddrs {
wg.Add(1)
c := occlient.New(username, password, hostAddr, opts)
go c.Subscribe(wg, subscriptions, ocPublish)
}
wg.Wait()
}
type redisData struct {
key string
hmset map[string]string
hdel []string
pub map[string]interface{}
}
func bufferToRedis(addr string, notif *openconfig.Notification) {
path := addr + "/" + joinPath(notif.Prefix)
data := &redisData{key: path}
if len(notif.Update) != 0 {
hmset := make(map[string]string, len(notif.Update))
// Updates to publish on the pub/sub.
pub := make(map[string]interface{}, len(notif.Update))
for _, update := range notif.Update {
key := joinPath(update.Path)
value := convertUpdate(update)
pub[key] = value
marshaledValue, err := json.Marshal(value)
if err != nil {
glog.Fatalf("Failed to JSON marshal update %#v", update)
}
hmset[key] = string(marshaledValue)
}
data.hmset = hmset
data.pub = pub
}
if len(notif.Delete) != 0 {
hdel := make([]string, len(notif.Delete))
for i, del := range notif.Delete {
hdel[i] = joinPath(del)
}
data.hdel = hdel
}
pushToRedis(data)
}
func pushToRedis(data *redisData) {
_, err := client.Pipelined(func(pipe *redis.Pipeline) error {
if data.hmset != nil {
if reply := client.HMSet(data.key, data.hmset); reply.Err() != nil {
glog.Fatal("Redis HMSET error: ", reply.Err())
}
redisPublish(data.key, "updates", data.pub)
}
if data.hdel != nil {
if reply := client.HDel(data.key, data.hdel...); reply.Err() != nil {
glog.Fatal("Redis HDEL error: ", reply.Err())
}
redisPublish(data.key, "deletes", data.hdel)
}
return nil
})
if err != nil {
glog.Fatal("Failed to send Pipelined commands: ", err)
}
}
func redisPublish(path, kind string, payload interface{}) {
js, err := json.Marshal(map[string]interface{}{
"kind": kind,
"payload": payload,
})
if err != nil {
glog.Fatalf("JSON error: %s", err)
}
if reply := client.Publish(path, string(js)); reply.Err() != nil {
glog.Fatal("Redis PUBLISH error: ", reply.Err())
}
}
func joinPath(path *openconfig.Path) string {
return strings.Join(path.Element, "/")
}
func convertUpdate(update *openconfig.Update) interface{} {
switch update.Value.Type {
case openconfig.Type_JSON:
var value interface{}
err := json.Unmarshal(update.Value.Value, &value)
if err != nil {
glog.Fatalf("Malformed JSON update %q in %s", update.Value.Value, update)
}
return value
case openconfig.Type_BYTES:
return update.Value.Value
default:
glog.Fatalf("Unhandled type of value %v in %s", update.Value.Type, update)
return nil
}
}


@ -0,0 +1,12 @@
# ocsplunk
Client for the gRPC OpenConfig service which subscribes to the configuration and
state of a network device and sends it to the Splunk HTTP Event Collector.
## Sample usage
```
ocsplunk -addr 10.0.1.2 -splunkurls https://splunk:8088 -splunktoken 00000000-0000-0000-0000-000000000000
```
![preview](preview.png)


@ -0,0 +1,118 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"context"
"crypto/tls"
"flag"
"fmt"
"net/http"
"os"
"strings"
"time"
"github.com/aristanetworks/goarista/gnmi"
"github.com/fuyufjh/splunk-hec-go"
pb "github.com/openconfig/gnmi/proto/gnmi"
)
func exitWithError(s string) {
fmt.Fprintln(os.Stderr, s)
os.Exit(1)
}
func main() {
// gNMI options
cfg := &gnmi.Config{}
flag.StringVar(&cfg.Addr, "addr", "localhost", "gNMI gRPC server `address`")
flag.StringVar(&cfg.CAFile, "cafile", "", "Path to server TLS certificate file")
flag.StringVar(&cfg.CertFile, "certfile", "", "Path to client TLS certificate file")
flag.StringVar(&cfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
flag.StringVar(&cfg.Username, "username", "", "Username to authenticate with")
flag.StringVar(&cfg.Password, "password", "", "Password to authenticate with")
flag.BoolVar(&cfg.TLS, "tls", false, "Enable TLS")
subscribePaths := flag.String("paths", "/", "Comma-separated list of paths to subscribe to")
// Splunk options
splunkURLs := flag.String("splunkurls", "https://localhost:8088",
"Comma-separated list of URLs of the Splunk servers")
splunkToken := flag.String("splunktoken", "", "Token to connect to the Splunk servers")
splunkIndex := flag.String("splunkindex", "", "Index for the data in Splunk")
flag.Parse()
// gNMI connection
ctx := gnmi.NewContext(context.Background(), cfg)
// Capture the address before Dial appends the default port, so it can be used as the host in the Splunk event.
addr := cfg.Addr
client := gnmi.Dial(cfg)
// Splunk connection
urls := strings.Split(*splunkURLs, ",")
cluster := hec.NewCluster(urls, *splunkToken)
cluster.SetHTTPClient(&http.Client{
Transport: &http.Transport{
// TODO: add flags for TLS
TLSClientConfig: &tls.Config{
// TODO: add flag to enable TLS
InsecureSkipVerify: true,
},
},
})
// gNMI subscription
respChan := make(chan *pb.SubscribeResponse)
errChan := make(chan error)
defer close(respChan)
defer close(errChan)
paths := strings.Split(*subscribePaths, ",")
go gnmi.Subscribe(ctx, client, gnmi.SplitPaths(paths), respChan, errChan)
// Forward subscribe responses to Splunk
for {
select {
// We got a subscribe response
case resp := <-respChan:
response := resp.GetResponse()
update, ok := response.(*pb.SubscribeResponse_Update)
if !ok {
continue
}
// Convert the response into a map[string]interface{}
notification, err := gnmi.NotificationToMap(update.Update)
if err != nil {
exitWithError(err.Error())
}
// Build the Splunk event
path := notification["path"].(string)
delete(notification, "path")
timestamp := notification["timestamp"].(int64)
delete(notification, "timestamp")
// Should this be configurable?
sourceType := "openconfig"
event := &hec.Event{
Host: &addr,
Index: splunkIndex,
Source: &path,
SourceType: &sourceType,
Event: notification,
}
event.SetTime(time.Unix(timestamp/1e9, timestamp%1e9))
// Write the event to Splunk
if err := cluster.WriteEvent(event); err != nil {
exitWithError("failed to write event: " + err.Error())
}
// We got an error
case err := <-errChan:
exitWithError(err.Error())
}
}
}

Binary file not shown (image, 434 KiB).


@ -0,0 +1,33 @@
# octsdb
This is a client for the OpenConfig gRPC interface that pushes telemetry to
OpenTSDB. Non-numerical data isn't supported by OpenTSDB and is silently
dropped.
This tool requires a config file that specifies how to map the paths of the
notifications coming out of the OpenConfig gRPC interface onto OpenTSDB
metric names, and how to extract tags from those paths. For example, the
following rule, excerpted from `sampleconfig.json`:
```json
"metrics": {
"tempSensor": {
"path": "/Sysdb/(environment)/temperature/status/tempSensor/(?P<sensor>.+)/((?:maxT|t)emperature)/value"
},
...
```
Applied to an update for the path
`/Sysdb/environment/temperature/status/tempSensor/TempSensor1/temperature/value`
will lead to the metric name `environment.temperature` and tags `sensor=TempSensor1`.
Basically, un-named groups are used to make up the metric name, and named
groups are used to extract (optional) tags.
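To make the rule concrete, here is a self-contained sketch using only the
standard `regexp` package; it mirrors the effect of the `Match` method in
`config.go` below, applied to the example path above:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// The tempSensor rule from sampleconfig.json.
	re := regexp.MustCompile(`/Sysdb/(environment)/temperature/status/` +
		`tempSensor/(?P<sensor>.+)/((?:maxT|t)emperature)/value`)
	path := "/Sysdb/environment/temperature/status/tempSensor/TempSensor1/temperature/value"
	groups := re.FindStringSubmatch(path)
	tags := map[string]string{}
	var metric []string
	for i, name := range re.SubexpNames()[1:] {
		if name == "" {
			metric = append(metric, groups[i+1]) // un-named groups form the metric name
		} else {
			tags[name] = groups[i+1] // named groups become tags
		}
	}
	fmt.Println(strings.ToLower(strings.Join(metric, ".")), tags)
	// Output: environment.temperature map[sensor:TempSensor1]
}
```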
## Usage
See the `-help` output, but here's an example to push all the metrics defined
in the sample config file:
```
octsdb -addrs <switch-hostname>:6042 -config sampleconfig.json -text | nc <tsd-hostname> 4242
```


@ -0,0 +1,92 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"regexp"
"strings"
)
// Config is the representation of octsdb's JSON config file.
type Config struct {
// Prefixes to subscribe to.
Subscriptions []string
// MetricPrefix, if set, is used to prefix all the metric names.
MetricPrefix string
// Metrics to collect and how to munge them.
Metrics map[string]*Metric
}
// A Metric to collect and how to massage it into an OpenTSDB put.
type Metric struct {
// Path is a regexp to match on the Update's full path.
// The regexp must be a prefix match.
// The regexp can define named capture groups to use as tags.
Path string
// Path compiled as a regexp.
re *regexp.Regexp
// Additional tags to add to this metric.
Tags map[string]string
}
func loadConfig(path string) (*Config, error) {
cfg, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("Failed to load config: %v", err)
}
config := new(Config)
err = json.Unmarshal(cfg, config)
if err != nil {
return nil, fmt.Errorf("Failed to parse config: %v", err)
}
for _, metric := range config.Metrics {
metric.re = regexp.MustCompile(metric.Path)
}
return config, nil
}
// Match applies this config to the given OpenConfig path.
// If the path doesn't match anything in the config, an empty string
// is returned as the metric name.
func (c *Config) Match(path string) (metricName string, tags map[string]string) {
tags = make(map[string]string)
for _, metric := range c.Metrics {
found := metric.re.FindStringSubmatch(path)
if found == nil {
continue
}
for i, name := range metric.re.SubexpNames() {
if i == 0 {
continue
} else if name == "" {
if metricName != "" {
metricName += "/"
}
metricName += found[i]
} else {
tags[name] = found[i]
}
}
for tag, value := range metric.Tags {
tags[tag] = value
}
break
}
if metricName != "" {
metricName = strings.ToLower(strings.Replace(metricName, "/", ".", -1))
if c.MetricPrefix != "" {
metricName = c.MetricPrefix + "." + metricName
}
}
return
}


@ -0,0 +1,73 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"testing"
"github.com/aristanetworks/goarista/test"
)
func TestConfig(t *testing.T) {
cfg, err := loadConfig("/nonexistent.json")
if err == nil {
t.Fatal("Managed to load a nonexistent config!")
}
cfg, err = loadConfig("sampleconfig.json")
if err != nil {
	t.Fatal("Failed to load sampleconfig.json:", err)
}
testcases := []struct {
path string
metric string
tags map[string]string
}{{
path: "/Sysdb/environment/cooling/status/fan/Fan1/1/speed/value",
metric: "eos.environment.fan.speed",
tags: map[string]string{"fan": "Fan1/1"},
}, {
path: "/Sysdb/environment/power/status/powerSupply/PowerSupply2/outputPower/value",
metric: "eos.environment.power.output",
tags: map[string]string{"sensor": "PowerSupply2"},
}, {
path: "/Sysdb/environment/power/status/voltageSensor/VoltageSensor23/voltage/value",
metric: "eos.environment.voltage",
tags: map[string]string{"sensor": "VoltageSensor23"},
}, {
path: "/Sysdb/environment/power/status/currentSensor/CurrentSensorP2/1/current/value",
metric: "eos.environment.current",
tags: map[string]string{"sensor": "CurrentSensorP2/1"},
}, {
path: "/Sysdb/environment/temperature/status/tempSensor/" +
"TempSensorP2/1/maxTemperature/value",
metric: "eos.environment.maxtemperature",
tags: map[string]string{"sensor": "TempSensorP2/1"},
}, {
path: "/Sysdb/interface/counter/eth/lag/intfCounterDir/" +
"Port-Channel201/intfCounter/current/statistics/outUcastPkts",
metric: "eos.interface.pkt",
tags: map[string]string{"intf": "Port-Channel201", "direction": "out", "type": "Ucast"},
}, {
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
"Ethernet42/intfCounter/current/statistics/inUcastPkts",
metric: "eos.interface.pkt",
tags: map[string]string{"intf": "Ethernet42", "direction": "in", "type": "Ucast"},
}, {
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
"Ethernet42/intfCounter/lastClear/statistics/inErrors",
}, {
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
"Ethernet42/intfCounter/current/ethStatistics/outPfcClassFrames",
metric: "eos.interface.pfcclassframes",
tags: map[string]string{"intf": "Ethernet42", "direction": "out"},
}}
for i, tcase := range testcases {
actualMetric, actualTags := cfg.Match(tcase.path)
if actualMetric != tcase.metric {
t.Errorf("#%d expected metric %q but got %q", i, tcase.metric, actualMetric)
}
if d := test.Diff(tcase.tags, actualTags); actualMetric != "" && d != "" {
t.Errorf("#%d expected tags %q but got %q: %s", i, tcase.tags, actualTags, d)
}
}
}


@ -0,0 +1,214 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// The octsdb tool pushes OpenConfig telemetry to OpenTSDB.
package main
import (
"bytes"
"encoding/json"
"flag"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/aristanetworks/goarista/openconfig/client"
"github.com/aristanetworks/glog"
"github.com/golang/protobuf/proto"
"github.com/openconfig/reference/rpc/openconfig"
)
func main() {
tsdbFlag := flag.String("tsdb", "",
"Address of the OpenTSDB server where to push telemetry to")
textFlag := flag.Bool("text", false,
"Print the output as simple text")
configFlag := flag.String("config", "",
"Config to turn OpenConfig telemetry into OpenTSDB put requests")
isUDPServerFlag := flag.Bool("isudpserver", false,
"Set to true to run as a UDP to TCP to OpenTSDB server.")
udpAddrFlag := flag.String("udpaddr", "",
"Address of the UDP server to connect to/serve on.")
parityFlag := flag.Int("parityshards", 0,
"Number of parity shards for the Reed Solomon Erasure Coding used for UDP."+
" Clients and servers should have the same number.")
udpTimeoutFlag := flag.Duration("udptimeout", 2*time.Second,
"Timeout for each")
username, password, subscriptions, addrs, opts := client.ParseFlags()
if !(*tsdbFlag != "" || *textFlag || *udpAddrFlag != "") {
glog.Fatal("Specify the address of the OpenTSDB server to write to with -tsdb")
} else if *configFlag == "" {
glog.Fatal("Specify a JSON configuration file with -config")
}
config, err := loadConfig(*configFlag)
if err != nil {
glog.Fatal(err)
}
// Ignore the default "subscribe-to-everything" subscription of the
// -subscribe flag.
if subscriptions[0] == "" {
subscriptions = subscriptions[1:]
}
// Add the subscriptions from the config file.
subscriptions = append(subscriptions, config.Subscriptions...)
// Run a UDP server that forwards messages to OpenTSDB via Telnet (TCP)
if *isUDPServerFlag {
if *udpAddrFlag == "" {
glog.Fatal("Specify the address for the UDP server to listen on with -udpaddr")
}
server, err := newUDPServer(*udpAddrFlag, *tsdbFlag, *parityFlag)
if err != nil {
glog.Fatal("Failed to create UDP server: ", err)
}
glog.Fatal(server.Run())
}
var c OpenTSDBConn
if *textFlag {
c = newTextDumper()
} else if *udpAddrFlag != "" {
c = newUDPClient(*udpAddrFlag, *parityFlag, *udpTimeoutFlag)
} else {
// TODO: support HTTP(S).
c = newTelnetClient(*tsdbFlag)
}
wg := new(sync.WaitGroup)
for _, addr := range addrs {
wg.Add(1)
publish := func(addr string, message proto.Message) {
resp, ok := message.(*openconfig.SubscribeResponse)
if !ok {
glog.Errorf("Unexpected type of message: %T", message)
return
}
if notif := resp.GetUpdate(); notif != nil {
pushToOpenTSDB(addr, c, config, notif)
}
}
c := client.New(username, password, addr, opts)
go c.Subscribe(wg, subscriptions, publish)
}
wg.Wait()
}
func pushToOpenTSDB(addr string, conn OpenTSDBConn, config *Config,
notif *openconfig.Notification) {
if notif.Timestamp <= 0 {
glog.Fatalf("Invalid timestamp %d in %s", notif.Timestamp, notif)
}
host := addr
if i := strings.IndexRune(addr, ':'); i >= 0 { // guard against an address without a port
	host = addr[:i]
}
if host == "localhost" {
// TODO: On Linux this reads /proc/sys/kernel/hostname each time,
// which isn't the most efficient, but at least we don't have to
// deal with detecting hostname changes.
host, _ = os.Hostname()
if host == "" {
glog.Info("could not figure out localhost's hostname")
return
}
}
prefix := "/" + strings.Join(notif.Prefix.Element, "/")
for _, update := range notif.Update {
if update.Value == nil || update.Value.Type != openconfig.Type_JSON {
glog.V(9).Infof("Ignoring incompatible update value in %s", update)
continue
}
value := parseValue(update)
if value == nil {
continue
}
path := prefix + "/" + strings.Join(update.Path.Element, "/")
metricName, tags := config.Match(path)
if metricName == "" {
glog.V(8).Infof("Ignoring unmatched update at %s: %+v", path, update.Value)
continue
}
tags["host"] = host
for i, v := range value {
if len(value) > 1 {
tags["index"] = strconv.Itoa(i)
}
err := conn.Put(&DataPoint{
Metric: metricName,
Timestamp: uint64(notif.Timestamp),
Value: v,
Tags: tags,
})
if err != nil {
glog.Info("Failed to put datapoint: ", err)
}
}
}
}
// parseValue returns the numeric value of the given update as a single-element
// slice, or, if the update holds a slice, that slice with its numeric elements
// converted; non-numeric elements are logged and left unchanged. For any other
// value parseValue returns nil.
func parseValue(update *openconfig.Update) []interface{} {
var value interface{}
decoder := json.NewDecoder(bytes.NewReader(update.Value.Value))
decoder.UseNumber()
err := decoder.Decode(&value)
if err != nil {
glog.Fatalf("Malformed JSON update %q in %s", update.Value.Value, update)
}
switch value := value.(type) {
case json.Number:
return []interface{}{parseNumber(value, update)}
case []interface{}:
for i, val := range value {
jsonNum, ok := val.(json.Number)
if !ok {
// If any value is not a number, skip it.
glog.Infof("Element %d: %v is %T, not json.Number", i, val, val)
continue
}
num := parseNumber(jsonNum, update)
value[i] = num
}
return value
case map[string]interface{}:
// Special case for simple value types that just have a "value"
// attribute (common case).
if val, ok := value["value"].(json.Number); ok && len(value) == 1 {
return []interface{}{parseNumber(val, update)}
}
default:
glog.V(9).Infof("Ignoring non-numeric or non-numeric slice value in %s", update)
}
return nil
}
// Convert our json.Number to either an int64, uint64, or float64.
func parseNumber(num json.Number, update *openconfig.Update) interface{} {
var value interface{}
var err error
if value, err = num.Int64(); err != nil {
// num is either a large unsigned integer or a floating point.
if strings.Contains(err.Error(), "value out of range") { // Sigh.
value, err = strconv.ParseUint(num.String(), 10, 64)
} else {
value, err = num.Float64()
if err != nil {
glog.Fatalf("Malformed JSON number %q in %s", num, update)
}
}
}
return value
}


@ -0,0 +1,47 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"math"
"testing"
"github.com/aristanetworks/goarista/test"
"github.com/openconfig/reference/rpc/openconfig"
)
func TestParseValue(t *testing.T) { // Because parsing JSON sucks.
testcases := []struct {
input string
expected interface{}
}{
{"42", []interface{}{int64(42)}},
{"-42", []interface{}{int64(-42)}},
{"42.42", []interface{}{float64(42.42)}},
{"-42.42", []interface{}{float64(-42.42)}},
{`"foo"`, []interface{}(nil)},
{"9223372036854775807", []interface{}{int64(math.MaxInt64)}},
{"-9223372036854775808", []interface{}{int64(math.MinInt64)}},
{"9223372036854775808", []interface{}{uint64(math.MaxInt64) + 1}},
{"[1,3,5,7,9]", []interface{}{int64(1), int64(3), int64(5), int64(7), int64(9)}},
{"[1,9223372036854775808,0,-9223372036854775808]", []interface{}{
int64(1),
uint64(math.MaxInt64) + 1,
int64(0),
int64(math.MinInt64)},
},
}
for i, tcase := range testcases {
actual := parseValue(&openconfig.Update{
Value: &openconfig.Value{
Value: []byte(tcase.input),
},
})
if d := test.Diff(tcase.expected, actual); d != "" {
t.Errorf("#%d: %s: %#v vs %#v", i, d, tcase.expected, actual)
}
}
}


@ -0,0 +1,44 @@
{
"comment": "This is a sample configuration",
"subscriptions": [
"/Sysdb/environment/cooling/status",
"/Sysdb/environment/power/status",
"/Sysdb/environment/temperature/status",
"/Sysdb/interface/counter/eth/lag",
"/Sysdb/interface/counter/eth/slice/phy"
],
"metricPrefix": "eos",
"metrics": {
"intfCounter": {
"path": "/Sysdb/(interface)/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(Octets|Errors|Discards)"
},
"intfPktCounter": {
"path": "/Sysdb/(interface)/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))(Pkt)"
},
"intfPfcClassCounter": {
"path": "/Sysdb/(interface)/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/ethStatistics/(?P<direction>(?:in|out))(PfcClassFrames)"
},
"tempSensor": {
"path": "/Sysdb/(environment)/temperature/status/tempSensor/(?P<sensor>.+)/((?:maxT|t)emperature)/value"
},
"tempSensorAlert": {
"path": "/Sysdb/(environment)/temperature/status/tempSensor/(?P<sensor>.+)/(alertRaisedCount)"
},
"currentSensor": {
"path": "/Sysdb/(environment)/power/status/currentSensor/(?P<sensor>.+)/(current)/value"
},
"powerSensor": {
"path": "/Sysdb/(environment/power)/status/powerSupply/(?P<sensor>.+)/(input|output)Power/value"
},
"voltageSensor": {
"path": "/Sysdb/(environment)/power/status/voltageSensor/(?P<sensor>.+)/(voltage)/value"
},
"railCurrentSensor": {
"path": "/Sysdb/(environment)/power/status/voltageSensor/(?P<sensor>.+)/(current)/value"
},
"fanSpeed": {
"path": "/Sysdb/(environment)/cooling/status/(fan)/(?P<fan>.+)/(speed)/value"
}
}
}


@ -0,0 +1,65 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"bytes"
"net"
"github.com/aristanetworks/glog"
)
type telnetClient struct {
addr string
conn net.Conn
}
func newTelnetClient(addr string) OpenTSDBConn {
return &telnetClient{
addr: addr,
}
}
func readErrors(conn net.Conn) {
var buf [4096]byte
for {
// TODO: We should add a buffer to read line-by-line properly instead
// of using a fixed-size buffer and splitting on newlines manually.
n, err := conn.Read(buf[:])
if n == 0 {
return
} else if n > 0 {
for _, line := range bytes.Split(buf[:n], []byte{'\n'}) {
if s := string(line); s != "" {
glog.Info("tsd replied: ", s)
}
}
}
if err != nil {
return
}
}
}
func (c *telnetClient) Put(d *DataPoint) error {
return c.PutBytes([]byte(d.String()))
}
func (c *telnetClient) PutBytes(d []byte) error {
var err error
if c.conn == nil {
c.conn, err = net.Dial("tcp", c.addr)
if err != nil {
return err
}
go readErrors(c.conn)
}
_, err = c.conn.Write(d)
if err != nil {
c.conn.Close()
c.conn = nil
}
return err
}


@ -0,0 +1,16 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main

import "fmt"

type textDumper struct{}

func newTextDumper() OpenTSDBConn {
	return textDumper{}
}

func (t textDumper) Put(d *DataPoint) error {
	// Write to stdout so the output can be piped (e.g. into nc, as the
	// README suggests); the builtin print writes to stderr.
	fmt.Print(d.String())
	return nil
}


@ -0,0 +1,37 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import "fmt"
// DataPoint for OpenTSDB to store.
type DataPoint struct {
// Metric name.
Metric string `json:"metric"`
// UNIX timestamp in nanoseconds; String() converts it to seconds for OpenTSDB's put.
Timestamp uint64 `json:"timestamp"`
// Value of the data point (integer or floating point).
Value interface{} `json:"value"`
// Tags. The host is automatically populated by the OpenTSDBConn.
Tags map[string]string `json:"tags"`
}
func (d *DataPoint) String() string {
var tags string
if len(d.Tags) != 0 {
for tag, value := range d.Tags {
tags += " " + tag + "=" + value
}
}
return fmt.Sprintf("put %s %d %#v%s\n", d.Metric, d.Timestamp/1e9, d.Value, tags)
}
// OpenTSDBConn is a managed connection to an OpenTSDB instance (or cluster).
type OpenTSDBConn interface {
Put(d *DataPoint) error
}
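For illustration, a hypothetical DataPoint and the telnet-style put line that
String() renders for it (all values invented; note the nanosecond timestamp
comes out as seconds):

```go
// Hypothetical usage, in the same package as datapoint.go above
// (requires importing "fmt"):
func exampleDataPoint() {
	d := &DataPoint{
		Metric:    "eos.environment.temperature",
		Timestamp: 1500000000000000000, // nanoseconds since the epoch
		Value:     int64(42),
		Tags:      map[string]string{"sensor": "TempSensor1"},
	}
	fmt.Print(d.String())
	// put eos.environment.temperature 1500000000 42 sensor=TempSensor1
}
```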


@ -0,0 +1,104 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"math/rand"
"time"
"github.com/aristanetworks/glog"
kcp "github.com/xtaci/kcp-go"
)
type udpClient struct {
addr string
conn *kcp.UDPSession
parity int
timeout time.Duration
}
func newUDPClient(addr string, parity int, timeout time.Duration) OpenTSDBConn {
return &udpClient{
addr: addr,
parity: parity,
timeout: timeout,
}
}
func (c *udpClient) Put(d *DataPoint) error {
var err error
if c.conn == nil {
// Prevent a bunch of clients all disconnecting and attempting to reconnect
// at nearly the same time.
time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)
c.conn, err = kcp.DialWithOptions(c.addr, nil, 10, c.parity)
if err != nil {
return err
}
c.conn.SetNoDelay(1, 40, 1, 1) // Suggested by kcp-go to lower cpu usage
}
dStr := d.String()
glog.V(3).Info(dStr)
c.conn.SetWriteDeadline(time.Now().Add(c.timeout))
_, err = c.conn.Write([]byte(dStr))
if err != nil {
c.conn.Close()
c.conn = nil
}
return err
}
type udpServer struct {
lis *kcp.Listener
telnet *telnetClient
}
func newUDPServer(udpAddr, tsdbAddr string, parity int) (*udpServer, error) {
lis, err := kcp.ListenWithOptions(udpAddr, nil, 10, parity)
if err != nil {
return nil, err
}
return &udpServer{
lis: lis,
telnet: newTelnetClient(tsdbAddr).(*telnetClient),
}, nil
}
func (c *udpServer) Run() error {
for {
conn, err := c.lis.AcceptKCP()
if err != nil {
return err
}
conn.SetNoDelay(1, 40, 1, 1) // Suggested by kcp-go to lower cpu usage
if glog.V(3) {
glog.Infof("New connection from %s", conn.RemoteAddr())
}
go func() {
defer conn.Close()
var buf [4096]byte
for {
n, err := conn.Read(buf[:])
if err != nil {
if n != 0 { // Not EOF
glog.Error(err)
}
return
}
if glog.V(3) {
glog.Info(string(buf[:n]))
}
err = c.telnet.PutBytes(buf[:n])
if err != nil {
glog.Error(err)
return
}
}
}()
}
}


@ -0,0 +1,3 @@
# openconfigbeat
The code for `openconfigbeat` lives at [aristanetworks/openconfigbeat](https://github.com/aristanetworks/openconfigbeat).

62
vendor/github.com/aristanetworks/goarista/dscp/dial.go generated vendored Normal file

@ -0,0 +1,62 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// Package dscp provides helper functions to apply DSCP / ECN / CoS flags to sockets.
package dscp
import (
"fmt"
"net"
"reflect"
"time"
)
// DialTCPWithTOS is similar to net.DialTCP but with the socket configured
// to use the given ToS (Type of Service), to specify DSCP / ECN / class
// of service flags to use for this connection.
func DialTCPWithTOS(laddr, raddr *net.TCPAddr, tos byte) (*net.TCPConn, error) {
conn, err := net.DialTCP("tcp", laddr, raddr)
if err != nil {
return nil, err
}
value := reflect.ValueOf(conn)
if err = setTOS(raddr.IP, value, tos); err != nil {
conn.Close()
return nil, err
}
return conn, err
}
// DialTimeoutWithTOS is similar to net.DialTimeout but with the socket configured
// to use the given ToS (Type of Service), to specify DSCP / ECN / class
// of service flags to use for this connection.
func DialTimeoutWithTOS(network, address string, timeout time.Duration, tos byte) (net.Conn,
error) {
conn, err := net.DialTimeout(network, address, timeout)
if err != nil {
return nil, err
}
var ip net.IP
// Unfortunately we have to explicitly switch on the address type here to
// avoid calling net.ResolveIpAddr(), as this would resolve the address
// again leading to a potentially different result.
switch addr := conn.RemoteAddr().(type) {
case *net.TCPAddr:
ip = addr.IP
case *net.UDPAddr:
ip = addr.IP
case *net.IPAddr:
ip = addr.IP
case *net.IPNet:
ip = addr.IP
default:
conn.Close()
return nil, fmt.Errorf("DialTimeoutWithTOS: cannot set TOS on a %s socket", network)
}
if err = setTOS(ip, reflect.ValueOf(conn), tos); err != nil {
conn.Close()
return nil, err
}
return conn, err
}
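A hypothetical caller of DialTCPWithTOS; the peer address is invented, and
ToS byte 40 is DSCP 10 (AF11) shifted left two bits, the same value the
package's tests below use:

```go
package main

import (
	"net"

	"github.com/aristanetworks/glog"
	"github.com/aristanetworks/goarista/dscp"
)

func main() {
	raddr, err := net.ResolveTCPAddr("tcp", "192.0.2.1:6042") // hypothetical peer
	if err != nil {
		glog.Fatal(err)
	}
	conn, err := dscp.DialTCPWithTOS(nil, raddr, 40) // ToS 40 == DSCP AF11
	if err != nil {
		glog.Fatal(err)
	}
	defer conn.Close()
}
```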


@ -0,0 +1,53 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package dscp_test
import (
"net"
"testing"
"github.com/aristanetworks/goarista/dscp"
)
func TestDialTCPWithTOS(t *testing.T) {
addr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 0}
listen, err := net.ListenTCP("tcp", addr)
if err != nil {
t.Fatal(err)
}
defer listen.Close()
done := make(chan struct{})
go func() {
conn, err := listen.Accept()
if err != nil {
t.Fatal(err)
}
defer conn.Close()
buf := []byte{'!'}
conn.Write(buf)
n, err := conn.Read(buf)
if n != 1 || err != nil {
t.Fatalf("Read returned %d / %s", n, err)
} else if buf[0] != '!' {
t.Fatalf("Expected to read '!' but got %q", buf)
}
close(done)
}()
conn, err := dscp.DialTCPWithTOS(nil, listen.Addr().(*net.TCPAddr), 40)
if err != nil {
t.Fatal("Connection failed:", err)
}
defer conn.Close()
buf := make([]byte, 1)
n, err := conn.Read(buf)
if n != 1 || err != nil {
t.Fatalf("Read returned %d / %s", n, err)
} else if buf[0] != '!' {
t.Fatalf("Expected to read '!' but got %q", buf)
}
conn.Write(buf)
<-done
}


@ -0,0 +1,27 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// Package dscp provides helper functions to apply DSCP / ECN / CoS flags to sockets.
package dscp
import (
"net"
"reflect"
)
// ListenTCPWithTOS is similar to net.ListenTCP but with the socket configured
// to use the given ToS (Type of Service), to specify DSCP / ECN / class
// of service flags to use for incoming connections.
func ListenTCPWithTOS(address *net.TCPAddr, tos byte) (*net.TCPListener, error) {
lsnr, err := net.ListenTCP("tcp", address)
if err != nil {
return nil, err
}
value := reflect.ValueOf(lsnr)
if err = setTOS(address.IP, value, tos); err != nil {
lsnr.Close()
return nil, err
}
return lsnr, err
}


@ -0,0 +1,62 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package dscp_test
import (
"net"
"testing"
"github.com/aristanetworks/goarista/dscp"
)
func TestListenTCPWithTOS(t *testing.T) {
testListenTCPWithTOS(t, "127.0.0.1")
//testListenTCPWithTOS(t, "::1")
}
func testListenTCPWithTOS(t *testing.T, ip string) {
// Note: This test doesn't actually verify that the connection uses the
// desired TOS byte, because that's kinda hard to check, but at least it
// verifies that we return a usable TCPListener.
addr := &net.TCPAddr{IP: net.ParseIP(ip), Port: 0}
listen, err := dscp.ListenTCPWithTOS(addr, 40)
if err != nil {
t.Fatal(err)
}
defer listen.Close()
done := make(chan struct{})
go func() {
conn, err := listen.Accept()
if err != nil {
t.Fatal(err)
}
defer conn.Close()
buf := []byte{'!'}
conn.Write(buf)
n, err := conn.Read(buf)
if n != 1 || err != nil {
t.Fatalf("Read returned %d / %s", n, err)
} else if buf[0] != '!' {
t.Fatalf("Expected to read '!' but got %q", buf)
}
close(done)
}()
conn, err := net.Dial(listen.Addr().Network(), listen.Addr().String())
if err != nil {
t.Fatal("Connection failed:", err)
}
defer conn.Close()
buf := make([]byte, 1)
n, err := conn.Read(buf)
if n != 1 || err != nil {
t.Fatalf("Read returned %d / %s", n, err)
} else if buf[0] != '!' {
t.Fatalf("Expected to read '!' but got %q", buf)
}
conn.Write(buf)
<-done
}


@ -0,0 +1,33 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package dscp
import (
"net"
"os"
"reflect"
"golang.org/x/sys/unix"
)
// This works for the UNIX implementation of netFD, i.e. not on Windows and Plan9.
// This kludge is needed until https://github.com/golang/go/issues/9661 is fixed.
// value can be the reflection of a connection or a dialer.
func setTOS(ip net.IP, value reflect.Value, tos byte) error {
netFD := value.Elem().FieldByName("fd").Elem()
fd := int(netFD.FieldByName("pfd").FieldByName("Sysfd").Int())
var proto, optname int
if ip.To4() != nil {
proto = unix.IPPROTO_IP
optname = unix.IP_TOS
} else {
proto = unix.IPPROTO_IPV6
optname = unix.IPV6_TCLASS
}
if err := unix.SetsockoptInt(fd, proto, optname, int(tos)); err != nil {
return os.NewSyscallError("setsockopt", err)
}
return nil
}


@ -0,0 +1,14 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package elasticsearch
import (
"strings"
)
// EscapeFieldName escapes field names for Elasticsearch
func EscapeFieldName(name string) string {
return strings.Replace(name, ".", "_", -1)
}


@ -0,0 +1,30 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package elasticsearch
import (
"testing"
)
func TestEscapeFieldName(t *testing.T) {
testcases := []struct {
unescaped string
escaped string
}{{
unescaped: "239.255.255.250_32_0.0.0.0_0",
escaped: "239_255_255_250_32_0_0_0_0_0",
}, {
unescaped: "foo",
escaped: "foo",
},
}
for _, test := range testcases {
escaped := EscapeFieldName(test.unescaped)
if escaped != test.escaped {
t.Errorf("Failed to escape %q: expected %q, got %q", test.unescaped,
test.escaped, escaped)
}
}
}


@ -0,0 +1,120 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package gnmi
import (
"context"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"strings"
"github.com/aristanetworks/glog"
pb "github.com/openconfig/gnmi/proto/gnmi"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
)
const (
defaultPort = "6042"
)
// Config is the gnmi.Client config
type Config struct {
Addr string
CAFile string
CertFile string
KeyFile string
Password string
Username string
TLS bool
}
// Dial connects to a gnmi service and returns a client
func Dial(cfg *Config) pb.GNMIClient {
var opts []grpc.DialOption
if cfg.TLS || cfg.CAFile != "" || cfg.CertFile != "" {
tlsConfig := &tls.Config{}
if cfg.CAFile != "" {
b, err := ioutil.ReadFile(cfg.CAFile)
if err != nil {
glog.Fatal(err)
}
cp := x509.NewCertPool()
if !cp.AppendCertsFromPEM(b) {
glog.Fatalf("credentials: failed to append certificates")
}
tlsConfig.RootCAs = cp
} else {
tlsConfig.InsecureSkipVerify = true
}
if cfg.CertFile != "" {
if cfg.KeyFile == "" {
glog.Fatalf("Please provide both -certfile and -keyfile")
}
cert, err := tls.LoadX509KeyPair(cfg.CertFile, cfg.KeyFile)
if err != nil {
glog.Fatal(err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
} else {
opts = append(opts, grpc.WithInsecure())
}
if !strings.ContainsRune(cfg.Addr, ':') {
cfg.Addr += ":" + defaultPort
}
conn, err := grpc.Dial(cfg.Addr, opts...)
if err != nil {
glog.Fatalf("Failed to dial: %s", err)
}
return pb.NewGNMIClient(conn)
}
// NewContext returns a new context with username and password
// metadata if they are set in cfg.
func NewContext(ctx context.Context, cfg *Config) context.Context {
if cfg.Username != "" {
ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs(
"username", cfg.Username,
"password", cfg.Password))
}
return ctx
}
// NewGetRequest returns a GetRequest for the given paths
func NewGetRequest(paths [][]string) (*pb.GetRequest, error) {
req := &pb.GetRequest{
Path: make([]*pb.Path, len(paths)),
}
for i, p := range paths {
gnmiPath, err := ParseGNMIElements(p)
if err != nil {
return nil, err
}
req.Path[i] = gnmiPath
}
return req, nil
}
// NewSubscribeRequest returns a SubscribeRequest for the given paths
func NewSubscribeRequest(paths [][]string) (*pb.SubscribeRequest, error) {
subList := &pb.SubscriptionList{
Subscription: make([]*pb.Subscription, len(paths)),
}
for i, p := range paths {
gnmiPath, err := ParseGNMIElements(p)
if err != nil {
return nil, err
}
subList.Subscription[i] = &pb.Subscription{Path: gnmiPath}
}
return &pb.SubscribeRequest{
Request: &pb.SubscribeRequest_Subscribe{Subscribe: subList}}, nil
}
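A minimal sketch of how a caller might wire these helpers together; the
address, credentials, and path are invented, and Get is defined in ops.go
below:

```go
package main

import (
	"context"

	"github.com/aristanetworks/glog"
	"github.com/aristanetworks/goarista/gnmi"
)

func main() {
	cfg := &gnmi.Config{Addr: "192.0.2.1", Username: "admin", Password: "pw", TLS: true}
	ctx := gnmi.NewContext(context.Background(), cfg)
	client := gnmi.Dial(cfg) // appends the default port 6042 when none is given
	// Print everything under /interfaces/interface.
	if err := gnmi.Get(ctx, client, [][]string{{"interfaces", "interface"}}); err != nil {
		glog.Fatal(err)
	}
}
```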

35
vendor/github.com/aristanetworks/goarista/gnmi/json.go generated vendored Normal file

@ -0,0 +1,35 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package gnmi
import (
"github.com/openconfig/gnmi/proto/gnmi"
)
// NotificationToMap converts a Notification into a map[string]interface{}
func NotificationToMap(notif *gnmi.Notification) (map[string]interface{}, error) {
m := make(map[string]interface{}, 1)
m["timestamp"] = notif.Timestamp
m["path"] = StrPath(notif.Prefix)
if len(notif.Update) != 0 {
	updates := make(map[string]interface{}, len(notif.Update))
	for _, update := range notif.Update {
		updates[StrPath(update.Path)] = strUpdateVal(update)
	}
	m["updates"] = updates
}
if len(notif.Delete) != 0 {
deletes := make([]string, len(notif.Delete))
for i, del := range notif.Delete {
deletes[i] = StrPath(del)
}
m["deletes"] = deletes
}
return m, nil
}
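For reference, the map produced for a notification carrying one update and
one delete has roughly this shape (paths and values invented; these are the
keys that ocsplunk's main.go above reads):

```go
// Sketch of NotificationToMap's result; strUpdateVal renders update
// values as strings.
var example = map[string]interface{}{
	"timestamp": int64(1500000000000000000),
	"path":      "/Sysdb/environment",
	"updates":   map[string]interface{}{"temperature/value": "42"},
	"deletes":   []string{"fan/speed"},
}
```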


@ -0,0 +1,251 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package gnmi
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"path"
pb "github.com/openconfig/gnmi/proto/gnmi"
"google.golang.org/grpc/codes"
)
// Get sends a GetRequest to the given client.
func Get(ctx context.Context, client pb.GNMIClient, paths [][]string) error {
req, err := NewGetRequest(paths)
if err != nil {
return err
}
resp, err := client.Get(ctx, req)
if err != nil {
return err
}
for _, notif := range resp.Notification {
for _, update := range notif.Update {
fmt.Printf("%s:\n", StrPath(update.Path))
fmt.Println(strUpdateVal(update))
}
}
return nil
}
// Capabilities returns the capabilities of the client.
func Capabilities(ctx context.Context, client pb.GNMIClient) error {
resp, err := client.Capabilities(ctx, &pb.CapabilityRequest{})
if err != nil {
return err
}
fmt.Printf("Version: %s\n", resp.GNMIVersion)
for _, mod := range resp.SupportedModels {
fmt.Printf("SupportedModel: %s\n", mod)
}
for _, enc := range resp.SupportedEncodings {
fmt.Printf("SupportedEncoding: %s\n", enc)
}
return nil
}
// extractJSON takes a value that may be a path to a file or inline JSON.
// If it names a readable file, the file's contents are returned; otherwise
// val itself is returned as bytes.
func extractJSON(val string) []byte {
jsonBytes, err := ioutil.ReadFile(val)
if err != nil {
jsonBytes = []byte(val)
}
return jsonBytes
}
// strUpdateVal will return a string representing the value within the supplied update
func strUpdateVal(u *pb.Update) string {
if u.Value != nil {
return string(u.Value.Value) // Backwards compatibility with pre-v0.4 gnmi
}
return strVal(u.Val)
}
// strVal will return a string representing the supplied value
func strVal(val *pb.TypedValue) string {
switch v := val.GetValue().(type) {
case *pb.TypedValue_StringVal:
return v.StringVal
case *pb.TypedValue_JsonIetfVal:
return string(v.JsonIetfVal)
case *pb.TypedValue_IntVal:
return fmt.Sprintf("%v", v.IntVal)
case *pb.TypedValue_UintVal:
return fmt.Sprintf("%v", v.UintVal)
case *pb.TypedValue_BoolVal:
return fmt.Sprintf("%v", v.BoolVal)
case *pb.TypedValue_BytesVal:
return string(v.BytesVal)
case *pb.TypedValue_DecimalVal:
return strDecimal64(v.DecimalVal)
case *pb.TypedValue_LeaflistVal:
return strLeaflist(v.LeaflistVal)
default:
panic(v)
}
}
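// Sample renderings (illustrative, derived from the cases above):
//
//	&pb.TypedValue{Value: &pb.TypedValue_StringVal{StringVal: "up"}} -> "up"
//	&pb.TypedValue{Value: &pb.TypedValue_IntVal{IntVal: -42}}        -> "-42"
//	&pb.TypedValue{Value: &pb.TypedValue_BoolVal{BoolVal: true}}     -> "true"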
func strDecimal64(d *pb.Decimal64) string {
	// Digits is an int64 in the gnmi proto, so the arithmetic must be done
	// in int64 rather than uint64.
	var i, frac int64
	if d.Precision > 0 {
		div := int64(10)
		it := d.Precision - 1
		for it > 0 {
			div *= 10
			it--
		}
		i = d.Digits / div
		frac = d.Digits % div
	} else {
		i = d.Digits
	}
	// The sign is carried by i; print the fractional part unsigned.
	if frac < 0 {
		frac = -frac
	}
	return fmt.Sprintf("%d.%d", i, frac)
}
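// Worked example (illustrative): Digits=1234 with Precision=2 encodes
// 1234 * 10^-2, so div=100, i=12, frac=34 and the result is "12.34".
// With Precision=0 there is no fractional part and Digits=5 renders as "5.0".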
// strLeaflist builds a human-readable form of a leaf-list, e.g. [1,2,3] or [a,b,c]
func strLeaflist(v *pb.ScalarArray) string {
s := make([]string, 0, len(v.Element))
sz := 2 // []
// convert arbitrary TypedValues to string form
for _, elm := range v.Element {
str := strVal(elm)
s = append(s, str)
sz += len(str) + 1 // %v + ,
}
	b := make([]byte, 0, sz) // zero length with pre-computed capacity, so the buffer has no leading NUL bytes
	buf := bytes.NewBuffer(b)
buf.WriteRune('[')
for i := range v.Element {
buf.WriteString(s[i])
if i < len(v.Element)-1 {
buf.WriteRune(',')
}
}
buf.WriteRune(']')
return buf.String()
}
func update(p *pb.Path, val string) *pb.Update {
var v *pb.TypedValue
switch p.Origin {
case "":
v = &pb.TypedValue{
Value: &pb.TypedValue_JsonIetfVal{JsonIetfVal: extractJSON(val)}}
case "cli":
v = &pb.TypedValue{
Value: &pb.TypedValue_AsciiVal{AsciiVal: val}}
default:
panic(fmt.Errorf("unexpected origin: %q", p.Origin))
}
return &pb.Update{Path: p, Val: v}
}
// Operation describes a gNMI operation.
type Operation struct {
Type string
Path []string
Val string
}
func newSetRequest(setOps []*Operation) (*pb.SetRequest, error) {
req := &pb.SetRequest{}
for _, op := range setOps {
p, err := ParseGNMIElements(op.Path)
if err != nil {
return nil, err
}
switch op.Type {
case "delete":
req.Delete = append(req.Delete, p)
case "update":
req.Update = append(req.Update, update(p, op.Val))
case "replace":
req.Replace = append(req.Replace, update(p, op.Val))
}
}
return req, nil
}
// Set sends a SetRequest to the given client.
func Set(ctx context.Context, client pb.GNMIClient, setOps []*Operation) error {
req, err := newSetRequest(setOps)
if err != nil {
return err
}
resp, err := client.Set(ctx, req)
if err != nil {
return err
}
if resp.Message != nil && codes.Code(resp.Message.Code) != codes.OK {
return errors.New(resp.Message.Message)
}
// TODO: Iterate over SetResponse.Response for more detailed error message?
return nil
}
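// Illustrative usage sketch (path and value are hypothetical; with an empty
// origin the value is sent as JSON-IETF, so strings must be JSON-quoted):
//
//	ops := []*Operation{{
//		Type: "update",
//		Path: []string{"system", "config", "hostname"},
//		Val:  `"switch1"`,
//	}}
//	if err := Set(context.Background(), client, ops); err != nil {
//		log.Fatal(err)
//	}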
// Subscribe sends a SubscribeRequest to the given client.
func Subscribe(ctx context.Context, client pb.GNMIClient, paths [][]string,
respChan chan<- *pb.SubscribeResponse, errChan chan<- error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := client.Subscribe(ctx)
if err != nil {
errChan <- err
return
}
req, err := NewSubscribeRequest(paths)
if err != nil {
errChan <- err
return
}
if err := stream.Send(req); err != nil {
errChan <- err
return
}
for {
resp, err := stream.Recv()
if err != nil {
if err == io.EOF {
return
}
errChan <- err
return
}
respChan <- resp
}
}
// LogSubscribeResponse logs update responses to stderr.
func LogSubscribeResponse(response *pb.SubscribeResponse) error {
switch resp := response.Response.(type) {
case *pb.SubscribeResponse_Error:
return errors.New(resp.Error.Message)
case *pb.SubscribeResponse_SyncResponse:
if !resp.SyncResponse {
return errors.New("initial sync failed")
}
case *pb.SubscribeResponse_Update:
prefix := StrPath(resp.Update.Prefix)
for _, update := range resp.Update.Update {
fmt.Printf("%s = %s\n", path.Join(prefix, StrPath(update.Path)),
strUpdateVal(update))
}
}
return nil
}
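// Illustrative usage sketch wiring Subscribe and LogSubscribeResponse
// together ("client" is an established pb.GNMIClient):
//
//	respChan := make(chan *pb.SubscribeResponse)
//	errChan := make(chan error)
//	go Subscribe(context.Background(), client,
//		SplitPaths([]string{"/interfaces"}), respChan, errChan)
//	for {
//		select {
//		case resp := <-respChan:
//			if err := LogSubscribeResponse(resp); err != nil {
//				log.Fatal(err)
//			}
//		case err := <-errChan:
//			log.Fatal(err)
//		}
//	}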


@ -0,0 +1,76 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package gnmi
import (
"testing"
"github.com/aristanetworks/goarista/test"
pb "github.com/openconfig/gnmi/proto/gnmi"
)
func TestNewSetRequest(t *testing.T) {
pathFoo := &pb.Path{
Element: []string{"foo"},
Elem: []*pb.PathElem{&pb.PathElem{Name: "foo"}},
}
pathCli := &pb.Path{
Origin: "cli",
}
testCases := map[string]struct {
setOps []*Operation
exp pb.SetRequest
}{
"delete": {
setOps: []*Operation{&Operation{Type: "delete", Path: []string{"foo"}}},
exp: pb.SetRequest{Delete: []*pb.Path{pathFoo}},
},
"update": {
setOps: []*Operation{&Operation{Type: "update", Path: []string{"foo"}, Val: "true"}},
exp: pb.SetRequest{
Update: []*pb.Update{&pb.Update{
Path: pathFoo,
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonIetfVal{JsonIetfVal: []byte("true")}},
}},
},
},
"replace": {
setOps: []*Operation{&Operation{Type: "replace", Path: []string{"foo"}, Val: "true"}},
exp: pb.SetRequest{
Replace: []*pb.Update{&pb.Update{
Path: pathFoo,
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonIetfVal{JsonIetfVal: []byte("true")}},
}},
},
},
"cli-replace": {
setOps: []*Operation{&Operation{Type: "replace", Path: []string{"cli"},
Val: "hostname foo\nip routing"}},
exp: pb.SetRequest{
Replace: []*pb.Update{&pb.Update{
Path: pathCli,
Val: &pb.TypedValue{
Value: &pb.TypedValue_AsciiVal{AsciiVal: "hostname foo\nip routing"}},
}},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
got, err := newSetRequest(tc.setOps)
if err != nil {
t.Fatal(err)
}
if diff := test.Diff(tc.exp, *got); diff != "" {
t.Errorf("unexpected diff: %s", diff)
}
})
}
}

233
vendor/github.com/aristanetworks/goarista/gnmi/path.go generated vendored Normal file

@ -0,0 +1,233 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package gnmi
import (
"bytes"
"fmt"
"sort"
"strings"
pb "github.com/openconfig/gnmi/proto/gnmi"
)
// nextTokenIndex returns the end index of the first token.
func nextTokenIndex(path string) int {
var inBrackets bool
var escape bool
for i, c := range path {
switch c {
case '[':
inBrackets = true
escape = false
case ']':
if !escape {
inBrackets = false
}
escape = false
case '\\':
escape = !escape
case '/':
if !inBrackets && !escape {
return i
}
escape = false
default:
escape = false
}
}
return len(path)
}
// SplitPath splits a gnmi path according to the spec. See
// https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-path-conventions.md
// No validation is done. Behavior is undefined if path is an invalid
// gnmi path. TODO: Do validation?
func SplitPath(path string) []string {
var result []string
if len(path) > 0 && path[0] == '/' {
path = path[1:]
}
for len(path) > 0 {
i := nextTokenIndex(path)
result = append(result, path[:i])
path = path[i:]
if len(path) > 0 && path[0] == '/' {
path = path[1:]
}
}
return result
}
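// Worked examples (consistent with TestSplitPath in path_test.go):
//
//	SplitPath("/foo/bar")           -> ["foo", "bar"]
//	SplitPath("/foo[name=///]/bar") -> ["foo[name=///]", "bar"]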
// SplitPaths splits multiple gnmi paths
func SplitPaths(paths []string) [][]string {
out := make([][]string, len(paths))
for i, path := range paths {
out[i] = SplitPath(path)
}
return out
}
// StrPath builds a human-readable form of a gnmi path.
// e.g. /a/b/c[e=f]
func StrPath(path *pb.Path) string {
if path == nil {
return "/"
} else if len(path.Elem) != 0 {
return strPathV04(path)
} else if len(path.Element) != 0 {
return strPathV03(path)
}
return "/"
}
// strPathV04 handles the v0.4 gnmi and later path.Elem member.
func strPathV04(path *pb.Path) string {
buf := &bytes.Buffer{}
for _, elm := range path.Elem {
buf.WriteRune('/')
writeSafeString(buf, elm.Name, '/')
if len(elm.Key) > 0 {
// Sort the keys so that they print in a consistent
// order. We don't have the YANG AST information, so the
// best we can do is sort them alphabetically.
keys := make([]string, 0, len(elm.Key))
for k := range elm.Key {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
buf.WriteRune('[')
buf.WriteString(k)
buf.WriteRune('=')
writeSafeString(buf, elm.Key[k], ']')
buf.WriteRune(']')
}
}
}
return buf.String()
}
// strPathV03 handles the v0.3 gnmi and earlier path.Element member.
func strPathV03(path *pb.Path) string {
return "/" + strings.Join(path.Element, "/")
}
func writeSafeString(buf *bytes.Buffer, s string, esc rune) {
for _, c := range s {
if c == esc || c == '\\' {
buf.WriteRune('\\')
}
buf.WriteRune(c)
}
}
// ParseGNMIElements builds up a gnmi path, from user-supplied text
func ParseGNMIElements(elms []string) (*pb.Path, error) {
if len(elms) == 1 && elms[0] == "cli" {
return &pb.Path{
Origin: "cli",
}, nil
}
var parsed []*pb.PathElem
for _, e := range elms {
n, keys, err := parseElement(e)
if err != nil {
return nil, err
}
parsed = append(parsed, &pb.PathElem{Name: n, Key: keys})
}
return &pb.Path{
Element: elms, // Backwards compatibility with pre-v0.4 gnmi
Elem: parsed,
}, nil
}
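// Illustrative round trip through SplitPath and StrPath:
//
//	p, _ := ParseGNMIElements(SplitPath("/foo[a=1]/bar"))
//	// p.Elem is [{Name: "foo", Key: {"a": "1"}}, {Name: "bar"}]
//	// StrPath(p) == "/foo[a=1]/bar"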
// parseElement parses a path element, according to the gNMI specification. See
// https://github.com/openconfig/reference/blame/master/rpc/gnmi/gnmi-path-conventions.md
//
// It returns the first string (the current element name), and an optional map of key name
// value pairs.
func parseElement(pathElement string) (string, map[string]string, error) {
// First check if there are any keys, i.e. do we have at least one '[' in the element
name, keyStart := findUnescaped(pathElement, '[')
if keyStart < 0 {
return name, nil, nil
}
// Error if there is no element name or if the "[" is at the beginning of the path element
if len(name) == 0 {
return "", nil, fmt.Errorf("failed to find element name in %q", pathElement)
}
// Look at the keys now.
keys := make(map[string]string)
keyPart := pathElement[keyStart:]
for keyPart != "" {
k, v, nextKey, err := parseKey(keyPart)
if err != nil {
return "", nil, err
}
keys[k] = v
keyPart = nextKey
}
return name, keys, nil
}
// parseKey returns the key name, key value and the remaining string to be parsed.
func parseKey(s string) (string, string, string, error) {
if s[0] != '[' {
return "", "", "", fmt.Errorf("failed to find opening '[' in %q", s)
}
k, iEq := findUnescaped(s[1:], '=')
if iEq < 0 {
return "", "", "", fmt.Errorf("failed to find '=' in %q", s)
}
if k == "" {
return "", "", "", fmt.Errorf("failed to find key name in %q", s)
}
rhs := s[1+iEq+1:]
v, iClosBr := findUnescaped(rhs, ']')
if iClosBr < 0 {
return "", "", "", fmt.Errorf("failed to find ']' in %q", s)
}
if v == "" {
return "", "", "", fmt.Errorf("failed to find key value in %q", s)
}
next := rhs[iClosBr+1:]
return k, v, next, nil
}
// findUnescaped will return the index of the first unescaped match of 'find', and the unescaped
// string leading up to it.
func findUnescaped(s string, find byte) (string, int) {
// Take a fast track if there are no escape sequences
if strings.IndexByte(s, '\\') == -1 {
i := strings.IndexByte(s, find)
if i < 0 {
return s, -1
}
return s[:i], i
}
// Find the first match, taking care of escaped chars.
	buf := &bytes.Buffer{}
	var i int
	n := len(s) // use n rather than shadowing the len builtin
	for i = 0; i < n; {
		ch := s[i]
		if ch == find {
			return buf.String(), i
		} else if ch == '\\' && i < n-1 {
			i++
			ch = s[i]
		}
		buf.WriteByte(ch)
		i++
	}
	return buf.String(), -1
}
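// Worked example (illustrative): findUnescaped(`a\=b=c`, '=') skips the
// escaped '=' and returns ("a=b", 4), the unescaped prefix plus the index
// of the first real match.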


@ -0,0 +1,265 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package gnmi
import (
"fmt"
"testing"
"github.com/aristanetworks/goarista/test"
pb "github.com/openconfig/gnmi/proto/gnmi"
)
func p(s ...string) []string {
return s
}
func TestSplitPath(t *testing.T) {
for i, tc := range []struct {
in string
exp []string
}{{
in: "/foo/bar",
exp: p("foo", "bar"),
}, {
in: "/foo/bar/",
exp: p("foo", "bar"),
}, {
in: "//foo//bar//",
exp: p("", "foo", "", "bar", ""),
}, {
in: "/foo[name=///]/bar",
exp: p("foo[name=///]", "bar"),
}, {
in: `/foo[name=[\\\]/]/bar`,
exp: p(`foo[name=[\\\]/]`, "bar"),
}, {
in: `/foo[name=[\\]/bar`,
exp: p(`foo[name=[\\]`, "bar"),
}, {
in: "/foo[a=1][b=2]/bar",
exp: p("foo[a=1][b=2]", "bar"),
}, {
in: "/foo[a=1\\]2][b=2]/bar",
exp: p("foo[a=1\\]2][b=2]", "bar"),
}, {
in: "/foo[a=1][b=2]/bar\\baz",
exp: p("foo[a=1][b=2]", "bar\\baz"),
}} {
got := SplitPath(tc.in)
if !test.DeepEqual(tc.exp, got) {
t.Errorf("[%d] unexpect split for %q. Expected: %v, Got: %v",
i, tc.in, tc.exp, got)
}
}
}
func TestStrPath(t *testing.T) {
for i, tc := range []struct {
path string
}{{
path: "/",
}, {
path: "/foo/bar",
}, {
path: "/foo[name=a]/bar",
}, {
path: "/foo[a=1][b=2]/bar",
}, {
path: "/foo[a=1\\]2][b=2]/bar",
}, {
path: "/foo[a=1][b=2]/bar\\/baz",
}} {
sElms := SplitPath(tc.path)
pbPath, err := ParseGNMIElements(sElms)
if err != nil {
t.Errorf("failed to parse %s: %s", sElms, err)
}
s := StrPath(pbPath)
if !test.DeepEqual(tc.path, s) {
t.Errorf("[%d] want %s, got %s", i, tc.path, s)
}
}
}
func TestOriginCLIPath(t *testing.T) {
path := "cli"
sElms := SplitPath(path)
pbPath, err := ParseGNMIElements(sElms)
if err != nil {
t.Fatal(err)
}
expected := pb.Path{Origin: "cli"}
if !test.DeepEqual(expected, *pbPath) {
t.Errorf("want %v, got %v", expected, *pbPath)
}
}
func TestStrPathBackwardsCompat(t *testing.T) {
for i, tc := range []struct {
path *pb.Path
str string
}{{
path: &pb.Path{
Element: p("foo[a=1][b=2]", "bar"),
},
str: "/foo[a=1][b=2]/bar",
}} {
got := StrPath(tc.path)
if got != tc.str {
t.Errorf("[%d] want %q, got %q", i, tc.str, got)
}
}
}
func TestParseElement(t *testing.T) {
// test cases
cases := []struct {
// name is the name of the test useful if you want to run a single test
// from the command line -run TestParseElement/<name>
name string
// in is the path element to be parsed
in string
// fieldName is field name (YANG node name) expected to be parsed from the path element.
// Normally this is simply the path element, or if the path element contains keys this is
// the text before the first [
fieldName string
// keys is a map of the expected key value pairs from within the []s in the
// path element.
//
// For example prefix[ip-prefix=10.0.0.0/24][masklength-range=26..28]
// fieldName would be "prefix"
// keys would be {"ip-prefix": "10.0.0.0/24", "masklength-range": "26..28"}
keys map[string]string
// expectedError is the exact error we expect.
expectedError error
}{{
name: "no_elms",
in: "hello",
fieldName: "hello",
}, {
name: "single_open",
in: "[",
expectedError: fmt.Errorf("failed to find element name in %q", "["),
}, {
name: "no_equal_no_close",
in: "hello[there",
expectedError: fmt.Errorf("failed to find '=' in %q", "[there"),
}, {
name: "no_equals",
in: "hello[there]",
expectedError: fmt.Errorf("failed to find '=' in %q", "[there]"),
}, {
name: "no_left_side",
in: "hello[=there]",
expectedError: fmt.Errorf("failed to find key name in %q", "[=there]"),
}, {
name: "no_right_side",
in: "hello[there=]",
expectedError: fmt.Errorf("failed to find key value in %q", "[there=]"),
}, {
name: "hanging_escape",
in: "hello[there\\",
expectedError: fmt.Errorf("failed to find '=' in %q", "[there\\"),
}, {
name: "single_name_value",
in: "hello[there=where]",
fieldName: "hello",
keys: map[string]string{"there": "where"},
}, {
name: "single_value_with=",
in: "hello[there=whe=r=e]",
fieldName: "hello",
keys: map[string]string{"there": "whe=r=e"},
}, {
name: "single_value_with=_and_escaped_]",
in: `hello[there=whe=\]r=e]`,
fieldName: "hello",
keys: map[string]string{"there": `whe=]r=e`},
}, {
name: "single_value_with[",
in: "hello[there=w[[here]",
fieldName: "hello",
keys: map[string]string{"there": "w[[here"},
}, {
name: "value_single_open",
in: "hello[first=value][",
expectedError: fmt.Errorf("failed to find '=' in %q", "["),
}, {
name: "value_no_close",
in: "hello[there=where][somename",
expectedError: fmt.Errorf("failed to find '=' in %q", "[somename"),
}, {
name: "value_no_equals",
in: "hello[there=where][somename]",
expectedError: fmt.Errorf("failed to find '=' in %q", "[somename]"),
}, {
name: "no_left_side",
in: "hello[there=where][=somevalue]",
expectedError: fmt.Errorf("failed to find key name in %q", "[=somevalue]"),
}, {
name: "no_right_side",
in: "hello[there=where][somename=]",
expectedError: fmt.Errorf("failed to find key value in %q", "[somename=]"),
}, {
name: "two_name_values",
in: "hello[there=where][somename=somevalue]",
fieldName: "hello",
keys: map[string]string{"there": "where", "somename": "somevalue"},
}, {
name: "three_name_values",
in: "hello[there=where][somename=somevalue][anothername=value]",
fieldName: "hello",
keys: map[string]string{"there": "where", "somename": "somevalue",
"anothername": "value"},
}, {
name: "aserisk_value",
in: "hello[there=*][somename=somevalue][anothername=value]",
fieldName: "hello",
keys: map[string]string{"there": "*", "somename": "somevalue",
"anothername": "value"},
}}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
fieldName, keys, err := parseElement(tc.in)
if !test.DeepEqual(tc.expectedError, err) {
t.Fatalf("[%s] expected err %#v, got %#v", tc.name, tc.expectedError, err)
}
if !test.DeepEqual(tc.keys, keys) {
t.Fatalf("[%s] expected output %#v, got %#v", tc.name, tc.keys, keys)
}
if tc.fieldName != fieldName {
t.Fatalf("[%s] expected field name %s, got %s", tc.name, tc.fieldName, fieldName)
}
})
}
}
func BenchmarkPathElementToSingleElementName(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _, _ = parseElement("hello")
}
}
func BenchmarkPathElementTwoKeys(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _, _ = parseElement("hello[hello=world][bye=moon]")
}
}
func BenchmarkPathElementBadKeys(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _, _ = parseElement("hello[hello=world][byemoon]")
}
}
func BenchmarkPathElementMaxKeys(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _, _ = parseElement("hello[name=firstName][name=secondName][name=thirdName]" +
"[name=fourthName][name=fifthName][name=sixthName]")
}
}

26
vendor/github.com/aristanetworks/goarista/iptables.sh generated vendored Executable file

@ -0,0 +1,26 @@
#!/bin/sh
# Copyright (c) 2016 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.
DEFAULT_PORT=6042
set -e
if [ "$#" -lt 1 ]
then
echo "usage: $0 <host> [<gNMI port>]"
exit 1
fi
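# Example invocation (hypothetical host; the port argument is optional and
# defaults to 6042):
#   ./iptables.sh my-switch 6042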
echo "WARNING: if you're not using EOS-INT, EOS-REV-0-1 or EOS 4.18 or earlier please use -allowed_ips on the server instead."
host=$1
port=$DEFAULT_PORT
if [ "$#" -gt 1 ]
then
port=$2
fi
iptables="bash sudo iptables -A INPUT -p tcp --dport $port -j ACCEPT"
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $host "$iptables"
echo "opened TCP port $port on $host"


@ -0,0 +1,45 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package kafka
import (
"os"
"time"
"github.com/Shopify/sarama"
"github.com/aristanetworks/glog"
)
const (
outOfBrokersBackoff = 30 * time.Second
outOfBrokersRetries = 5
)
// NewClient returns a Kafka client
func NewClient(addresses []string) (sarama.Client, error) {
config := sarama.NewConfig()
hostname, err := os.Hostname()
if err != nil {
hostname = ""
}
config.ClientID = hostname
config.Producer.Compression = sarama.CompressionSnappy
config.Producer.Return.Successes = true
var client sarama.Client
retries := outOfBrokersRetries + 1
for retries > 0 {
client, err = sarama.NewClient(addresses, config)
retries--
if err == sarama.ErrOutOfBrokers {
glog.Errorf("Can't connect to the Kafka cluster at %s (%d retries left): %s",
addresses, retries, err)
time.Sleep(outOfBrokersBackoff)
} else {
break
}
}
return client, err
}
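// Illustrative usage sketch (the broker address is hypothetical;
// NewAsyncProducerFromClient is part of the sarama API):
//
//	client, err := NewClient([]string{"kafka1:9092"})
//	if err != nil {
//		glog.Fatal(err)
//	}
//	producer, err := sarama.NewAsyncProducerFromClient(client)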


@ -0,0 +1,94 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package kafka
import (
"expvar"
"fmt"
"sync/atomic"
"time"
"github.com/aristanetworks/goarista/monitor"
"github.com/Shopify/sarama"
"github.com/aristanetworks/glog"
"github.com/golang/protobuf/proto"
)
// MessageEncoder is an encoder interface
// which handles encoding proto.Message to sarama.ProducerMessage
type MessageEncoder interface {
Encode(proto.Message) ([]*sarama.ProducerMessage, error)
HandleSuccess(*sarama.ProducerMessage)
HandleError(*sarama.ProducerError)
}
// BaseEncoder implements MessageEncoder interface
// and mainly handle monitoring
type BaseEncoder struct {
// Used for monitoring
numSuccesses monitor.Uint
numFailures monitor.Uint
histogram *monitor.LatencyHistogram
}
// counter counts the number of Sysdb clients we have, and is used to guarantee that we
// always have a unique name exported to expvar
var counter uint32
// NewBaseEncoder returns a new base MessageEncoder
func NewBaseEncoder(typ string) *BaseEncoder {
// Setup monitoring structures
histName := "kafkaProducerHistogram_" + typ
statsName := "messagesStats"
if id := atomic.AddUint32(&counter, 1); id > 1 {
histName = fmt.Sprintf("%s_%d", histName, id)
statsName = fmt.Sprintf("%s_%d", statsName, id)
}
hist := monitor.NewLatencyHistogram(histName, time.Microsecond, 32, 0.3, 1000, 0)
e := &BaseEncoder{
histogram: hist,
}
statsMap := expvar.NewMap(statsName)
statsMap.Set("successes", &e.numSuccesses)
statsMap.Set("failures", &e.numFailures)
return e
}
// Encode encodes the proto message to a sarama.ProducerMessage
func (e *BaseEncoder) Encode(message proto.Message) ([]*sarama.ProducerMessage,
error) {
	// Intentionally a no-op; it exists so that BaseEncoder satisfies the
	// MessageEncoder interface.
return nil, nil
}
// HandleSuccess processes the metadata of messages from the kafka producer Successes channel
func (e *BaseEncoder) HandleSuccess(msg *sarama.ProducerMessage) {
// TODO: Fix this and provide an interface to get the metadata object
metadata, ok := msg.Metadata.(Metadata)
if !ok {
return
}
// TODO: Add a monotonic clock source when one becomes available
e.histogram.UpdateLatencyValues(time.Since(metadata.StartTime))
e.numSuccesses.Add(uint64(metadata.NumMessages))
}
// HandleError processes the metadata of messages from the kafka producer Errors channel
func (e *BaseEncoder) HandleError(msg *sarama.ProducerError) {
// TODO: Fix this and provide an interface to get the metadata object
metadata, ok := msg.Msg.Metadata.(Metadata)
if !ok {
return
}
// TODO: Add a monotonic clock source when one becomes available
e.histogram.UpdateLatencyValues(time.Since(metadata.StartTime))
glog.Errorf("Kafka Producer error: %s", msg.Error())
e.numFailures.Add(uint64(metadata.NumMessages))
}
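// Illustrative sketch of a concrete encoder embedding BaseEncoder; the
// struct, the topic name, and the use of proto.Marshal are assumptions, not
// part of this package:
//
//	type protoEncoder struct {
//		*BaseEncoder
//		topic string
//	}
//
//	func (e *protoEncoder) Encode(m proto.Message) ([]*sarama.ProducerMessage, error) {
//		b, err := proto.Marshal(m)
//		if err != nil {
//			return nil, err
//		}
//		return []*sarama.ProducerMessage{{
//			Topic: e.topic,
//			Value: sarama.ByteEncoder(b),
//		}}, nil
//	}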

Some files were not shown because too many files have changed in this diff.