diff --git a/Gopkg.lock b/Gopkg.lock
index 977e5eb8..55fb47d9 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1,6 +1,17 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+[[projects]]
+ digest = "1:48a213e9dc4880bbbd6999309a476fa4d3cc67560aa7127154cf8ea95bd464c2"
+ name = "github.com/allegro/bigcache"
+ packages = [
+ ".",
+ "queue",
+ ]
+ pruneopts = ""
+ revision = "f31987a23e44c5121ef8c8b2f2ea2e8ffa37b068"
+ version = "v1.1.0"
+
[[projects]]
branch = "master"
digest = "1:a313376bcbcce8ae8bddb8089a7293e0473a0f8e9e3710d6244e09e81875ccf0"
@@ -26,7 +37,7 @@
version = "v1.7.1"
[[projects]]
- digest = "1:c205f1963071408c1fac73c1b37c86ef9b98d80f17e690a2239853cde255ad3d"
+ digest = "1:a9c8210eb5d36a9a6e66953dc3d3cabd3afbbfb4f50baab0db1af1b723254b82"
name = "github.com/ethereum/go-ethereum"
packages = [
".",
@@ -64,8 +75,8 @@
"trie",
]
pruneopts = ""
- revision = "58632d44021bf095b43a1bb2443e6e3690a94739"
- version = "v1.8.18"
+ revision = "24d727b6d6e2c0cde222fa12155c4a6db5caaf2e"
+ version = "v1.8.20"
[[projects]]
digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
@@ -537,6 +548,7 @@
"github.com/ethereum/go-ethereum/crypto",
"github.com/ethereum/go-ethereum/ethclient",
"github.com/ethereum/go-ethereum/ethdb",
+ "github.com/ethereum/go-ethereum/log",
"github.com/ethereum/go-ethereum/p2p",
"github.com/ethereum/go-ethereum/p2p/discv5",
"github.com/ethereum/go-ethereum/params",
diff --git a/Gopkg.toml b/Gopkg.toml
index 1636b6c5..82a3ca70 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -42,7 +42,7 @@
name = "github.com/lib/pq"
[[constraint]]
- name = "gopkg.in/Sirupsen/logrus.v1"
+ name = "github.com/sirupsen/logrus"
version = "1.2.0"
[[constraint]]
@@ -51,4 +51,4 @@
[[constraint]]
name = "github.com/ethereum/go-ethereum"
- version = "1.8.18"
+ version = "1.8.20"
diff --git a/vendor/github.com/allegro/bigcache/.gitignore b/vendor/github.com/allegro/bigcache/.gitignore
new file mode 100644
index 00000000..372c42cb
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/.gitignore
@@ -0,0 +1,5 @@
+.idea
+.DS_Store
+/server/server.exe
+/server/server
+CHANGELOG.md
diff --git a/vendor/github.com/allegro/bigcache/.travis.yml b/vendor/github.com/allegro/bigcache/.travis.yml
new file mode 100644
index 00000000..a9d987ef
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/.travis.yml
@@ -0,0 +1,31 @@
+language: go
+
+go:
+ - 1.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+before_install:
+ - go get github.com/modocache/gover
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+ - go get golang.org/x/tools/cmd/goimports
+ - go get github.com/golang/lint/golint
+ - go get github.com/stretchr/testify/assert
+ - go get github.com/gordonklaus/ineffassign
+
+script:
+ - gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. Following files has problem:\n $unformatted" && false)
+ - diff <(echo -n) <(gofmt -s -d .)
+ - golint ./... # This won't break the build, just show warnings
+ - ineffassign .
+ - go vet ./...
+ - go test -race -count=1 -coverprofile=queue.coverprofile ./queue
+ - go test -race -count=1 -coverprofile=server.coverprofile ./server
+ - go test -race -count=1 -coverprofile=main.coverprofile
+ - $HOME/gopath/bin/gover
+ - $HOME/gopath/bin/goveralls -coverprofile=gover.coverprofile -service travis-ci
diff --git a/vendor/github.com/allegro/bigcache/LICENSE b/vendor/github.com/allegro/bigcache/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/allegro/bigcache/README.md b/vendor/github.com/allegro/bigcache/README.md
new file mode 100644
index 00000000..cd462d36
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/README.md
@@ -0,0 +1,145 @@
+# BigCache [![Build Status](https://travis-ci.org/allegro/bigcache.svg?branch=master)](https://travis-ci.org/allegro/bigcache) [![Coverage Status](https://coveralls.io/repos/github/allegro/bigcache/badge.svg?branch=master)](https://coveralls.io/github/allegro/bigcache?branch=master) [![GoDoc](https://godoc.org/github.com/allegro/bigcache?status.svg)](https://godoc.org/github.com/allegro/bigcache) [![Go Report Card](https://goreportcard.com/badge/github.com/allegro/bigcache)](https://goreportcard.com/report/github.com/allegro/bigcache)
+
+Fast, concurrent, evicting in-memory cache written to keep big number of entries without impact on performance.
+BigCache keeps entries on heap but omits GC for them. To achieve that operations on bytes arrays take place,
+therefore entries (de)serialization in front of the cache will be needed in most use cases.
+
+## Usage
+
+### Simple initialization
+
+```go
+import "github.com/allegro/bigcache"
+
+cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
+
+cache.Set("my-unique-key", []byte("value"))
+
+entry, _ := cache.Get("my-unique-key")
+fmt.Println(string(entry))
+```
+
+### Custom initialization
+
+When cache load can be predicted in advance then it is better to use custom initialization because additional memory
+allocation can be avoided in that way.
+
+```go
+import (
+ "log"
+
+ "github.com/allegro/bigcache"
+)
+
+config := bigcache.Config {
+ // number of shards (must be a power of 2)
+ Shards: 1024,
+ // time after which entry can be evicted
+ LifeWindow: 10 * time.Minute,
+ // rps * lifeWindow, used only in initial memory allocation
+ MaxEntriesInWindow: 1000 * 10 * 60,
+ // max entry size in bytes, used only in initial memory allocation
+ MaxEntrySize: 500,
+ // prints information about additional memory allocation
+ Verbose: true,
+ // cache will not allocate more memory than this limit, value in MB
+ // if value is reached then the oldest entries can be overridden for the new ones
+ // 0 value means no size limit
+ HardMaxCacheSize: 8192,
+ // callback fired when the oldest entry is removed because of its
+ // expiration time or no space left for the new entry. Default value is nil which
+ // means no callback and it prevents from unwrapping the oldest entry.
+ OnRemove: nil,
+ }
+
+cache, initErr := bigcache.NewBigCache(config)
+if initErr != nil {
+ log.Fatal(initErr)
+}
+
+cache.Set("my-unique-key", []byte("value"))
+
+if entry, err := cache.Get("my-unique-key"); err == nil {
+ fmt.Println(string(entry))
+}
+```
+
+## Benchmarks
+
+Three caches were compared: bigcache, [freecache](https://github.com/coocood/freecache) and map.
+Benchmark tests were made using an i7-6700K with 32GB of RAM on Windows 10.
+
+### Writes and reads
+
+```bash
+cd caches_bench; go test -bench=. -benchtime=10s ./... -timeout 30m
+
+BenchmarkMapSet-8 2000000 716 ns/op 336 B/op 3 allocs/op
+BenchmarkConcurrentMapSet-8 1000000 1292 ns/op 347 B/op 8 allocs/op
+BenchmarkFreeCacheSet-8 3000000 501 ns/op 371 B/op 3 allocs/op
+BenchmarkBigCacheSet-8 3000000 482 ns/op 303 B/op 2 allocs/op
+BenchmarkMapGet-8 5000000 309 ns/op 24 B/op 1 allocs/op
+BenchmarkConcurrentMapGet-8 2000000 659 ns/op 24 B/op 2 allocs/op
+BenchmarkFreeCacheGet-8 3000000 541 ns/op 152 B/op 3 allocs/op
+BenchmarkBigCacheGet-8 3000000 420 ns/op 152 B/op 3 allocs/op
+BenchmarkBigCacheSetParallel-8 10000000 184 ns/op 313 B/op 3 allocs/op
+BenchmarkFreeCacheSetParallel-8 10000000 195 ns/op 357 B/op 4 allocs/op
+BenchmarkConcurrentMapSetParallel-8 5000000 242 ns/op 200 B/op 6 allocs/op
+BenchmarkBigCacheGetParallel-8 20000000 100 ns/op 152 B/op 4 allocs/op
+BenchmarkFreeCacheGetParallel-8 10000000 133 ns/op 152 B/op 4 allocs/op
+BenchmarkConcurrentMapGetParallel-8 10000000 202 ns/op 24 B/op 2 allocs/op
+```
+
+Writes and reads in bigcache are faster than in freecache.
+Writes to map are the slowest.
+
+### GC pause time
+
+```bash
+cd caches_bench; go run caches_gc_overhead_comparison.go
+
+Number of entries: 20000000
+GC pause for bigcache: 5.8658ms
+GC pause for freecache: 32.4341ms
+GC pause for map: 52.9661ms
+```
+
+Test shows how long are the GC pauses for caches filled with 20mln of entries.
+Bigcache and freecache have very similar GC pause time.
+It is clear that both reduce GC overhead in contrast to map
+which GC pause time took more than 10 seconds.
+
+## How it works
+
+BigCache relies on optimization presented in 1.5 version of Go ([issue-9477](https://github.com/golang/go/issues/9477)).
+This optimization states that if map without pointers in keys and values is used then GC will omit its content.
+Therefore BigCache uses `map[uint64]uint32` where keys are hashed and values are offsets of entries.
+
+Entries are kept in bytes array, to omit GC again.
+Bytes array size can grow to gigabytes without impact on performance
+because GC will only see single pointer to it.
+
+## Bigcache vs Freecache
+
+Both caches provide the same core features but they reduce GC overhead in different ways.
+Bigcache relies on `map[uint64]uint32`, freecache implements its own mapping built on
+slices to reduce number of pointers.
+
+Results from benchmark tests are presented above.
+One of the advantage of bigcache over freecache is that you don’t need to know
+the size of the cache in advance, because when bigcache is full,
+it can allocate additional memory for new entries instead of
+overwriting existing ones as freecache does currently.
+However hard max size in bigcache also can be set, check [HardMaxCacheSize](https://godoc.org/github.com/allegro/bigcache#Config).
+
+## HTTP Server
+
+This package also includes an easily deployable HTTP implementation of BigCache, which can be found in the [server](/server) package.
+
+## More
+
+Bigcache genesis is described in allegro.tech blog post: [writing a very fast cache service in Go](http://allegro.tech/2016/03/writing-fast-cache-service-in-go.html)
+
+## License
+
+BigCache is released under the Apache 2.0 license (see [LICENSE](LICENSE))
diff --git a/vendor/github.com/allegro/bigcache/bigcache.go b/vendor/github.com/allegro/bigcache/bigcache.go
new file mode 100644
index 00000000..3a6f6bd6
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/bigcache.go
@@ -0,0 +1,155 @@
+package bigcache
+
+import (
+ "fmt"
+ "time"
+)
+
+const (
+ minimumEntriesInShard = 10 // Minimum number of entries in single shard
+)
+
+// BigCache is fast, concurrent, evicting cache created to keep big number of entries without impact on performance.
+// It keeps entries on heap but omits GC for them. To achieve that operations on bytes arrays take place,
+// therefore entries (de)serialization in front of the cache will be needed in most use cases.
+type BigCache struct {
+ shards []*cacheShard
+ lifeWindow uint64
+ clock clock
+ hash Hasher
+ config Config
+ shardMask uint64
+ maxShardSize uint32
+}
+
+// NewBigCache initialize new instance of BigCache
+func NewBigCache(config Config) (*BigCache, error) {
+ return newBigCache(config, &systemClock{})
+}
+
+func newBigCache(config Config, clock clock) (*BigCache, error) {
+
+ if !isPowerOfTwo(config.Shards) {
+ return nil, fmt.Errorf("Shards number must be power of two")
+ }
+
+ if config.Hasher == nil {
+ config.Hasher = newDefaultHasher()
+ }
+
+ cache := &BigCache{
+ shards: make([]*cacheShard, config.Shards),
+ lifeWindow: uint64(config.LifeWindow.Seconds()),
+ clock: clock,
+ hash: config.Hasher,
+ config: config,
+ shardMask: uint64(config.Shards - 1),
+ maxShardSize: uint32(config.maximumShardSize()),
+ }
+
+ var onRemove func(wrappedEntry []byte)
+ if config.OnRemove == nil {
+ onRemove = cache.notProvidedOnRemove
+ } else {
+ onRemove = cache.providedOnRemove
+ }
+
+ for i := 0; i < config.Shards; i++ {
+ cache.shards[i] = initNewShard(config, onRemove, clock)
+ }
+
+ if config.CleanWindow > 0 {
+ go func() {
+ for t := range time.Tick(config.CleanWindow) {
+ cache.cleanUp(uint64(t.Unix()))
+ }
+ }()
+ }
+
+ return cache, nil
+}
+
+// Get reads entry for the key.
+// It returns an EntryNotFoundError when
+// no entry exists for the given key.
+func (c *BigCache) Get(key string) ([]byte, error) {
+ hashedKey := c.hash.Sum64(key)
+ shard := c.getShard(hashedKey)
+ return shard.get(key, hashedKey)
+}
+
+// Set saves entry under the key
+func (c *BigCache) Set(key string, entry []byte) error {
+ hashedKey := c.hash.Sum64(key)
+ shard := c.getShard(hashedKey)
+ return shard.set(key, hashedKey, entry)
+}
+
+// Delete removes the key
+func (c *BigCache) Delete(key string) error {
+ hashedKey := c.hash.Sum64(key)
+ shard := c.getShard(hashedKey)
+ return shard.del(key, hashedKey)
+}
+
+// Reset empties all cache shards
+func (c *BigCache) Reset() error {
+ for _, shard := range c.shards {
+ shard.reset(c.config)
+ }
+ return nil
+}
+
+// Len computes number of entries in cache
+func (c *BigCache) Len() int {
+ var len int
+ for _, shard := range c.shards {
+ len += shard.len()
+ }
+ return len
+}
+
+// Stats returns cache's statistics
+func (c *BigCache) Stats() Stats {
+ var s Stats
+ for _, shard := range c.shards {
+ tmp := shard.getStats()
+ s.Hits += tmp.Hits
+ s.Misses += tmp.Misses
+ s.DelHits += tmp.DelHits
+ s.DelMisses += tmp.DelMisses
+ s.Collisions += tmp.Collisions
+ }
+ return s
+}
+
+// Iterator returns iterator function to iterate over EntryInfo's from whole cache.
+func (c *BigCache) Iterator() *EntryInfoIterator {
+ return newIterator(c)
+}
+
+func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
+ oldestTimestamp := readTimestampFromEntry(oldestEntry)
+ if currentTimestamp-oldestTimestamp > c.lifeWindow {
+ evict()
+ return true
+ }
+ return false
+}
+
+func (c *BigCache) cleanUp(currentTimestamp uint64) {
+ for _, shard := range c.shards {
+ shard.cleanUp(currentTimestamp)
+ }
+}
+
+func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
+ return c.shards[hashedKey&c.shardMask]
+}
+
+func (c *BigCache) providedOnRemove(wrappedEntry []byte) {
+ c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
+}
+
+func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte) {
+}
diff --git a/vendor/github.com/allegro/bigcache/bigcache_bench_test.go b/vendor/github.com/allegro/bigcache/bigcache_bench_test.go
new file mode 100644
index 00000000..59d0061d
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/bigcache_bench_test.go
@@ -0,0 +1,141 @@
+package bigcache
+
+import (
+ "fmt"
+ "math/rand"
+ "strconv"
+ "testing"
+ "time"
+)
+
+var message = blob('a', 256)
+
+func BenchmarkWriteToCacheWith1Shard(b *testing.B) {
+ writeToCache(b, 1, 100*time.Second, b.N)
+}
+
+func BenchmarkWriteToLimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) {
+ m := blob('a', 1024)
+ cache, _ := NewBigCache(Config{
+ Shards: 1,
+ LifeWindow: 100 * time.Second,
+ MaxEntriesInWindow: 100,
+ MaxEntrySize: 256,
+ HardMaxCacheSize: 1,
+ })
+
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ cache.Set(fmt.Sprintf("key-%d", i), m)
+ }
+}
+
+func BenchmarkWriteToUnlimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) {
+ m := blob('a', 1024)
+ cache, _ := NewBigCache(Config{
+ Shards: 1,
+ LifeWindow: 100 * time.Second,
+ MaxEntriesInWindow: 100,
+ MaxEntrySize: 256,
+ })
+
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ cache.Set(fmt.Sprintf("key-%d", i), m)
+ }
+}
+
+func BenchmarkWriteToCache(b *testing.B) {
+ for _, shards := range []int{1, 512, 1024, 8192} {
+ b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
+ writeToCache(b, shards, 100*time.Second, b.N)
+ })
+ }
+}
+
+func BenchmarkReadFromCache(b *testing.B) {
+ for _, shards := range []int{1, 512, 1024, 8192} {
+ b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
+ readFromCache(b, 1024)
+ })
+ }
+}
+
+func BenchmarkIterateOverCache(b *testing.B) {
+
+ m := blob('a', 1)
+
+ for _, shards := range []int{512, 1024, 8192} {
+ b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
+ cache, _ := NewBigCache(Config{
+ Shards: shards,
+ LifeWindow: 1000 * time.Second,
+ MaxEntriesInWindow: max(b.N, 100),
+ MaxEntrySize: 500,
+ })
+
+ for i := 0; i < b.N; i++ {
+ cache.Set(fmt.Sprintf("key-%d", i), m)
+ }
+
+ b.ResetTimer()
+ it := cache.Iterator()
+
+ b.RunParallel(func(pb *testing.PB) {
+ b.ReportAllocs()
+
+ for pb.Next() {
+ if it.SetNext() {
+ it.Value()
+ }
+ }
+ })
+ })
+ }
+}
+
+func BenchmarkWriteToCacheWith1024ShardsAndSmallShardInitSize(b *testing.B) {
+ writeToCache(b, 1024, 100*time.Second, 100)
+}
+
+func writeToCache(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) {
+ cache, _ := NewBigCache(Config{
+ Shards: shards,
+ LifeWindow: lifeWindow,
+ MaxEntriesInWindow: max(requestsInLifeWindow, 100),
+ MaxEntrySize: 500,
+ })
+ rand.Seed(time.Now().Unix())
+
+ b.RunParallel(func(pb *testing.PB) {
+ id := rand.Int()
+ counter := 0
+
+ b.ReportAllocs()
+ for pb.Next() {
+ cache.Set(fmt.Sprintf("key-%d-%d", id, counter), message)
+ counter = counter + 1
+ }
+ })
+}
+
+func readFromCache(b *testing.B, shards int) {
+ cache, _ := NewBigCache(Config{
+ Shards: shards,
+ LifeWindow: 1000 * time.Second,
+ MaxEntriesInWindow: max(b.N, 100),
+ MaxEntrySize: 500,
+ })
+ for i := 0; i < b.N; i++ {
+ cache.Set(strconv.Itoa(i), message)
+ }
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ b.ReportAllocs()
+
+ for pb.Next() {
+ cache.Get(strconv.Itoa(rand.Intn(b.N)))
+ }
+ })
+}
diff --git a/vendor/github.com/allegro/bigcache/bigcache_test.go b/vendor/github.com/allegro/bigcache/bigcache_test.go
new file mode 100644
index 00000000..a6a41460
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/bigcache_test.go
@@ -0,0 +1,579 @@
+package bigcache
+
+import (
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var sink []byte
+
+func TestParallel(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(DefaultConfig(5 * time.Second))
+ value := []byte("value")
+ var wg sync.WaitGroup
+ wg.Add(3)
+ keys := 1337
+
+ // when
+ go func() {
+ defer wg.Done()
+ for i := 0; i < keys; i++ {
+ cache.Set(fmt.Sprintf("key%d", i), value)
+ }
+ }()
+ go func() {
+ defer wg.Done()
+ for i := 0; i < keys; i++ {
+ sink, _ = cache.Get(fmt.Sprintf("key%d", i))
+ }
+ }()
+ go func() {
+ defer wg.Done()
+ for i := 0; i < keys; i++ {
+ cache.Delete(fmt.Sprintf("key%d", i))
+ }
+ }()
+
+ // then
+ wg.Wait()
+}
+
+func TestWriteAndGetOnCache(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(DefaultConfig(5 * time.Second))
+ value := []byte("value")
+
+ // when
+ cache.Set("key", value)
+ cachedValue, err := cache.Get("key")
+
+ // then
+ assert.NoError(t, err)
+ assert.Equal(t, value, cachedValue)
+}
+
+func TestConstructCacheWithDefaultHasher(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 16,
+ LifeWindow: 5 * time.Second,
+ MaxEntriesInWindow: 10,
+ MaxEntrySize: 256,
+ })
+
+ assert.IsType(t, fnv64a{}, cache.hash)
+}
+
+func TestWillReturnErrorOnInvalidNumberOfPartitions(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, error := NewBigCache(Config{
+ Shards: 18,
+ LifeWindow: 5 * time.Second,
+ MaxEntriesInWindow: 10,
+ MaxEntrySize: 256,
+ })
+
+ assert.Nil(t, cache)
+ assert.Error(t, error, "Shards number must be power of two")
+}
+
+func TestEntryNotFound(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 16,
+ LifeWindow: 5 * time.Second,
+ MaxEntriesInWindow: 10,
+ MaxEntrySize: 256,
+ })
+
+ // when
+ _, err := cache.Get("nonExistingKey")
+
+ // then
+ assert.EqualError(t, err, "Entry \"nonExistingKey\" not found")
+}
+
+func TestTimingEviction(t *testing.T) {
+ t.Parallel()
+
+ // given
+ clock := mockedClock{value: 0}
+ cache, _ := newBigCache(Config{
+ Shards: 1,
+ LifeWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ }, &clock)
+
+ // when
+ cache.Set("key", []byte("value"))
+ clock.set(5)
+ cache.Set("key2", []byte("value2"))
+ _, err := cache.Get("key")
+
+ // then
+ assert.EqualError(t, err, "Entry \"key\" not found")
+}
+
+func TestTimingEvictionShouldEvictOnlyFromUpdatedShard(t *testing.T) {
+ t.Parallel()
+
+ // given
+ clock := mockedClock{value: 0}
+ cache, _ := newBigCache(Config{
+ Shards: 4,
+ LifeWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ }, &clock)
+
+ // when
+ cache.Set("key", []byte("value"))
+ clock.set(5)
+ cache.Set("key2", []byte("value 2"))
+ value, err := cache.Get("key")
+
+ // then
+ assert.NoError(t, err, "Entry \"key\" not found")
+ assert.Equal(t, []byte("value"), value)
+}
+
+func TestCleanShouldEvictAll(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 4,
+ LifeWindow: time.Second,
+ CleanWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ })
+
+ // when
+ cache.Set("key", []byte("value"))
+ <-time.After(3 * time.Second)
+ value, err := cache.Get("key")
+
+ // then
+ assert.EqualError(t, err, "Entry \"key\" not found")
+ assert.Equal(t, value, []byte(nil))
+}
+
+func TestOnRemoveCallback(t *testing.T) {
+ t.Parallel()
+
+ // given
+ clock := mockedClock{value: 0}
+ onRemoveInvoked := false
+ onRemove := func(key string, entry []byte) {
+ onRemoveInvoked = true
+ assert.Equal(t, "key", key)
+ assert.Equal(t, []byte("value"), entry)
+ }
+ cache, _ := newBigCache(Config{
+ Shards: 1,
+ LifeWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ OnRemove: onRemove,
+ }, &clock)
+
+ // when
+ cache.Set("key", []byte("value"))
+ clock.set(5)
+ cache.Set("key2", []byte("value2"))
+
+ // then
+ assert.True(t, onRemoveInvoked)
+}
+
+func TestCacheLen(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 8,
+ LifeWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ })
+ keys := 1337
+
+ // when
+ for i := 0; i < keys; i++ {
+ cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
+ }
+
+ // then
+ assert.Equal(t, keys, cache.Len())
+}
+
+func TestCacheStats(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 8,
+ LifeWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ })
+
+ // when
+ for i := 0; i < 100; i++ {
+ cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
+ }
+
+ for i := 0; i < 10; i++ {
+ value, err := cache.Get(fmt.Sprintf("key%d", i))
+ assert.Nil(t, err)
+ assert.Equal(t, string(value), "value")
+ }
+ for i := 100; i < 110; i++ {
+ _, err := cache.Get(fmt.Sprintf("key%d", i))
+ assert.Error(t, err)
+ }
+ for i := 10; i < 20; i++ {
+ err := cache.Delete(fmt.Sprintf("key%d", i))
+ assert.Nil(t, err)
+ }
+ for i := 110; i < 120; i++ {
+ err := cache.Delete(fmt.Sprintf("key%d", i))
+ assert.Error(t, err)
+ }
+
+ // then
+ stats := cache.Stats()
+ assert.Equal(t, stats.Hits, int64(10))
+ assert.Equal(t, stats.Misses, int64(10))
+ assert.Equal(t, stats.DelHits, int64(10))
+ assert.Equal(t, stats.DelMisses, int64(10))
+}
+
+func TestCacheDel(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(DefaultConfig(time.Second))
+
+ // when
+ err := cache.Delete("nonExistingKey")
+
+ // then
+ assert.Equal(t, err.Error(), "Entry \"nonExistingKey\" not found")
+
+ // and when
+ cache.Set("existingKey", nil)
+ err = cache.Delete("existingKey")
+ cachedValue, _ := cache.Get("existingKey")
+
+ // then
+ assert.Nil(t, err)
+ assert.Len(t, cachedValue, 0)
+}
+
+func TestCacheReset(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 8,
+ LifeWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ })
+ keys := 1337
+
+ // when
+ for i := 0; i < keys; i++ {
+ cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
+ }
+
+ // then
+ assert.Equal(t, keys, cache.Len())
+
+ // and when
+ cache.Reset()
+
+ // then
+ assert.Equal(t, 0, cache.Len())
+
+ // and when
+ for i := 0; i < keys; i++ {
+ cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
+ }
+
+ // then
+ assert.Equal(t, keys, cache.Len())
+}
+
+func TestIterateOnResetCache(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 8,
+ LifeWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ })
+ keys := 1337
+
+ // when
+ for i := 0; i < keys; i++ {
+ cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
+ }
+ cache.Reset()
+
+ // then
+ iterator := cache.Iterator()
+
+ assert.Equal(t, false, iterator.SetNext())
+}
+
+func TestGetOnResetCache(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 8,
+ LifeWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ })
+ keys := 1337
+
+ // when
+ for i := 0; i < keys; i++ {
+ cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
+ }
+
+ cache.Reset()
+
+ // then
+ value, err := cache.Get("key1")
+
+ assert.Equal(t, err.Error(), "Entry \"key1\" not found")
+ assert.Equal(t, value, []byte(nil))
+}
+
+func TestEntryUpdate(t *testing.T) {
+ t.Parallel()
+
+ // given
+ clock := mockedClock{value: 0}
+ cache, _ := newBigCache(Config{
+ Shards: 1,
+ LifeWindow: 6 * time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ }, &clock)
+
+ // when
+ cache.Set("key", []byte("value"))
+ clock.set(5)
+ cache.Set("key", []byte("value2"))
+ clock.set(7)
+ cache.Set("key2", []byte("value3"))
+ cachedValue, _ := cache.Get("key")
+
+ // then
+ assert.Equal(t, []byte("value2"), cachedValue)
+}
+
+func TestOldestEntryDeletionWhenMaxCacheSizeIsReached(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 1,
+ LifeWindow: 5 * time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 1,
+ HardMaxCacheSize: 1,
+ })
+
+ // when
+ cache.Set("key1", blob('a', 1024*400))
+ cache.Set("key2", blob('b', 1024*400))
+ cache.Set("key3", blob('c', 1024*800))
+
+ _, key1Err := cache.Get("key1")
+ _, key2Err := cache.Get("key2")
+ entry3, _ := cache.Get("key3")
+
+ // then
+ assert.EqualError(t, key1Err, "Entry \"key1\" not found")
+ assert.EqualError(t, key2Err, "Entry \"key2\" not found")
+ assert.Equal(t, blob('c', 1024*800), entry3)
+}
+
+func TestRetrievingEntryShouldCopy(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 1,
+ LifeWindow: 5 * time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 1,
+ HardMaxCacheSize: 1,
+ })
+ cache.Set("key1", blob('a', 1024*400))
+ value, key1Err := cache.Get("key1")
+
+ // when
+ // override queue
+ cache.Set("key2", blob('b', 1024*400))
+ cache.Set("key3", blob('c', 1024*400))
+ cache.Set("key4", blob('d', 1024*400))
+ cache.Set("key5", blob('d', 1024*400))
+
+ // then
+ assert.Nil(t, key1Err)
+ assert.Equal(t, blob('a', 1024*400), value)
+}
+
+func TestEntryBiggerThanMaxShardSizeError(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 1,
+ LifeWindow: 5 * time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 1,
+ HardMaxCacheSize: 1,
+ })
+
+ // when
+ err := cache.Set("key1", blob('a', 1024*1025))
+
+ // then
+ assert.EqualError(t, err, "entry is bigger than max shard size")
+}
+
+func TestHashCollision(t *testing.T) {
+ t.Parallel()
+
+ ml := &mockedLogger{}
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 16,
+ LifeWindow: 5 * time.Second,
+ MaxEntriesInWindow: 10,
+ MaxEntrySize: 256,
+ Verbose: true,
+ Hasher: hashStub(5),
+ Logger: ml,
+ })
+
+ // when
+ cache.Set("liquid", []byte("value"))
+ cachedValue, err := cache.Get("liquid")
+
+ // then
+ assert.NoError(t, err)
+ assert.Equal(t, []byte("value"), cachedValue)
+
+ // when
+ cache.Set("costarring", []byte("value 2"))
+ cachedValue, err = cache.Get("costarring")
+
+ // then
+ assert.NoError(t, err)
+ assert.Equal(t, []byte("value 2"), cachedValue)
+
+ // when
+ cachedValue, err = cache.Get("liquid")
+
+ // then
+ assert.Error(t, err)
+ assert.Nil(t, cachedValue)
+
+ assert.NotEqual(t, "", ml.lastFormat)
+ assert.Equal(t, cache.Stats().Collisions, int64(1))
+}
+
+func TestNilValueCaching(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 1,
+ LifeWindow: 5 * time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 1,
+ HardMaxCacheSize: 1,
+ })
+
+ // when
+ cache.Set("Kierkegaard", []byte{})
+ cachedValue, err := cache.Get("Kierkegaard")
+
+ // then
+ assert.NoError(t, err)
+ assert.Equal(t, []byte{}, cachedValue)
+
+ // when
+ cache.Set("Sartre", nil)
+ cachedValue, err = cache.Get("Sartre")
+
+ // then
+ assert.NoError(t, err)
+ assert.Equal(t, []byte{}, cachedValue)
+
+ // when
+ cache.Set("Nietzsche", []byte(nil))
+ cachedValue, err = cache.Get("Nietzsche")
+
+ // then
+ assert.NoError(t, err)
+ assert.Equal(t, []byte{}, cachedValue)
+}
+
+type mockedLogger struct {
+ lastFormat string
+ lastArgs []interface{}
+}
+
+func (ml *mockedLogger) Printf(format string, v ...interface{}) {
+ ml.lastFormat = format
+ ml.lastArgs = v
+}
+
+type mockedClock struct {
+ value int64
+}
+
+func (mc *mockedClock) epoch() int64 {
+ return mc.value
+}
+
+func (mc *mockedClock) set(value int64) {
+ mc.value = value
+}
+
+func blob(char byte, len int) []byte {
+ b := make([]byte, len)
+ for index := range b {
+ b[index] = char
+ }
+ return b
+}
diff --git a/vendor/github.com/allegro/bigcache/caches_bench/caches_bench_test.go b/vendor/github.com/allegro/bigcache/caches_bench/caches_bench_test.go
new file mode 100644
index 00000000..d083f7f1
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/caches_bench/caches_bench_test.go
@@ -0,0 +1,219 @@
+package main
+
+import (
+ "fmt"
+ "math/rand"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/allegro/bigcache"
+ "github.com/coocood/freecache"
+)
+
+const maxEntrySize = 256
+
+func BenchmarkMapSet(b *testing.B) {
+ m := make(map[string][]byte)
+ for i := 0; i < b.N; i++ {
+ m[key(i)] = value()
+ }
+}
+
+func BenchmarkConcurrentMapSet(b *testing.B) {
+ var m sync.Map
+ for i := 0; i < b.N; i++ {
+ m.Store(key(i), value())
+ }
+}
+
+func BenchmarkFreeCacheSet(b *testing.B) {
+ cache := freecache.NewCache(b.N * maxEntrySize)
+ for i := 0; i < b.N; i++ {
+ cache.Set([]byte(key(i)), value(), 0)
+ }
+}
+
+func BenchmarkBigCacheSet(b *testing.B) {
+ cache := initBigCache(b.N)
+ for i := 0; i < b.N; i++ {
+ cache.Set(key(i), value())
+ }
+}
+
+func BenchmarkMapGet(b *testing.B) {
+ b.StopTimer()
+ m := make(map[string][]byte)
+ for i := 0; i < b.N; i++ {
+ m[key(i)] = value()
+ }
+
+ b.StartTimer()
+ hitCount := 0
+ for i := 0; i < b.N; i++ {
+ if m[key(i)] != nil {
+ hitCount++
+ }
+ }
+}
+
+func BenchmarkConcurrentMapGet(b *testing.B) {
+ b.StopTimer()
+ var m sync.Map
+ for i := 0; i < b.N; i++ {
+ m.Store(key(i), value())
+ }
+
+ b.StartTimer()
+ hitCounter := 0
+ for i := 0; i < b.N; i++ {
+ _, ok := m.Load(key(i))
+ if ok {
+ hitCounter++
+ }
+ }
+}
+
+func BenchmarkFreeCacheGet(b *testing.B) {
+ b.StopTimer()
+ cache := freecache.NewCache(b.N * maxEntrySize)
+ for i := 0; i < b.N; i++ {
+ cache.Set([]byte(key(i)), value(), 0)
+ }
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ cache.Get([]byte(key(i)))
+ }
+}
+
+func BenchmarkBigCacheGet(b *testing.B) {
+ b.StopTimer()
+ cache := initBigCache(b.N)
+ for i := 0; i < b.N; i++ {
+ cache.Set(key(i), value())
+ }
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ cache.Get(key(i))
+ }
+}
+
+func BenchmarkBigCacheSetParallel(b *testing.B) {
+ cache := initBigCache(b.N)
+ rand.Seed(time.Now().Unix())
+
+ b.RunParallel(func(pb *testing.PB) {
+ id := rand.Intn(1000)
+ counter := 0
+ for pb.Next() {
+ cache.Set(parallelKey(id, counter), value())
+ counter = counter + 1
+ }
+ })
+}
+
+func BenchmarkFreeCacheSetParallel(b *testing.B) {
+ cache := freecache.NewCache(b.N * maxEntrySize)
+ rand.Seed(time.Now().Unix())
+
+ b.RunParallel(func(pb *testing.PB) {
+ id := rand.Intn(1000)
+ counter := 0
+ for pb.Next() {
+ cache.Set([]byte(parallelKey(id, counter)), value(), 0)
+ counter = counter + 1
+ }
+ })
+}
+
+func BenchmarkConcurrentMapSetParallel(b *testing.B) {
+ var m sync.Map
+
+ b.RunParallel(func(pb *testing.PB) {
+ id := rand.Intn(1000)
+ for pb.Next() {
+ m.Store(key(id), value())
+ }
+ })
+}
+
+func BenchmarkBigCacheGetParallel(b *testing.B) {
+ b.StopTimer()
+ cache := initBigCache(b.N)
+ for i := 0; i < b.N; i++ {
+ cache.Set(key(i), value())
+ }
+
+ b.StartTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ counter := 0
+ for pb.Next() {
+ cache.Get(key(counter))
+ counter = counter + 1
+ }
+ })
+}
+
+func BenchmarkFreeCacheGetParallel(b *testing.B) {
+ b.StopTimer()
+ cache := freecache.NewCache(b.N * maxEntrySize)
+ for i := 0; i < b.N; i++ {
+ cache.Set([]byte(key(i)), value(), 0)
+ }
+
+ b.StartTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ counter := 0
+ for pb.Next() {
+ cache.Get([]byte(key(counter)))
+ counter = counter + 1
+ }
+ })
+}
+
+func BenchmarkConcurrentMapGetParallel(b *testing.B) {
+ b.StopTimer()
+ var m sync.Map
+ for i := 0; i < b.N; i++ {
+ m.Store(key(i), value())
+ }
+
+ b.StartTimer()
+ hitCount := 0
+
+ b.RunParallel(func(pb *testing.PB) {
+ id := rand.Intn(1000)
+ for pb.Next() {
+ _, ok := m.Load(key(id))
+ if ok {
+ hitCount++
+ }
+ }
+ })
+}
+
+func key(i int) string {
+ return fmt.Sprintf("key-%010d", i)
+}
+
+func value() []byte {
+ return make([]byte, 100)
+}
+
+func parallelKey(threadID int, counter int) string {
+ return fmt.Sprintf("key-%04d-%06d", threadID, counter)
+}
+
+func initBigCache(entriesInWindow int) *bigcache.BigCache {
+ cache, _ := bigcache.NewBigCache(bigcache.Config{
+ Shards: 256,
+ LifeWindow: 10 * time.Minute,
+ MaxEntriesInWindow: entriesInWindow,
+ MaxEntrySize: maxEntrySize,
+ Verbose: true,
+ })
+
+ return cache
+}
diff --git a/vendor/github.com/allegro/bigcache/caches_bench/caches_gc_overhead_comparison.go b/vendor/github.com/allegro/bigcache/caches_bench/caches_gc_overhead_comparison.go
new file mode 100644
index 00000000..7e212103
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/caches_bench/caches_gc_overhead_comparison.go
@@ -0,0 +1,96 @@
+package main
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "time"
+
+ "github.com/allegro/bigcache"
+ "github.com/coocood/freecache"
+)
+
+func gcPause() time.Duration {
+ runtime.GC()
+ var stats debug.GCStats
+ debug.ReadGCStats(&stats)
+ return stats.PauseTotal
+}
+
+const (
+ entries = 20000000
+ valueSize = 100
+)
+
+func main() {
+ debug.SetGCPercent(10)
+ fmt.Println("Number of entries: ", entries)
+
+ config := bigcache.Config{
+ Shards: 256,
+ LifeWindow: 100 * time.Minute,
+ MaxEntriesInWindow: entries,
+ MaxEntrySize: 200,
+ Verbose: true,
+ }
+
+ bigcache, _ := bigcache.NewBigCache(config)
+ for i := 0; i < entries; i++ {
+ key, val := generateKeyValue(i, valueSize)
+ bigcache.Set(key, val)
+ }
+
+ firstKey, _ := generateKeyValue(1, valueSize)
+ checkFirstElement(bigcache.Get(firstKey))
+
+ fmt.Println("GC pause for bigcache: ", gcPause())
+ bigcache = nil
+ gcPause()
+
+ //------------------------------------------
+
+ freeCache := freecache.NewCache(entries * 200) //allocate entries * 200 bytes
+ for i := 0; i < entries; i++ {
+ key, val := generateKeyValue(i, valueSize)
+ if err := freeCache.Set([]byte(key), val, 0); err != nil {
+ fmt.Println("Error in set: ", err.Error())
+ }
+ }
+
+ firstKey, _ = generateKeyValue(1, valueSize)
+ checkFirstElement(freeCache.Get([]byte(firstKey)))
+
+ if freeCache.OverwriteCount() != 0 {
+ fmt.Println("Overwritten: ", freeCache.OverwriteCount())
+ }
+ fmt.Println("GC pause for freecache: ", gcPause())
+ freeCache = nil
+ gcPause()
+
+ //------------------------------------------
+
+ mapCache := make(map[string][]byte)
+ for i := 0; i < entries; i++ {
+ key, val := generateKeyValue(i, valueSize)
+ mapCache[key] = val
+ }
+ fmt.Println("GC pause for map: ", gcPause())
+
+}
+
+func checkFirstElement(val []byte, err error) {
+ _, expectedVal := generateKeyValue(1, valueSize)
+ if err != nil {
+ fmt.Println("Error in get: ", err.Error())
+ } else if string(val) != string(expectedVal) {
+ fmt.Println("Wrong first element: ", string(val))
+ }
+}
+
+func generateKeyValue(index int, valSize int) (string, []byte) {
+ key := fmt.Sprintf("key-%010d", index)
+ fixedNumber := []byte(fmt.Sprintf("%010d", index))
+ val := append(make([]byte, valSize-10), fixedNumber...)
+
+ return key, val
+}
diff --git a/vendor/github.com/allegro/bigcache/clock.go b/vendor/github.com/allegro/bigcache/clock.go
new file mode 100644
index 00000000..f8b535e1
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/clock.go
@@ -0,0 +1,14 @@
+package bigcache
+
+import "time"
+
+type clock interface {
+ epoch() int64
+}
+
+type systemClock struct {
+}
+
+func (c systemClock) epoch() int64 {
+ return time.Now().Unix()
+}
diff --git a/vendor/github.com/allegro/bigcache/config.go b/vendor/github.com/allegro/bigcache/config.go
new file mode 100644
index 00000000..0a523947
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/config.go
@@ -0,0 +1,67 @@
+package bigcache
+
+import "time"
+
+// Config for BigCache
+type Config struct {
+ // Number of cache shards, value must be a power of two
+ Shards int
+ // Time after which entry can be evicted
+ LifeWindow time.Duration
+ // Interval between removing expired entries (clean up).
+ // If set to <= 0 then no action is performed. Setting to < 1 second is counterproductive — bigcache has a one second resolution.
+ CleanWindow time.Duration
+ // Max number of entries in life window. Used only to calculate initial size for cache shards.
+ // When proper value is set then additional memory allocation does not occur.
+ MaxEntriesInWindow int
+ // Max size of entry in bytes. Used only to calculate initial size for cache shards.
+ MaxEntrySize int
+ // Verbose mode prints information about new memory allocation
+ Verbose bool
+ // Hasher used to map between string keys and unsigned 64bit integers, by default fnv64 hashing is used.
+ Hasher Hasher
+ // HardMaxCacheSize is a limit for cache size in MB. Cache will not allocate more memory than this limit.
+ // It can protect application from consuming all available memory on machine, therefore from running OOM Killer.
+ // Default value is 0 which means unlimited size. When the limit is higher than 0 and reached then
+ // the oldest entries are overridden for the new ones.
+ HardMaxCacheSize int
+ // OnRemove is a callback fired when the oldest entry is removed because of its expiration time or no space left
+ // for the new entry. Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
+ OnRemove func(key string, entry []byte)
+
+ // Logger is a logging interface and used in combination with `Verbose`
+ // Defaults to `DefaultLogger()`
+ Logger Logger
+}
+
+// DefaultConfig initializes config with default values.
+// When load for BigCache can be predicted in advance then it is better to use custom config.
+func DefaultConfig(eviction time.Duration) Config {
+ return Config{
+ Shards: 1024,
+ LifeWindow: eviction,
+ CleanWindow: 0,
+ MaxEntriesInWindow: 1000 * 10 * 60,
+ MaxEntrySize: 500,
+ Verbose: true,
+ Hasher: newDefaultHasher(),
+ HardMaxCacheSize: 0,
+ Logger: DefaultLogger(),
+ }
+}
+
+// initialShardSize computes initial shard size
+func (c Config) initialShardSize() int {
+ return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard)
+}
+
+// maximumShardSize computes maximum shard size
+func (c Config) maximumShardSize() int {
+ maxShardSize := 0
+
+ if c.HardMaxCacheSize > 0 {
+ maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards
+ }
+
+ return maxShardSize
+}
diff --git a/vendor/github.com/allegro/bigcache/encoding.go b/vendor/github.com/allegro/bigcache/encoding.go
new file mode 100644
index 00000000..5d90d71d
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/encoding.go
@@ -0,0 +1,70 @@
+package bigcache
+
+import (
+ "encoding/binary"
+ "reflect"
+ "unsafe"
+)
+
+const (
+ timestampSizeInBytes = 8 // Number of bytes used for timestamp
+ hashSizeInBytes = 8 // Number of bytes used for hash
+ keySizeInBytes = 2 // Number of bytes used for size of entry key
+ headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers
+)
+
+func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte {
+ keyLength := len(key)
+ blobLength := len(entry) + headersSizeInBytes + keyLength
+
+ if blobLength > len(*buffer) {
+ *buffer = make([]byte, blobLength)
+ }
+ blob := *buffer
+
+ binary.LittleEndian.PutUint64(blob, timestamp)
+ binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash)
+ binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength))
+ copy(blob[headersSizeInBytes:], key)
+ copy(blob[headersSizeInBytes+keyLength:], entry)
+
+ return blob[:blobLength]
+}
+
+func readEntry(data []byte) []byte {
+ length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
+
+ // copy on read
+ dst := make([]byte, len(data)-int(headersSizeInBytes+length))
+ copy(dst, data[headersSizeInBytes+length:])
+
+ return dst
+}
+
+func readTimestampFromEntry(data []byte) uint64 {
+ return binary.LittleEndian.Uint64(data)
+}
+
+func readKeyFromEntry(data []byte) string {
+ length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
+
+ // copy on read
+ dst := make([]byte, length)
+ copy(dst, data[headersSizeInBytes:headersSizeInBytes+length])
+
+ return bytesToString(dst)
+}
+
+func bytesToString(b []byte) string {
+ bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len}
+ return *(*string)(unsafe.Pointer(&strHeader))
+}
+
+func readHashFromEntry(data []byte) uint64 {
+ return binary.LittleEndian.Uint64(data[timestampSizeInBytes:])
+}
+
+func resetKeyFromEntry(data []byte) {
+ binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0)
+}
diff --git a/vendor/github.com/allegro/bigcache/encoding_test.go b/vendor/github.com/allegro/bigcache/encoding_test.go
new file mode 100644
index 00000000..ae83811d
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/encoding_test.go
@@ -0,0 +1,46 @@
+package bigcache
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEncodeDecode(t *testing.T) {
+ // given
+ now := uint64(time.Now().Unix())
+ hash := uint64(42)
+ key := "key"
+ data := []byte("data")
+ buffer := make([]byte, 100)
+
+ // when
+ wrapped := wrapEntry(now, hash, key, data, &buffer)
+
+ // then
+ assert.Equal(t, key, readKeyFromEntry(wrapped))
+ assert.Equal(t, hash, readHashFromEntry(wrapped))
+ assert.Equal(t, now, readTimestampFromEntry(wrapped))
+ assert.Equal(t, data, readEntry(wrapped))
+ assert.Equal(t, 100, len(buffer))
+}
+
+func TestAllocateBiggerBuffer(t *testing.T) {
+ //given
+ now := uint64(time.Now().Unix())
+ hash := uint64(42)
+ key := "1"
+ data := []byte("2")
+ buffer := make([]byte, 1)
+
+ // when
+ wrapped := wrapEntry(now, hash, key, data, &buffer)
+
+ // then
+ assert.Equal(t, key, readKeyFromEntry(wrapped))
+ assert.Equal(t, hash, readHashFromEntry(wrapped))
+ assert.Equal(t, now, readTimestampFromEntry(wrapped))
+ assert.Equal(t, data, readEntry(wrapped))
+ assert.Equal(t, 2+headersSizeInBytes, len(buffer))
+}
diff --git a/vendor/github.com/allegro/bigcache/entry_not_found_error.go b/vendor/github.com/allegro/bigcache/entry_not_found_error.go
new file mode 100644
index 00000000..e6955a57
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/entry_not_found_error.go
@@ -0,0 +1,17 @@
+package bigcache
+
+import "fmt"
+
+// EntryNotFoundError is an error type struct which is returned when entry was not found for provided key
+type EntryNotFoundError struct {
+ message string
+}
+
+func notFound(key string) error {
+ return &EntryNotFoundError{fmt.Sprintf("Entry %q not found", key)}
+}
+
+// Error returned when entry does not exist.
+func (e EntryNotFoundError) Error() string {
+ return e.message
+}
diff --git a/vendor/github.com/allegro/bigcache/fnv.go b/vendor/github.com/allegro/bigcache/fnv.go
new file mode 100644
index 00000000..188c9aa6
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/fnv.go
@@ -0,0 +1,28 @@
+package bigcache
+
+// newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations.
+// Its Sum64 method will lay the value out in big-endian byte order.
+// See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function
+func newDefaultHasher() Hasher {
+ return fnv64a{}
+}
+
+type fnv64a struct{}
+
+const (
+ // offset64 FNVa offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
+ offset64 = 14695981039346656037
+ // prime64 FNVa prime value. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
+ prime64 = 1099511628211
+)
+
+// Sum64 gets the string and returns its uint64 hash value.
+func (f fnv64a) Sum64(key string) uint64 {
+ var hash uint64 = offset64
+ for i := 0; i < len(key); i++ {
+ hash ^= uint64(key[i])
+ hash *= prime64
+ }
+
+ return hash
+}
diff --git a/vendor/github.com/allegro/bigcache/fnv_bench_test.go b/vendor/github.com/allegro/bigcache/fnv_bench_test.go
new file mode 100644
index 00000000..327cf32f
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/fnv_bench_test.go
@@ -0,0 +1,18 @@
+package bigcache
+
+import "testing"
+
+var text = "abcdefg"
+
+func BenchmarkFnvHashSum64(b *testing.B) {
+ h := newDefaultHasher()
+ for i := 0; i < b.N; i++ {
+ h.Sum64(text)
+ }
+}
+
+func BenchmarkFnvHashStdLibSum64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ stdLibFnvSum64(text)
+ }
+}
diff --git a/vendor/github.com/allegro/bigcache/fnv_test.go b/vendor/github.com/allegro/bigcache/fnv_test.go
new file mode 100644
index 00000000..c94c0746
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/fnv_test.go
@@ -0,0 +1,35 @@
+package bigcache
+
+import (
+ "hash/fnv"
+ "testing"
+)
+
+type testCase struct {
+ text string
+ expectedHash uint64
+}
+
+var testCases = []testCase{
+ {"", stdLibFnvSum64("")},
+ {"a", stdLibFnvSum64("a")},
+ {"ab", stdLibFnvSum64("ab")},
+ {"abc", stdLibFnvSum64("abc")},
+ {"some longer and more complicated text", stdLibFnvSum64("some longer and more complicated text")},
+}
+
+func TestFnvHashSum64(t *testing.T) {
+ h := newDefaultHasher()
+ for _, testCase := range testCases {
+ hashed := h.Sum64(testCase.text)
+ if hashed != testCase.expectedHash {
+ t.Errorf("hash(%q) = %d want %d", testCase.text, hashed, testCase.expectedHash)
+ }
+ }
+}
+
+func stdLibFnvSum64(key string) uint64 {
+ h := fnv.New64a()
+ h.Write([]byte(key))
+ return h.Sum64()
+}
diff --git a/vendor/github.com/allegro/bigcache/hash.go b/vendor/github.com/allegro/bigcache/hash.go
new file mode 100644
index 00000000..5f8ade77
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/hash.go
@@ -0,0 +1,8 @@
+package bigcache
+
+// Hasher is responsible for generating unsigned, 64 bit hash of provided string. Hasher should minimize collisions
+// (generating same hash for different strings) and while performance is also important fast functions are preferable (i.e.
+// you can use FarmHash family).
+type Hasher interface {
+ Sum64(string) uint64
+}
diff --git a/vendor/github.com/allegro/bigcache/hash_test.go b/vendor/github.com/allegro/bigcache/hash_test.go
new file mode 100644
index 00000000..2252e7fb
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/hash_test.go
@@ -0,0 +1,7 @@
+package bigcache
+
+type hashStub uint64
+
+func (stub hashStub) Sum64(_ string) uint64 {
+ return uint64(stub)
+}
diff --git a/vendor/github.com/allegro/bigcache/iterator.go b/vendor/github.com/allegro/bigcache/iterator.go
new file mode 100644
index 00000000..70b98d90
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/iterator.go
@@ -0,0 +1,122 @@
+package bigcache
+
+import "sync"
+
+type iteratorError string
+
+func (e iteratorError) Error() string {
+ return string(e)
+}
+
+// ErrInvalidIteratorState is reported when iterator is in invalid state
+const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. Use SetNext() to move to next position")
+
+// ErrCannotRetrieveEntry is reported when entry cannot be retrieved from underlying
+const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache")
+
+var emptyEntryInfo = EntryInfo{}
+
+// EntryInfo holds informations about entry in the cache
+type EntryInfo struct {
+ timestamp uint64
+ hash uint64
+ key string
+ value []byte
+}
+
+// Key returns entry's underlying key
+func (e EntryInfo) Key() string {
+ return e.key
+}
+
+// Hash returns entry's hash value
+func (e EntryInfo) Hash() uint64 {
+ return e.hash
+}
+
+// Timestamp returns entry's timestamp (time of insertion)
+func (e EntryInfo) Timestamp() uint64 {
+ return e.timestamp
+}
+
+// Value returns entry's underlying value
+func (e EntryInfo) Value() []byte {
+ return e.value
+}
+
+// EntryInfoIterator allows to iterate over entries in the cache
+type EntryInfoIterator struct {
+ mutex sync.Mutex
+ cache *BigCache
+ currentShard int
+ currentIndex int
+ elements []uint32
+ elementsCount int
+ valid bool
+}
+
+// SetNext moves to next element and returns true if it exists.
+func (it *EntryInfoIterator) SetNext() bool {
+ it.mutex.Lock()
+
+ it.valid = false
+ it.currentIndex++
+
+ if it.elementsCount > it.currentIndex {
+ it.valid = true
+ it.mutex.Unlock()
+ return true
+ }
+
+ for i := it.currentShard + 1; i < it.cache.config.Shards; i++ {
+ it.elements, it.elementsCount = it.cache.shards[i].copyKeys()
+
+ // Non empty shard - stick with it
+ if it.elementsCount > 0 {
+ it.currentIndex = 0
+ it.currentShard = i
+ it.valid = true
+ it.mutex.Unlock()
+ return true
+ }
+ }
+ it.mutex.Unlock()
+ return false
+}
+
+func newIterator(cache *BigCache) *EntryInfoIterator {
+ elements, count := cache.shards[0].copyKeys()
+
+ return &EntryInfoIterator{
+ cache: cache,
+ currentShard: 0,
+ currentIndex: -1,
+ elements: elements,
+ elementsCount: count,
+ }
+}
+
+// Value returns current value from the iterator
+func (it *EntryInfoIterator) Value() (EntryInfo, error) {
+ it.mutex.Lock()
+
+ if !it.valid {
+ it.mutex.Unlock()
+ return emptyEntryInfo, ErrInvalidIteratorState
+ }
+
+ entry, err := it.cache.shards[it.currentShard].getEntry(int(it.elements[it.currentIndex]))
+
+ if err != nil {
+ it.mutex.Unlock()
+ return emptyEntryInfo, ErrCannotRetrieveEntry
+ }
+ it.mutex.Unlock()
+
+ return EntryInfo{
+ timestamp: readTimestampFromEntry(entry),
+ hash: readHashFromEntry(entry),
+ key: readKeyFromEntry(entry),
+ value: readEntry(entry),
+ }, nil
+}
diff --git a/vendor/github.com/allegro/bigcache/iterator_test.go b/vendor/github.com/allegro/bigcache/iterator_test.go
new file mode 100644
index 00000000..ec8bf137
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/iterator_test.go
@@ -0,0 +1,150 @@
+package bigcache
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEntriesIterator(t *testing.T) {
+ t.Parallel()
+
+ // given
+ keysCount := 1000
+ cache, _ := NewBigCache(Config{
+ Shards: 8,
+ LifeWindow: 6 * time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ })
+ value := []byte("value")
+
+ for i := 0; i < keysCount; i++ {
+ cache.Set(fmt.Sprintf("key%d", i), value)
+ }
+
+ // when
+ keys := make(map[string]struct{})
+ iterator := cache.Iterator()
+
+ for iterator.SetNext() {
+ current, err := iterator.Value()
+
+ if err == nil {
+ keys[current.Key()] = struct{}{}
+ }
+ }
+
+ // then
+ assert.Equal(t, keysCount, len(keys))
+}
+
+func TestEntriesIteratorWithMostShardsEmpty(t *testing.T) {
+ t.Parallel()
+
+ // given
+ clock := mockedClock{value: 0}
+ cache, _ := newBigCache(Config{
+ Shards: 8,
+ LifeWindow: 6 * time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ }, &clock)
+
+ cache.Set("key", []byte("value"))
+
+ // when
+ iterator := cache.Iterator()
+
+ // then
+ if !iterator.SetNext() {
+ t.Errorf("Iterator should contain at least single element")
+ }
+
+ current, err := iterator.Value()
+
+ // then
+ assert.Nil(t, err)
+ assert.Equal(t, "key", current.Key())
+ assert.Equal(t, uint64(0x3dc94a19365b10ec), current.Hash())
+ assert.Equal(t, []byte("value"), current.Value())
+ assert.Equal(t, uint64(0), current.Timestamp())
+}
+
+func TestEntriesIteratorWithConcurrentUpdate(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 1,
+ LifeWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ })
+
+ cache.Set("key", []byte("value"))
+
+ // when
+ iterator := cache.Iterator()
+
+ // then
+ if !iterator.SetNext() {
+ t.Errorf("Iterator should contain at least single element")
+ }
+
+ // Quite ugly but works
+ for i := 0; i < cache.config.Shards; i++ {
+ if oldestEntry, err := cache.shards[i].getOldestEntry(); err == nil {
+ cache.onEvict(oldestEntry, 10, cache.shards[i].removeOldestEntry)
+ }
+ }
+
+ current, err := iterator.Value()
+
+ // then
+ assert.Equal(t, ErrCannotRetrieveEntry, err)
+ assert.Equal(t, "Could not retrieve entry from cache", err.Error())
+ assert.Equal(t, EntryInfo{}, current)
+}
+
+func TestEntriesIteratorWithAllShardsEmpty(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 1,
+ LifeWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ })
+
+ // when
+ iterator := cache.Iterator()
+
+ // then
+ if iterator.SetNext() {
+ t.Errorf("Iterator should not contain any elements")
+ }
+}
+
+func TestEntriesIteratorInInvalidState(t *testing.T) {
+ t.Parallel()
+
+ // given
+ cache, _ := NewBigCache(Config{
+ Shards: 1,
+ LifeWindow: time.Second,
+ MaxEntriesInWindow: 1,
+ MaxEntrySize: 256,
+ })
+
+ // when
+ iterator := cache.Iterator()
+
+ // then
+ _, err := iterator.Value()
+ assert.Equal(t, ErrInvalidIteratorState, err)
+ assert.Equal(t, "Iterator is in invalid state. Use SetNext() to move to next position", err.Error())
+}
diff --git a/vendor/github.com/allegro/bigcache/logger.go b/vendor/github.com/allegro/bigcache/logger.go
new file mode 100644
index 00000000..50e84abc
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/logger.go
@@ -0,0 +1,30 @@
+package bigcache
+
+import (
+ "log"
+ "os"
+)
+
+// Logger is invoked when `Config.Verbose=true`
+type Logger interface {
+ Printf(format string, v ...interface{})
+}
+
+// this is a safeguard, breaking on compile time in case
+// `log.Logger` does not adhere to our `Logger` interface.
+// see https://golang.org/doc/faq#guarantee_satisfies_interface
+var _ Logger = &log.Logger{}
+
+// DefaultLogger returns a `Logger` implementation
+// backed by stdlib's log
+func DefaultLogger() *log.Logger {
+ return log.New(os.Stdout, "", log.LstdFlags)
+}
+
+func newLogger(custom Logger) Logger {
+ if custom != nil {
+ return custom
+ }
+
+ return DefaultLogger()
+}
diff --git a/vendor/github.com/allegro/bigcache/queue/bytes_queue.go b/vendor/github.com/allegro/bigcache/queue/bytes_queue.go
new file mode 100644
index 00000000..0285c72c
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/queue/bytes_queue.go
@@ -0,0 +1,210 @@
+package queue
+
+import (
+ "encoding/binary"
+ "log"
+ "time"
+)
+
+const (
+ // Number of bytes used to keep information about entry size
+ headerEntrySize = 4
+ // Bytes before left margin are not used. Zero index means element does not exist in queue, useful while reading slice from index
+ leftMarginIndex = 1
+ // Minimum empty blob size in bytes. Empty blob fills space between tail and head in additional memory allocation.
+ // It keeps entries indexes unchanged
+ minimumEmptyBlobSize = 32 + headerEntrySize
+)
+
+// BytesQueue is a non-thread safe queue type of fifo based on bytes array.
+// For every push operation index of entry is returned. It can be used to read the entry later
+type BytesQueue struct {
+ array []byte
+ capacity int
+ maxCapacity int
+ head int
+ tail int
+ count int
+ rightMargin int
+ headerBuffer []byte
+ verbose bool
+ initialCapacity int
+}
+
+type queueError struct {
+ message string
+}
+
+// NewBytesQueue initialize new bytes queue.
+// Initial capacity is used in bytes array allocation
+// When verbose flag is set then information about memory allocation are printed
+func NewBytesQueue(initialCapacity int, maxCapacity int, verbose bool) *BytesQueue {
+ return &BytesQueue{
+ array: make([]byte, initialCapacity),
+ capacity: initialCapacity,
+ maxCapacity: maxCapacity,
+ headerBuffer: make([]byte, headerEntrySize),
+ tail: leftMarginIndex,
+ head: leftMarginIndex,
+ rightMargin: leftMarginIndex,
+ verbose: verbose,
+ initialCapacity: initialCapacity,
+ }
+}
+
+// Reset removes all entries from queue
+func (q *BytesQueue) Reset() {
+ // Just reset indexes
+ q.tail = leftMarginIndex
+ q.head = leftMarginIndex
+ q.rightMargin = leftMarginIndex
+ q.count = 0
+}
+
+// Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed.
+// Returns index for pushed data or error if maximum size queue limit is reached.
+func (q *BytesQueue) Push(data []byte) (int, error) {
+ dataLen := len(data)
+
+ if q.availableSpaceAfterTail() < dataLen+headerEntrySize {
+ if q.availableSpaceBeforeHead() >= dataLen+headerEntrySize {
+ q.tail = leftMarginIndex
+ } else if q.capacity+headerEntrySize+dataLen >= q.maxCapacity && q.maxCapacity > 0 {
+ return -1, &queueError{"Full queue. Maximum size limit reached."}
+ } else {
+ q.allocateAdditionalMemory(dataLen + headerEntrySize)
+ }
+ }
+
+ index := q.tail
+
+ q.push(data, dataLen)
+
+ return index, nil
+}
+
+func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
+ start := time.Now()
+ if q.capacity < minimum {
+ q.capacity += minimum
+ }
+ q.capacity = q.capacity * 2
+ if q.capacity > q.maxCapacity && q.maxCapacity > 0 {
+ q.capacity = q.maxCapacity
+ }
+
+ oldArray := q.array
+ q.array = make([]byte, q.capacity)
+
+ if leftMarginIndex != q.rightMargin {
+ copy(q.array, oldArray[:q.rightMargin])
+
+ if q.tail < q.head {
+ emptyBlobLen := q.head - q.tail - headerEntrySize
+ q.push(make([]byte, emptyBlobLen), emptyBlobLen)
+ q.head = leftMarginIndex
+ q.tail = q.rightMargin
+ }
+ }
+
+ if q.verbose {
+ log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity)
+ }
+}
+
+func (q *BytesQueue) push(data []byte, len int) {
+ binary.LittleEndian.PutUint32(q.headerBuffer, uint32(len))
+ q.copy(q.headerBuffer, headerEntrySize)
+
+ q.copy(data, len)
+
+ if q.tail > q.head {
+ q.rightMargin = q.tail
+ }
+
+ q.count++
+}
+
+func (q *BytesQueue) copy(data []byte, len int) {
+ q.tail += copy(q.array[q.tail:], data[:len])
+}
+
+// Pop reads the oldest entry from queue and moves head pointer to the next one
+func (q *BytesQueue) Pop() ([]byte, error) {
+ data, size, err := q.peek(q.head)
+ if err != nil {
+ return nil, err
+ }
+
+ q.head += headerEntrySize + size
+ q.count--
+
+ if q.head == q.rightMargin {
+ q.head = leftMarginIndex
+ if q.tail == q.rightMargin {
+ q.tail = leftMarginIndex
+ }
+ q.rightMargin = q.tail
+ }
+
+ return data, nil
+}
+
+// Peek reads the oldest entry from list without moving head pointer
+func (q *BytesQueue) Peek() ([]byte, error) {
+ data, _, err := q.peek(q.head)
+ return data, err
+}
+
+// Get reads entry from index
+func (q *BytesQueue) Get(index int) ([]byte, error) {
+ data, _, err := q.peek(index)
+ return data, err
+}
+
+// Capacity returns number of allocated bytes for queue
+func (q *BytesQueue) Capacity() int {
+ return q.capacity
+}
+
+// Len returns number of entries kept in queue
+func (q *BytesQueue) Len() int {
+ return q.count
+}
+
+// Error returns error message
+func (e *queueError) Error() string {
+ return e.message
+}
+
+func (q *BytesQueue) peek(index int) ([]byte, int, error) {
+
+ if q.count == 0 {
+ return nil, 0, &queueError{"Empty queue"}
+ }
+
+ if index <= 0 {
+ return nil, 0, &queueError{"Index must be grater than zero. Invalid index."}
+ }
+
+ if index+headerEntrySize >= len(q.array) {
+ return nil, 0, &queueError{"Index out of range"}
+ }
+
+ blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize]))
+ return q.array[index+headerEntrySize : index+headerEntrySize+blockSize], blockSize, nil
+}
+
+func (q *BytesQueue) availableSpaceAfterTail() int {
+ if q.tail >= q.head {
+ return q.capacity - q.tail
+ }
+ return q.head - q.tail - minimumEmptyBlobSize
+}
+
+func (q *BytesQueue) availableSpaceBeforeHead() int {
+ if q.tail >= q.head {
+ return q.head - leftMarginIndex - minimumEmptyBlobSize
+ }
+ return q.head - q.tail - minimumEmptyBlobSize
+}
diff --git a/vendor/github.com/allegro/bigcache/queue/bytes_queue_test.go b/vendor/github.com/allegro/bigcache/queue/bytes_queue_test.go
new file mode 100644
index 00000000..f4342b4e
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/queue/bytes_queue_test.go
@@ -0,0 +1,365 @@
+package queue
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPushAndPop(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(10, 0, true)
+ entry := []byte("hello")
+
+ // when
+ _, err := queue.Pop()
+
+ // then
+ assert.EqualError(t, err, "Empty queue")
+
+ // when
+ queue.Push(entry)
+
+ // then
+ assert.Equal(t, entry, pop(queue))
+}
+
+func TestLen(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(100, 0, false)
+ entry := []byte("hello")
+ assert.Zero(t, queue.Len())
+
+ // when
+ queue.Push(entry)
+
+ // then
+ assert.Equal(t, queue.Len(), 1)
+}
+
+func TestPeek(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(100, 0, false)
+ entry := []byte("hello")
+
+ // when
+ read, err := queue.Peek()
+
+ // then
+ assert.EqualError(t, err, "Empty queue")
+ assert.Nil(t, read)
+
+ // when
+ queue.Push(entry)
+ read, err = queue.Peek()
+
+ // then
+ assert.NoError(t, err)
+ assert.Equal(t, pop(queue), read)
+ assert.Equal(t, entry, read)
+}
+
+func TestReset(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(100, 0, false)
+ entry := []byte("hello")
+
+ // when
+ queue.Push(entry)
+ queue.Push(entry)
+ queue.Push(entry)
+
+ queue.Reset()
+ read, err := queue.Peek()
+
+ // then
+ assert.EqualError(t, err, "Empty queue")
+ assert.Nil(t, read)
+
+ // when
+ queue.Push(entry)
+ read, err = queue.Peek()
+
+ // then
+ assert.NoError(t, err)
+ assert.Equal(t, pop(queue), read)
+ assert.Equal(t, entry, read)
+
+ // when
+ read, err = queue.Peek()
+
+ // then
+ assert.EqualError(t, err, "Empty queue")
+ assert.Nil(t, read)
+}
+
+func TestReuseAvailableSpace(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(100, 0, false)
+
+ // when
+ queue.Push(blob('a', 70))
+ queue.Push(blob('b', 20))
+ queue.Pop()
+ queue.Push(blob('c', 20))
+
+ // then
+ assert.Equal(t, 100, queue.Capacity())
+ assert.Equal(t, blob('b', 20), pop(queue))
+}
+
+func TestAllocateAdditionalSpace(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(11, 0, false)
+
+ // when
+ queue.Push([]byte("hello1"))
+ queue.Push([]byte("hello2"))
+
+ // then
+ assert.Equal(t, 22, queue.Capacity())
+}
+
+func TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereHeadIsBeforeTail(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(25, 0, false)
+
+ // when
+ queue.Push(blob('a', 3)) // header + entry + left margin = 8 bytes
+ queue.Push(blob('b', 6)) // additional 10 bytes
+ queue.Pop() // space freed, 7 bytes available at the beginning
+ queue.Push(blob('c', 6)) // 10 bytes needed, 14 available but not in one segment, allocate additional memory
+
+ // then
+ assert.Equal(t, 50, queue.Capacity())
+ assert.Equal(t, blob('b', 6), pop(queue))
+ assert.Equal(t, blob('c', 6), pop(queue))
+}
+
+func TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereHeadIsBeforeTail(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(25, 0, false)
+
+ // when
+ queue.Push(blob('a', 3)) // header + entry + left margin = 8 bytes
+ index, _ := queue.Push(blob('b', 6)) // additional 10 bytes
+ queue.Pop() // space freed, 7 bytes available at the beginning
+ newestIndex, _ := queue.Push(blob('c', 6)) // 10 bytes needed, 14 available but not in one segment, allocate additional memory
+
+ // then
+ assert.Equal(t, 50, queue.Capacity())
+ assert.Equal(t, blob('b', 6), get(queue, index))
+ assert.Equal(t, blob('c', 6), get(queue, newestIndex))
+}
+
+func TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereTailIsBeforeHead(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(100, 0, false)
+
+ // when
+ queue.Push(blob('a', 70)) // header + entry + left margin = 75 bytes
+ queue.Push(blob('b', 10)) // 75 + 10 + 4 = 89 bytes
+ queue.Pop() // space freed at the beginning
+ queue.Push(blob('c', 30)) // 34 bytes used at the beginning, tail pointer is before head pointer
+ queue.Push(blob('d', 40)) // 44 bytes needed but no available in one segment, allocate new memory
+
+ // then
+ assert.Equal(t, 200, queue.Capacity())
+ assert.Equal(t, blob('c', 30), pop(queue))
+ // empty blob fills space between tail and head,
+ // created when additional memory was allocated,
+ // it keeps current entries indexes unchanged
+ assert.Equal(t, blob(0, 36), pop(queue))
+ assert.Equal(t, blob('b', 10), pop(queue))
+ assert.Equal(t, blob('d', 40), pop(queue))
+}
+
+func TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereTailIsBeforeHead(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(100, 0, false)
+
+ // when
+ queue.Push(blob('a', 70)) // header + entry + left margin = 75 bytes
+ index, _ := queue.Push(blob('b', 10)) // 75 + 10 + 4 = 89 bytes
+ queue.Pop() // space freed at the beginning
+ queue.Push(blob('c', 30)) // 34 bytes used at the beginning, tail pointer is before head pointer
+ newestIndex, _ := queue.Push(blob('d', 40)) // 44 bytes needed but no available in one segment, allocate new memory
+
+ // then
+ assert.Equal(t, 200, queue.Capacity())
+ assert.Equal(t, blob('b', 10), get(queue, index))
+ assert.Equal(t, blob('d', 40), get(queue, newestIndex))
+}
+
+func TestAllocateAdditionalSpaceForValueBiggerThanInitQueue(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(11, 0, false)
+
+ // when
+ queue.Push(blob('a', 100))
+
+ // then
+ assert.Equal(t, blob('a', 100), pop(queue))
+ assert.Equal(t, 230, queue.Capacity())
+}
+
+func TestAllocateAdditionalSpaceForValueBiggerThanQueue(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(21, 0, false)
+
+ // when
+ queue.Push(make([]byte, 2))
+ queue.Push(make([]byte, 2))
+ queue.Push(make([]byte, 100))
+
+ // then
+ queue.Pop()
+ queue.Pop()
+ assert.Equal(t, make([]byte, 100), pop(queue))
+ assert.Equal(t, 250, queue.Capacity())
+}
+
+func TestPopWholeQueue(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(13, 0, false)
+
+ // when
+ queue.Push([]byte("a"))
+ queue.Push([]byte("b"))
+ queue.Pop()
+ queue.Pop()
+ queue.Push([]byte("c"))
+
+ // then
+ assert.Equal(t, 13, queue.Capacity())
+ assert.Equal(t, []byte("c"), pop(queue))
+}
+
+func TestGetEntryFromIndex(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(20, 0, false)
+
+ // when
+ queue.Push([]byte("a"))
+ index, _ := queue.Push([]byte("b"))
+ queue.Push([]byte("c"))
+ result, _ := queue.Get(index)
+
+ // then
+ assert.Equal(t, []byte("b"), result)
+}
+
+func TestGetEntryFromInvalidIndex(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(1, 0, false)
+ queue.Push([]byte("a"))
+
+ // when
+ result, err := queue.Get(0)
+
+ // then
+ assert.Nil(t, result)
+ assert.EqualError(t, err, "Index must be grater than zero. Invalid index.")
+}
+
+func TestGetEntryFromIndexOutOfRange(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(1, 0, false)
+ queue.Push([]byte("a"))
+
+ // when
+ result, err := queue.Get(42)
+
+ // then
+ assert.Nil(t, result)
+ assert.EqualError(t, err, "Index out of range")
+}
+
+func TestGetEntryFromEmptyQueue(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(13, 0, false)
+
+ // when
+ result, err := queue.Get(1)
+
+ // then
+ assert.Nil(t, result)
+ assert.EqualError(t, err, "Empty queue")
+}
+
+func TestMaxSizeLimit(t *testing.T) {
+ t.Parallel()
+
+ // given
+ queue := NewBytesQueue(30, 50, false)
+
+ // when
+ queue.Push(blob('a', 25))
+ queue.Push(blob('b', 5))
+ capacity := queue.Capacity()
+ _, err := queue.Push(blob('c', 15))
+
+ // then
+ assert.Equal(t, 50, capacity)
+ assert.EqualError(t, err, "Full queue. Maximum size limit reached.")
+ assert.Equal(t, blob('a', 25), pop(queue))
+ assert.Equal(t, blob('b', 5), pop(queue))
+}
+
+func pop(queue *BytesQueue) []byte {
+ entry, err := queue.Pop()
+ if err != nil {
+ panic(err)
+ }
+ return entry
+}
+
+func get(queue *BytesQueue, index int) []byte {
+ entry, err := queue.Get(index)
+ if err != nil {
+ panic(err)
+ }
+ return entry
+}
+
+func blob(char byte, len int) []byte {
+ b := make([]byte, len)
+ for index := range b {
+ b[index] = char
+ }
+ return b
+}
diff --git a/vendor/github.com/allegro/bigcache/server/README.md b/vendor/github.com/allegro/bigcache/server/README.md
new file mode 100644
index 00000000..894235f3
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/server/README.md
@@ -0,0 +1,105 @@
+# BigCache HTTP Server
+
+This is a basic HTTP server implementation for BigCache. It has a basic RESTful API and is designed for easy operational deployments. This server is intended to be consumed as a standalone executable, for things like Cloud Foundry, Heroku, etc. A design goal is versatility, so if you want to cache pictures, software artifacts, text, or any type of bit, the BigCache HTTP Server should fit your needs.
+
+```bash
+# cache API.
+GET /api/v1/cache/{key}
+PUT /api/v1/cache/{key}
+DELETE /api/v1/cache/{key}
+
+# stats API.
+GET /api/v1/stats
+```
+
+The cache API is designed for ease-of-use caching and accepts any content type. The stats API will return hit and miss statistics about the cache since the last time the server was started - they will reset whenever the server is restarted.
+
+### Notes for Operators
+
+1. No SSL support, currently.
+1. No authentication, currently.
+1. Stats from the stats API are not persistent.
+1. The easiest way to clean the cache is to restart the process; it takes less than a second to initialise.
+1. There is no replication or clustering.
+
+### Command-line Interface
+
+```powershell
+PS C:\go\src\github.com\mxplusb\bigcache\server> .\server.exe -h
+Usage of C:\go\src\github.com\mxplusb\bigcache\server\server.exe:
+ -lifetime duration
+ Lifetime of each cache object. (default 10m0s)
+ -logfile string
+ Location of the logfile.
+ -max int
+ Maximum amount of data in the cache in MB. (default 8192)
+ -maxInWindow int
+ Used only in initial memory allocation. (default 600000)
+ -maxShardEntrySize int
+ The maximum size of each object stored in a shard. Used only in initial memory allocation. (default 500)
+ -port int
+ The port to listen on. (default 9090)
+ -shards int
+ Number of shards for the cache. (default 1024)
+ -v Verbose logging.
+ -version
+ Print server version.
+```
+
+Example:
+
+```bash
+$ curl -v -XPUT localhost:9090/api/v1/cache/example -d "yay!"
+* Trying 127.0.0.1...
+* Connected to localhost (127.0.0.1) port 9090 (#0)
+> PUT /api/v1/cache/example HTTP/1.1
+> Host: localhost:9090
+> User-Agent: curl/7.47.0
+> Accept: */*
+> Content-Length: 4
+> Content-Type: application/x-www-form-urlencoded
+>
+* upload completely sent off: 4 out of 4 bytes
+< HTTP/1.1 201 Created
+< Date: Fri, 17 Nov 2017 03:50:07 GMT
+< Content-Length: 0
+< Content-Type: text/plain; charset=utf-8
+<
+* Connection #0 to host localhost left intact
+$
+$ curl -v -XGET localhost:9090/api/v1/cache/example
+Note: Unnecessary use of -X or --request, GET is already inferred.
+* Trying 127.0.0.1...
+* Connected to localhost (127.0.0.1) port 9090 (#0)
+> GET /api/v1/cache/example HTTP/1.1
+> Host: localhost:9090
+> User-Agent: curl/7.47.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Date: Fri, 17 Nov 2017 03:50:23 GMT
+< Content-Length: 4
+< Content-Type: text/plain; charset=utf-8
+<
+* Connection #0 to host localhost left intact
+yay!
+```
+
+The server does log basic metrics:
+
+```bash
+$ ./server
+2017/11/16 22:49:22 cache initialised.
+2017/11/16 22:49:22 starting server on :9090
+2017/11/16 22:50:07 stored "example" in cache.
+2017/11/16 22:50:07 request took 277000ns.
+2017/11/16 22:50:23 request took 9000ns.
+```
+
+### Acquiring Natively
+
+This is native Go with no external dependencies, so it will compile for all supported Golang platforms. To build:
+
+```bash
+go build server.go
+```
diff --git a/vendor/github.com/allegro/bigcache/server/cache_handlers.go b/vendor/github.com/allegro/bigcache/server/cache_handlers.go
new file mode 100644
index 00000000..48e41130
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/server/cache_handlers.go
@@ -0,0 +1,87 @@
+package main
+
+import (
+ "io/ioutil"
+ "log"
+ "net/http"
+ "strings"
+)
+
+func cacheIndexHandler() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.Method {
+ case http.MethodGet:
+ getCacheHandler(w, r)
+ case http.MethodPut:
+ putCacheHandler(w, r)
+ case http.MethodDelete:
+ deleteCacheHandler(w, r)
+ }
+ })
+}
+
+// handles get requests.
+func getCacheHandler(w http.ResponseWriter, r *http.Request) {
+ target := r.URL.Path[len(cachePath):]
+ if target == "" {
+ w.WriteHeader(http.StatusBadRequest)
+ w.Write([]byte("can't get a key if there is no key."))
+ log.Print("empty request.")
+ return
+ }
+ entry, err := cache.Get(target)
+ if err != nil {
+ errMsg := (err).Error()
+ if strings.Contains(errMsg, "not found") {
+ log.Print(err)
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+ log.Print(err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ w.Write(entry)
+}
+
+func putCacheHandler(w http.ResponseWriter, r *http.Request) {
+ target := r.URL.Path[len(cachePath):]
+ if target == "" {
+ w.WriteHeader(http.StatusBadRequest)
+ w.Write([]byte("can't put a key if there is no key."))
+ log.Print("empty request.")
+ return
+ }
+
+ entry, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ log.Print(err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ if err := cache.Set(target, []byte(entry)); err != nil {
+ log.Print(err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ log.Printf("stored \"%s\" in cache.", target)
+ w.WriteHeader(http.StatusCreated)
+}
+
+// delete cache objects.
+func deleteCacheHandler(w http.ResponseWriter, r *http.Request) {
+ target := r.URL.Path[len(cachePath):]
+ if err := cache.Delete(target); err != nil {
+ if strings.Contains((err).Error(), "not found") {
+ w.WriteHeader(http.StatusNotFound)
+ log.Printf("%s not found.", target)
+ return
+ }
+ w.WriteHeader(http.StatusInternalServerError)
+ log.Printf("internal cache error: %s", err)
+ }
+ // this is what the RFC says to use when calling DELETE.
+ w.WriteHeader(http.StatusOK)
+ return
+}
diff --git a/vendor/github.com/allegro/bigcache/server/middleware.go b/vendor/github.com/allegro/bigcache/server/middleware.go
new file mode 100644
index 00000000..a4c673f3
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/server/middleware.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+ "log"
+ "net/http"
+ "time"
+)
+
+// our base middleware implementation.
+type service func(http.Handler) http.Handler
+
+// chain load middleware services.
+func serviceLoader(h http.Handler, svcs ...service) http.Handler {
+ for _, svc := range svcs {
+ h = svc(h)
+ }
+ return h
+}
+
+// middleware for request length metrics.
+func requestMetrics(l *log.Logger) service {
+ return func(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ start := time.Now()
+ h.ServeHTTP(w, r)
+ l.Printf("%s request to %s took %vns.", r.Method, r.URL.Path, time.Now().Sub(start).Nanoseconds())
+ })
+ }
+}
diff --git a/vendor/github.com/allegro/bigcache/server/server.go b/vendor/github.com/allegro/bigcache/server/server.go
new file mode 100644
index 00000000..35843437
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/server/server.go
@@ -0,0 +1,85 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "strconv"
+
+ "github.com/allegro/bigcache"
+)
+
+const (
+ // base HTTP paths.
+ apiVersion = "v1"
+ apiBasePath = "/api/" + apiVersion + "/"
+
+ // path to cache.
+ cachePath = apiBasePath + "cache/"
+ statsPath = apiBasePath + "stats"
+
+ // server version.
+ version = "1.0.0"
+)
+
+var (
+ port int
+ logfile string
+ ver bool
+
+ // cache-specific settings.
+ cache *bigcache.BigCache
+ config = bigcache.Config{}
+)
+
+func init() {
+ flag.BoolVar(&config.Verbose, "v", false, "Verbose logging.")
+ flag.IntVar(&config.Shards, "shards", 1024, "Number of shards for the cache.")
+ flag.IntVar(&config.MaxEntriesInWindow, "maxInWindow", 1000*10*60, "Used only in initial memory allocation.")
+ flag.DurationVar(&config.LifeWindow, "lifetime", 100000*100000*60, "Lifetime of each cache object.")
+ flag.IntVar(&config.HardMaxCacheSize, "max", 8192, "Maximum amount of data in the cache in MB.")
+ flag.IntVar(&config.MaxEntrySize, "maxShardEntrySize", 500, "The maximum size of each object stored in a shard. Used only in initial memory allocation.")
+ flag.IntVar(&port, "port", 9090, "The port to listen on.")
+ flag.StringVar(&logfile, "logfile", "", "Location of the logfile.")
+ flag.BoolVar(&ver, "version", false, "Print server version.")
+}
+
+func main() {
+ flag.Parse()
+
+ if ver {
+ fmt.Printf("BigCache HTTP Server v%s", version)
+ os.Exit(0)
+ }
+
+ var logger *log.Logger
+
+ if logfile == "" {
+ logger = log.New(os.Stdout, "", log.LstdFlags)
+ } else {
+ f, err := os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, 0600)
+ if err != nil {
+ panic(err)
+ }
+ logger = log.New(f, "", log.LstdFlags)
+ }
+
+ var err error
+ cache, err = bigcache.NewBigCache(config)
+ if err != nil {
+ logger.Fatal(err)
+ }
+
+ logger.Print("cache initialised.")
+
+ // let the middleware log.
+ http.Handle(cachePath, serviceLoader(cacheIndexHandler(), requestMetrics(logger)))
+ http.Handle(statsPath, serviceLoader(statsIndexHandler(), requestMetrics(logger)))
+
+ logger.Printf("starting server on :%d", port)
+
+ strPort := ":" + strconv.Itoa(port)
+ log.Fatal("ListenAndServe: ", http.ListenAndServe(strPort, nil))
+}
diff --git a/vendor/github.com/allegro/bigcache/server/server_test.go b/vendor/github.com/allegro/bigcache/server/server_test.go
new file mode 100644
index 00000000..328a7f1b
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/server/server_test.go
@@ -0,0 +1,185 @@
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/allegro/bigcache"
+)
+
+const (
+ testBaseString = "http://bigcache.org"
+)
+
+func testCacheSetup() {
+ cache, _ = bigcache.NewBigCache(bigcache.Config{
+ Shards: 1024,
+ LifeWindow: 10 * time.Minute,
+ MaxEntriesInWindow: 1000 * 10 * 60,
+ MaxEntrySize: 500,
+ Verbose: true,
+ HardMaxCacheSize: 8192,
+ OnRemove: nil,
+ })
+}
+
+func TestMain(m *testing.M) {
+ testCacheSetup()
+ m.Run()
+}
+
+func TestGetWithNoKey(t *testing.T) {
+ t.Parallel()
+ req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/", nil)
+ rr := httptest.NewRecorder()
+
+ getCacheHandler(rr, req)
+ resp := rr.Result()
+
+ if resp.StatusCode != 400 {
+ t.Errorf("want: 400; got: %d", resp.StatusCode)
+ }
+}
+
+func TestGetWithMissingKey(t *testing.T) {
+ t.Parallel()
+ req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/doesNotExist", nil)
+ rr := httptest.NewRecorder()
+
+ getCacheHandler(rr, req)
+ resp := rr.Result()
+
+ if resp.StatusCode != 404 {
+ t.Errorf("want: 404; got: %d", resp.StatusCode)
+ }
+}
+
+func TestGetKey(t *testing.T) {
+ t.Parallel()
+ req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/getKey", nil)
+ rr := httptest.NewRecorder()
+
+ // set something.
+ cache.Set("getKey", []byte("123"))
+
+ getCacheHandler(rr, req)
+ resp := rr.Result()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Errorf("cannot deserialise test response: %s", err)
+ }
+
+ if string(body) != "123" {
+ t.Errorf("want: 123; got: %s.\n\tcan't get existing key getKey.", string(body))
+ }
+}
+
+func TestPutKey(t *testing.T) {
+ t.Parallel()
+ req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", bytes.NewBuffer([]byte("123")))
+ rr := httptest.NewRecorder()
+
+ putCacheHandler(rr, req)
+
+ testPutKeyResult, err := cache.Get("putKey")
+ if err != nil {
+ t.Errorf("error returning cache entry: %s", err)
+ }
+
+ if string(testPutKeyResult) != "123" {
+ t.Errorf("want: 123; got: %s.\n\tcan't get PUT key putKey.", string(testPutKeyResult))
+ }
+}
+
+func TestPutEmptyKey(t *testing.T) {
+ t.Parallel()
+
+ req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/", bytes.NewBuffer([]byte("123")))
+ rr := httptest.NewRecorder()
+
+ putCacheHandler(rr, req)
+ resp := rr.Result()
+
+ if resp.StatusCode != 400 {
+ t.Errorf("want: 400; got: %d.\n\tempty key insertion should return with 400", resp.StatusCode)
+ }
+}
+
+func TestDeleteEmptyKey(t *testing.T) {
+ t.Parallel()
+
+ req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/", bytes.NewBuffer([]byte("123")))
+ rr := httptest.NewRecorder()
+
+ deleteCacheHandler(rr, req)
+ resp := rr.Result()
+
+ if resp.StatusCode != 404 {
+ t.Errorf("want: 404; got: %d.\n\tapparently we're trying to delete empty keys.", resp.StatusCode)
+ }
+}
+
+func TestDeleteInvalidKey(t *testing.T) {
+ t.Parallel()
+
+ req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/invalidDeleteKey", bytes.NewBuffer([]byte("123")))
+ rr := httptest.NewRecorder()
+
+ deleteCacheHandler(rr, req)
+ resp := rr.Result()
+
+ if resp.StatusCode != 404 {
+ t.Errorf("want: 404; got: %d.\n\tapparently we're trying to delete invalid keys.", resp.StatusCode)
+ }
+}
+
+func TestDeleteKey(t *testing.T) {
+ t.Parallel()
+
+ req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/testDeleteKey", bytes.NewBuffer([]byte("123")))
+ rr := httptest.NewRecorder()
+
+ if err := cache.Set("testDeleteKey", []byte("123")); err != nil {
+ t.Errorf("can't set key for testing. %s", err)
+ }
+
+ deleteCacheHandler(rr, req)
+ resp := rr.Result()
+
+ if resp.StatusCode != 200 {
+ t.Errorf("want: 200; got: %d.\n\tcan't delete keys.", resp.StatusCode)
+ }
+}
+
+func TestGetStats(t *testing.T) {
+ t.Parallel()
+ var testStats bigcache.Stats
+
+ req := httptest.NewRequest("GET", testBaseString+"/api/v1/stats", nil)
+ rr := httptest.NewRecorder()
+
+ // manually enter a key so there are some stats. get it so there's at least 1 hit.
+ if err := cache.Set("incrementStats", []byte("123")); err != nil {
+ t.Errorf("error setting cache value. error %s", err)
+ }
+ // it's okay if this fails, since we'll catch it downstream.
+ if _, err := cache.Get("incrementStats"); err != nil {
+ t.Errorf("can't find incrementStats. error: %s", err)
+ }
+
+ getCacheStatsHandler(rr, req)
+ resp := rr.Result()
+
+ if err := json.NewDecoder(resp.Body).Decode(&testStats); err != nil {
+ t.Errorf("error decoding cache stats. error: %s", err)
+ }
+
+ if testStats.Hits == 0 {
+ t.Errorf("want: > 0; got: 0.\n\thandler not properly returning stats info.")
+ }
+}
diff --git a/vendor/github.com/allegro/bigcache/server/stats_handler.go b/vendor/github.com/allegro/bigcache/server/stats_handler.go
new file mode 100644
index 00000000..4584b96a
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/server/stats_handler.go
@@ -0,0 +1,33 @@
+package main
+
+import (
+ "encoding/json"
+ "log"
+ "net/http"
+)
+
+// index for stats handle
+func statsIndexHandler() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.Method {
+ case http.MethodGet:
+ getCacheStatsHandler(w, r)
+ default:
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ }
+ })
+}
+
+// returns the cache's statistics.
+func getCacheStatsHandler(w http.ResponseWriter, r *http.Request) {
+ target, err := json.Marshal(cache.Stats())
+ if err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ log.Printf("cannot marshal cache stats. error: %s", err)
+ return
+ }
+ // since we're sending a struct, make it easy for consumers to interface.
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ w.Write(target)
+ return
+}
diff --git a/vendor/github.com/allegro/bigcache/shard.go b/vendor/github.com/allegro/bigcache/shard.go
new file mode 100644
index 00000000..af48ebc3
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/shard.go
@@ -0,0 +1,229 @@
+package bigcache
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "github.com/allegro/bigcache/queue"
+)
+
+type cacheShard struct {
+ hashmap map[uint64]uint32
+ entries queue.BytesQueue
+ lock sync.RWMutex
+ entryBuffer []byte
+ onRemove func(wrappedEntry []byte)
+
+ isVerbose bool
+ logger Logger
+ clock clock
+ lifeWindow uint64
+
+ stats Stats
+}
+
+type onRemoveCallback func(wrappedEntry []byte)
+
+func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
+ s.lock.RLock()
+ itemIndex := s.hashmap[hashedKey]
+
+ if itemIndex == 0 {
+ s.lock.RUnlock()
+ s.miss()
+ return nil, notFound(key)
+ }
+
+ wrappedEntry, err := s.entries.Get(int(itemIndex))
+ if err != nil {
+ s.lock.RUnlock()
+ s.miss()
+ return nil, err
+ }
+ if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
+ if s.isVerbose {
+ s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
+ }
+ s.lock.RUnlock()
+ s.collision()
+ return nil, notFound(key)
+ }
+ s.lock.RUnlock()
+ s.hit()
+ return readEntry(wrappedEntry), nil
+}
+
+func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
+ currentTimestamp := uint64(s.clock.epoch())
+
+ s.lock.Lock()
+
+ if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
+ if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
+ resetKeyFromEntry(previousEntry)
+ }
+ }
+
+ if oldestEntry, err := s.entries.Peek(); err == nil {
+ s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
+ }
+
+ w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)
+
+ for {
+ if index, err := s.entries.Push(w); err == nil {
+ s.hashmap[hashedKey] = uint32(index)
+ s.lock.Unlock()
+ return nil
+ }
+ if s.removeOldestEntry() != nil {
+ s.lock.Unlock()
+ return fmt.Errorf("entry is bigger than max shard size")
+ }
+ }
+}
+
+func (s *cacheShard) del(key string, hashedKey uint64) error {
+ s.lock.RLock()
+ itemIndex := s.hashmap[hashedKey]
+
+ if itemIndex == 0 {
+ s.lock.RUnlock()
+ s.delmiss()
+ return notFound(key)
+ }
+
+ wrappedEntry, err := s.entries.Get(int(itemIndex))
+ if err != nil {
+ s.lock.RUnlock()
+ s.delmiss()
+ return err
+ }
+ s.lock.RUnlock()
+
+ s.lock.Lock()
+ {
+ delete(s.hashmap, hashedKey)
+ s.onRemove(wrappedEntry)
+ resetKeyFromEntry(wrappedEntry)
+ }
+ s.lock.Unlock()
+
+ s.delhit()
+ return nil
+}
+
+func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
+ oldestTimestamp := readTimestampFromEntry(oldestEntry)
+ if currentTimestamp-oldestTimestamp > s.lifeWindow {
+ evict()
+ return true
+ }
+ return false
+}
+
+func (s *cacheShard) cleanUp(currentTimestamp uint64) {
+ s.lock.Lock()
+ for {
+ if oldestEntry, err := s.entries.Peek(); err != nil {
+ break
+ } else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted {
+ break
+ }
+ }
+ s.lock.Unlock()
+}
+
+func (s *cacheShard) getOldestEntry() ([]byte, error) {
+ return s.entries.Peek()
+}
+
+func (s *cacheShard) getEntry(index int) ([]byte, error) {
+ return s.entries.Get(index)
+}
+
+func (s *cacheShard) copyKeys() (keys []uint32, next int) {
+ keys = make([]uint32, len(s.hashmap))
+
+ s.lock.RLock()
+
+ for _, index := range s.hashmap {
+ keys[next] = index
+ next++
+ }
+
+ s.lock.RUnlock()
+ return keys, next
+}
+
+func (s *cacheShard) removeOldestEntry() error {
+ oldest, err := s.entries.Pop()
+ if err == nil {
+ hash := readHashFromEntry(oldest)
+ delete(s.hashmap, hash)
+ s.onRemove(oldest)
+ return nil
+ }
+ return err
+}
+
+func (s *cacheShard) reset(config Config) {
+ s.lock.Lock()
+ s.hashmap = make(map[uint64]uint32, config.initialShardSize())
+ s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
+ s.entries.Reset()
+ s.lock.Unlock()
+}
+
+func (s *cacheShard) len() int {
+ s.lock.RLock()
+ res := len(s.hashmap)
+ s.lock.RUnlock()
+ return res
+}
+
+func (s *cacheShard) getStats() Stats {
+ var stats = Stats{
+ Hits: atomic.LoadInt64(&s.stats.Hits),
+ Misses: atomic.LoadInt64(&s.stats.Misses),
+ DelHits: atomic.LoadInt64(&s.stats.DelHits),
+ DelMisses: atomic.LoadInt64(&s.stats.DelMisses),
+ Collisions: atomic.LoadInt64(&s.stats.Collisions),
+ }
+ return stats
+}
+
+func (s *cacheShard) hit() {
+ atomic.AddInt64(&s.stats.Hits, 1)
+}
+
+func (s *cacheShard) miss() {
+ atomic.AddInt64(&s.stats.Misses, 1)
+}
+
+func (s *cacheShard) delhit() {
+ atomic.AddInt64(&s.stats.DelHits, 1)
+}
+
+func (s *cacheShard) delmiss() {
+ atomic.AddInt64(&s.stats.DelMisses, 1)
+}
+
+func (s *cacheShard) collision() {
+ atomic.AddInt64(&s.stats.Collisions, 1)
+}
+
+func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
+ return &cacheShard{
+ hashmap: make(map[uint64]uint32, config.initialShardSize()),
+ entries: *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose),
+ entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
+ onRemove: callback,
+
+ isVerbose: config.Verbose,
+ logger: newLogger(config.Logger),
+ clock: clock,
+ lifeWindow: uint64(config.LifeWindow.Seconds()),
+ }
+}
diff --git a/vendor/github.com/allegro/bigcache/stats.go b/vendor/github.com/allegro/bigcache/stats.go
new file mode 100644
index 00000000..07157132
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/stats.go
@@ -0,0 +1,15 @@
+package bigcache
+
+// Stats stores cache statistics
+type Stats struct {
+ // Hits is a number of successfully found keys
+ Hits int64 `json:"hits"`
+ // Misses is a number of not found keys
+ Misses int64 `json:"misses"`
+ // DelHits is a number of successfully deleted keys
+ DelHits int64 `json:"delete_hits"`
+ // DelMisses is a number of not deleted keys
+ DelMisses int64 `json:"delete_misses"`
+ // Collisions is a number of happened key-collisions
+ Collisions int64 `json:"collisions"`
+}
diff --git a/vendor/github.com/allegro/bigcache/utils.go b/vendor/github.com/allegro/bigcache/utils.go
new file mode 100644
index 00000000..ca1df79b
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/utils.go
@@ -0,0 +1,16 @@
+package bigcache
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func convertMBToBytes(value int) int {
+ return value * 1024 * 1024
+}
+
+func isPowerOfTwo(number int) bool {
+ return (number & (number - 1)) == 0
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/.travis.yml b/vendor/github.com/ethereum/go-ethereum/.travis.yml
index c1cc7c4a..33a4f894 100644
--- a/vendor/github.com/ethereum/go-ethereum/.travis.yml
+++ b/vendor/github.com/ethereum/go-ethereum/.travis.yml
@@ -29,6 +29,14 @@ matrix:
- os: osx
go: 1.11.x
script:
+ - echo "Increase the maximum number of open file descriptors on macOS"
+ - NOFILE=20480
+ - sudo sysctl -w kern.maxfiles=$NOFILE
+ - sudo sysctl -w kern.maxfilesperproc=$NOFILE
+ - sudo launchctl limit maxfiles $NOFILE $NOFILE
+ - sudo launchctl limit maxfiles
+ - ulimit -S -n $NOFILE
+ - ulimit -n
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
diff --git a/vendor/github.com/ethereum/go-ethereum/README.md b/vendor/github.com/ethereum/go-ethereum/README.md
index f308fb10..7593dd09 100644
--- a/vendor/github.com/ethereum/go-ethereum/README.md
+++ b/vendor/github.com/ethereum/go-ethereum/README.md
@@ -18,7 +18,7 @@ For prerequisites and detailed build instructions please read the
[Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum)
on the wiki.
-Building geth requires both a Go (version 1.7 or later) and a C compiler.
+Building geth requires both a Go (version 1.9 or later) and a C compiler.
You can install them using your favourite package manager.
Once the dependencies are installed, run
@@ -168,7 +168,7 @@ HTTP based JSON-RPC API options:
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to connect
-via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](http://www.jsonrpc.org/specification)
+via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](https://www.jsonrpc.org/specification)
on all transports. You can reuse the same connection for multiple requests!
**Note: Please understand the security implications of opening up an HTTP/WS based transport before
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go
index 93b513c3..f544c80d 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go
@@ -243,11 +243,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// input offset is the bytes offset for packed output
inputOffset := 0
for _, abiArg := range abiArgs {
- if abiArg.Type.T == ArrayTy {
- inputOffset += 32 * abiArg.Type.Size
- } else {
- inputOffset += 32
- }
+ inputOffset += getDynamicTypeOffset(abiArg.Type)
}
var ret []byte
for i, a := range args {
@@ -257,14 +253,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
if err != nil {
return nil, err
}
- // check for a slice type (string, bytes, slice)
- if input.Type.requiresLengthPrefix() {
- // calculate the offset
- offset := inputOffset + len(variableInput)
+ // check for dynamic types
+ if isDynamicType(input.Type) {
// set the offset
- ret = append(ret, packNum(reflect.ValueOf(offset))...)
- // Append the packed output to the variable input. The variable input
- // will be appended at the end of the input.
+ ret = append(ret, packNum(reflect.ValueOf(inputOffset))...)
+ // calculate next offset
+ inputOffset += len(packed)
+ // append to variable input
variableInput = append(variableInput, packed...)
} else {
// append the packed value to the input
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/pack_test.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/pack_test.go
index 58a5b7a5..ddd2b736 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/pack_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/pack_test.go
@@ -324,6 +324,66 @@ func TestPack(t *testing.T) {
"foobar",
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
},
+ {
+ "string[]",
+ []string{"hello", "foobar"},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
+ "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
+ "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
+ "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
+ "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
+ },
+ {
+ "string[2]",
+ []string{"hello", "foobar"},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
+ "0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
+ "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
+ "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
+ "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
+ },
+ {
+ "bytes32[][]",
+ [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
+ "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
+ "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+ },
+
+ {
+ "bytes32[][2]",
+ [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
+ "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+ },
+
+ {
+ "bytes32[3][2]",
+ [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
+ common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+ },
} {
typ, err := NewType(test.typ)
if err != nil {
@@ -336,7 +396,7 @@ func TestPack(t *testing.T) {
}
if !bytes.Equal(output, test.output) {
- t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
+ t.Errorf("input %d for typ: %v failed. Expected bytes: '%x' Got: '%x'", i, typ.String(), test.output, output)
}
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go
index dce89d2b..6bfaabf5 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go
@@ -183,23 +183,39 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
return nil, err
}
- if t.T == SliceTy || t.T == ArrayTy {
- var packed []byte
+ switch t.T {
+ case SliceTy, ArrayTy:
+ var ret []byte
+ if t.requiresLengthPrefix() {
+ // append length
+ ret = append(ret, packNum(reflect.ValueOf(v.Len()))...)
+ }
+
+ // calculate offset if any
+ offset := 0
+ offsetReq := isDynamicType(*t.Elem)
+ if offsetReq {
+ offset = getDynamicTypeOffset(*t.Elem) * v.Len()
+ }
+ var tail []byte
for i := 0; i < v.Len(); i++ {
val, err := t.Elem.pack(v.Index(i))
if err != nil {
return nil, err
}
- packed = append(packed, val...)
- }
- if t.T == SliceTy {
- return packBytesSlice(packed, v.Len()), nil
- } else if t.T == ArrayTy {
- return packed, nil
+ if !offsetReq {
+ ret = append(ret, val...)
+ continue
+ }
+ ret = append(ret, packNum(reflect.ValueOf(offset))...)
+ offset += len(val)
+ tail = append(tail, val...)
}
+ return append(ret, tail...), nil
+ default:
+ return packElement(t, v), nil
}
- return packElement(t, v), nil
}
// requireLengthPrefix returns whether the type requires any sort of length
@@ -207,3 +223,27 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
func (t Type) requiresLengthPrefix() bool {
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
}
+
+// isDynamicType returns true if the type is dynamic.
+// StringTy, BytesTy, and SliceTy(irrespective of slice element type) are dynamic types
+// ArrayTy is considered dynamic if and only if the Array element is a dynamic type.
+// This function recursively checks the type for slice and array elements.
+func isDynamicType(t Type) bool {
+ // dynamic types
+ // array is also a dynamic type if the array type is dynamic
+ return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem))
+}
+
+// getDynamicTypeOffset returns the offset for the type.
+// See `isDynamicType` to know which types are considered dynamic.
+// If the type t is an array and element type is not a dynamic type, then we consider it a static type and
+// return 32 * size of array since length prefix is not required.
+// If t is a dynamic type or element type(for slices and arrays) is dynamic, then we simply return 32 as offset.
+func getDynamicTypeOffset(t Type) int {
+ // if it is an array and there are no dynamic types
+ // then the array is static type
+ if t.T == ArrayTy && !isDynamicType(*t.Elem) {
+ return 32 * t.Size
+ }
+ return 32
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/account_cache.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/account_cache.go
index da3a46eb..8f660e28 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/account_cache.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/account_cache.go
@@ -265,7 +265,10 @@ func (ac *accountCache) scanAccounts() error {
case (addr == common.Address{}):
log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
default:
- return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}
+ return &accounts.Account{
+ Address: addr,
+ URL: accounts.URL{Scheme: KeyStoreScheme, Path: path},
+ }
}
return nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go
index 0564751c..84d8df0c 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go
@@ -171,7 +171,10 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
if err != nil {
return nil, accounts.Account{}, err
}
- a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))}}
+ a := accounts.Account{
+ Address: key.Address,
+ URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))},
+ }
if err := ks.StoreKey(a.URL.Path, key, auth); err != nil {
zeroKey(key.PrivateKey)
return nil, a, err
@@ -224,5 +227,6 @@ func toISO8601(t time.Time) string {
} else {
tz = fmt.Sprintf("%03d00", offset/3600)
}
- return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
+ return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s",
+ t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/passphrase.go
similarity index 99%
rename from vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go
rename to vendor/github.com/ethereum/go-ethereum/accounts/keystore/passphrase.go
index 9794f32f..a0b6cf53 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/passphrase.go
@@ -233,6 +233,7 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
PrivateKey: key,
}, nil
}
+
func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
if cryptoJson.Cipher != "aes-128-ctr" {
return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher)
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase_test.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/passphrase_test.go
similarity index 100%
rename from vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase_test.go
rename to vendor/github.com/ethereum/go-ethereum/accounts/keystore/passphrase_test.go
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_plain.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/plain.go
similarity index 100%
rename from vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_plain.go
rename to vendor/github.com/ethereum/go-ethereum/accounts/keystore/plain.go
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_plain_test.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/plain_test.go
similarity index 100%
rename from vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_plain_test.go
rename to vendor/github.com/ethereum/go-ethereum/accounts/keystore/plain_test.go
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/presale.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/presale.go
index 1554294e..03055245 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/presale.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/presale.go
@@ -38,7 +38,13 @@ func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accou
return accounts.Account{}, nil, err
}
key.Id = uuid.NewRandom()
- a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: keyStore.JoinPath(keyFileName(key.Address))}}
+ a := accounts.Account{
+ Address: key.Address,
+ URL: accounts.URL{
+ Scheme: KeyStoreScheme,
+ Path: keyStore.JoinPath(keyFileName(key.Address)),
+ },
+ }
err = keyStore.StoreKey(a.URL.Path, key, password)
return a, key, err
}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_wallet.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/wallet.go
similarity index 100%
rename from vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_wallet.go
rename to vendor/github.com/ethereum/go-ethereum/accounts/keystore/wallet.go
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go b/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go
index 962fc021..54b67ce1 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go
@@ -89,7 +89,7 @@ func runCmd(ctx *cli.Context) error {
genesisConfig *core.Genesis
)
if ctx.GlobalBool(MachineFlag.Name) {
- tracer = NewJSONLogger(logconfig, os.Stdout)
+ tracer = vm.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.GlobalBool(DebugFlag.Name) {
debugLogger = vm.NewStructLogger(logconfig)
tracer = debugLogger
@@ -206,6 +206,7 @@ func runCmd(ctx *cli.Context) error {
execTime := time.Since(tstart)
if ctx.GlobalBool(DumpFlag.Name) {
+ statedb.Commit(true)
statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump()))
}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/evm/staterunner.go b/vendor/github.com/ethereum/go-ethereum/cmd/evm/staterunner.go
index 06c9be38..b3c69d9b 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/evm/staterunner.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/evm/staterunner.go
@@ -68,7 +68,7 @@ func stateTestCmd(ctx *cli.Context) error {
)
switch {
case ctx.GlobalBool(MachineFlag.Name):
- tracer = NewJSONLogger(config, os.Stderr)
+ tracer = vm.NewJSONLogger(config, os.Stderr)
case ctx.GlobalBool(DebugFlag.Name):
debugger = vm.NewStructLogger(config)
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go b/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go
index 2ffe1227..a7c20db7 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go
@@ -256,7 +256,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
}
for _, boot := range enodes {
old, err := enode.ParseV4(boot.String())
- if err != nil {
+ if err == nil {
stack.Server().AddPeer(old)
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go
index b0749d23..59f759f0 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"io"
+ "math/big"
"os"
"reflect"
"unicode"
@@ -152,7 +153,9 @@ func enableWhisper(ctx *cli.Context) bool {
func makeFullNode(ctx *cli.Context) *node.Node {
stack, cfg := makeConfigNode(ctx)
-
+ if ctx.GlobalIsSet(utils.ConstantinopleOverrideFlag.Name) {
+ cfg.Eth.ConstantinopleOverride = new(big.Int).SetUint64(ctx.GlobalUint64(utils.ConstantinopleOverrideFlag.Name))
+ }
utils.RegisterEthService(stack, &cfg.Eth)
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go
index 0288b338..ebaeba9f 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go
@@ -87,8 +87,10 @@ var (
utils.LightServFlag,
utils.LightPeersFlag,
utils.LightKDFFlag,
+ utils.WhitelistFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
+ utils.CacheTrieFlag,
utils.CacheGCFlag,
utils.TrieCacheGenFlag,
utils.ListenPortFlag,
@@ -121,6 +123,7 @@ var (
utils.RinkebyFlag,
utils.VMEnableDebugFlag,
utils.NetworkIdFlag,
+ utils.ConstantinopleOverrideFlag,
utils.RPCCORSDomainFlag,
utils.RPCVirtualHostsFlag,
utils.EthStatsURLFlag,
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go
index 8b0491ce..25a702dd 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go
@@ -81,6 +81,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.LightServFlag,
utils.LightPeersFlag,
utils.LightKDFFlag,
+ utils.WhitelistFlag,
},
},
{
@@ -132,6 +133,7 @@ var AppHelpFlagGroups = []flagGroup{
Flags: []cli.Flag{
utils.CacheFlag,
utils.CacheDatabaseFlag,
+ utils.CacheTrieFlag,
utils.CacheGCFlag,
utils.TrieCacheGenFlag,
},
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go
index 5f39a889..1025dfe8 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go
@@ -20,35 +20,41 @@ import (
"encoding/binary"
"errors"
"math"
+ "math/big"
+ "strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ math2 "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
)
-// cppEthereumGenesisSpec represents the genesis specification format used by the
+// alethGenesisSpec represents the genesis specification format used by the
// C++ Ethereum implementation.
-type cppEthereumGenesisSpec struct {
+type alethGenesisSpec struct {
SealEngine string `json:"sealEngine"`
Params struct {
- AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
- HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
- EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
- EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
- ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
- ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
- NetworkID hexutil.Uint64 `json:"networkID"`
- ChainID hexutil.Uint64 `json:"chainID"`
- MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
- MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
- MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
- GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
- MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
- DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
- DurationLimit *hexutil.Big `json:"durationLimit"`
- BlockReward *hexutil.Big `json:"blockReward"`
+ AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"`
+ MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
+ HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
+ DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"`
+ EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
+ EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
+ ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
+ ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
+ MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
+ MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
+ TieBreakingGas bool `json:"tieBreakingGas"`
+ GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
+ MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
+ DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
+ DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"`
+ BlockReward *hexutil.Big `json:"blockReward"`
+ NetworkID hexutil.Uint64 `json:"networkID"`
+ ChainID hexutil.Uint64 `json:"chainID"`
+ AllowFutureBlocks bool `json:"allowFutureBlocks"`
} `json:"params"`
Genesis struct {
@@ -62,57 +68,68 @@ type cppEthereumGenesisSpec struct {
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
- Accounts map[common.Address]*cppEthereumGenesisSpecAccount `json:"accounts"`
+ Accounts map[common.UnprefixedAddress]*alethGenesisSpecAccount `json:"accounts"`
}
-// cppEthereumGenesisSpecAccount is the prefunded genesis account and/or precompiled
+// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
-type cppEthereumGenesisSpecAccount struct {
- Balance *hexutil.Big `json:"balance"`
- Nonce uint64 `json:"nonce,omitempty"`
- Precompiled *cppEthereumGenesisSpecBuiltin `json:"precompiled,omitempty"`
+type alethGenesisSpecAccount struct {
+ Balance *math2.HexOrDecimal256 `json:"balance"`
+ Nonce uint64 `json:"nonce,omitempty"`
+ Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"`
}
-// cppEthereumGenesisSpecBuiltin is the precompiled contract definition.
-type cppEthereumGenesisSpecBuiltin struct {
- Name string `json:"name,omitempty"`
- StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
- Linear *cppEthereumGenesisSpecLinearPricing `json:"linear,omitempty"`
+// alethGenesisSpecBuiltin is the precompiled contract definition.
+type alethGenesisSpecBuiltin struct {
+ Name string `json:"name,omitempty"`
+ StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
+ Linear *alethGenesisSpecLinearPricing `json:"linear,omitempty"`
}
-type cppEthereumGenesisSpecLinearPricing struct {
+type alethGenesisSpecLinearPricing struct {
Base uint64 `json:"base"`
Word uint64 `json:"word"`
}
-// newCppEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific
+// newAlethGenesisSpec converts a go-ethereum genesis block into a Aleth-specific
// chain specification format.
-func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEthereumGenesisSpec, error) {
- // Only ethash is currently supported between go-ethereum and cpp-ethereum
+func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSpec, error) {
+ // Only ethash is currently supported between go-ethereum and aleth
if genesis.Config.Ethash == nil {
return nil, errors.New("unsupported consensus engine")
}
- // Reconstruct the chain spec in Parity's format
- spec := &cppEthereumGenesisSpec{
+ // Reconstruct the chain spec in Aleth format
+ spec := &alethGenesisSpec{
SealEngine: "Ethash",
}
+ // Some defaults
spec.Params.AccountStartNonce = 0
+ spec.Params.TieBreakingGas = false
+ spec.Params.AllowFutureBlocks = false
+ spec.Params.DaoHardforkBlock = 0
+
spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())
- spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
- spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)
+
+ // Byzantium
+ if num := genesis.Config.ByzantiumBlock; num != nil {
+ spec.setByzantium(num)
+ }
+ // Constantinople
+ if num := genesis.Config.ConstantinopleBlock; num != nil {
+ spec.setConstantinople(num)
+ }
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
-
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
- spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxUint64)
+ spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxInt64)
spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
- spec.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
- spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
- spec.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
+ spec.Params.DifficultyBoundDivisor = (*math2.HexOrDecimal256)(params.DifficultyBoundDivisor)
+ spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
+ spec.Params.DurationLimit = (*math2.HexOrDecimal256)(params.DurationLimit)
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8))
@@ -126,77 +143,104 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
- spec.Accounts = make(map[common.Address]*cppEthereumGenesisSpecAccount)
for address, account := range genesis.Alloc {
- spec.Accounts[address] = &cppEthereumGenesisSpecAccount{
- Balance: (*hexutil.Big)(account.Balance),
- Nonce: account.Nonce,
- }
- }
- spec.Accounts[common.BytesToAddress([]byte{1})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "ecrecover", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 3000},
- }
- spec.Accounts[common.BytesToAddress([]byte{2})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "sha256", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 60, Word: 12},
- }
- spec.Accounts[common.BytesToAddress([]byte{3})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "ripemd160", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 600, Word: 120},
- }
- spec.Accounts[common.BytesToAddress([]byte{4})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "identity", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 15, Word: 3},
+ spec.setAccount(address, account)
}
+
+ spec.setPrecompile(1, &alethGenesisSpecBuiltin{Name: "ecrecover",
+ Linear: &alethGenesisSpecLinearPricing{Base: 3000}})
+ spec.setPrecompile(2, &alethGenesisSpecBuiltin{Name: "sha256",
+ Linear: &alethGenesisSpecLinearPricing{Base: 60, Word: 12}})
+ spec.setPrecompile(3, &alethGenesisSpecBuiltin{Name: "ripemd160",
+ Linear: &alethGenesisSpecLinearPricing{Base: 600, Word: 120}})
+ spec.setPrecompile(4, &alethGenesisSpecBuiltin{Name: "identity",
+ Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}})
if genesis.Config.ByzantiumBlock != nil {
- spec.Accounts[common.BytesToAddress([]byte{5})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "modexp", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
- }
- spec.Accounts[common.BytesToAddress([]byte{6})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "alt_bn128_G1_add", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 500},
- }
- spec.Accounts[common.BytesToAddress([]byte{7})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "alt_bn128_G1_mul", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 40000},
- }
- spec.Accounts[common.BytesToAddress([]byte{8})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "alt_bn128_pairing_product", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
- }
+ spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
+ spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
+ Linear: &alethGenesisSpecLinearPricing{Base: 500}})
+ spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
+ Linear: &alethGenesisSpecLinearPricing{Base: 40000}})
+ spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
}
return spec, nil
}
+func (spec *alethGenesisSpec) setPrecompile(address byte, data *alethGenesisSpecBuiltin) {
+ if spec.Accounts == nil {
+ spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
+ }
+ spec.Accounts[common.UnprefixedAddress(common.BytesToAddress([]byte{address}))].Precompiled = data
+}
+
+func (spec *alethGenesisSpec) setAccount(address common.Address, account core.GenesisAccount) {
+ if spec.Accounts == nil {
+ spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
+ }
+
+ a, exist := spec.Accounts[common.UnprefixedAddress(address)]
+ if !exist {
+ a = &alethGenesisSpecAccount{}
+ spec.Accounts[common.UnprefixedAddress(address)] = a
+ }
+ a.Balance = (*math2.HexOrDecimal256)(account.Balance)
+ a.Nonce = account.Nonce
+
+}
+
+func (spec *alethGenesisSpec) setByzantium(num *big.Int) {
+ spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64())
+}
+
+func (spec *alethGenesisSpec) setConstantinople(num *big.Int) {
+ spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64())
+}
+
// parityChainSpec is the chain specification format used by Parity.
type parityChainSpec struct {
- Name string `json:"name"`
- Engine struct {
+ Name string `json:"name"`
+ Datadir string `json:"dataDir"`
+ Engine struct {
Ethash struct {
Params struct {
- MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
- DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
- DurationLimit *hexutil.Big `json:"durationLimit"`
- BlockReward *hexutil.Big `json:"blockReward"`
- HomesteadTransition uint64 `json:"homesteadTransition"`
- EIP150Transition uint64 `json:"eip150Transition"`
- EIP160Transition uint64 `json:"eip160Transition"`
- EIP161abcTransition uint64 `json:"eip161abcTransition"`
- EIP161dTransition uint64 `json:"eip161dTransition"`
- EIP649Reward *hexutil.Big `json:"eip649Reward"`
- EIP100bTransition uint64 `json:"eip100bTransition"`
- EIP649Transition uint64 `json:"eip649Transition"`
+ MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
+ DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
+ DurationLimit *hexutil.Big `json:"durationLimit"`
+ BlockReward map[string]string `json:"blockReward"`
+ DifficultyBombDelays map[string]string `json:"difficultyBombDelays"`
+ HomesteadTransition hexutil.Uint64 `json:"homesteadTransition"`
+ EIP100bTransition hexutil.Uint64 `json:"eip100bTransition"`
} `json:"params"`
} `json:"Ethash"`
} `json:"engine"`
Params struct {
- MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
- MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
- GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
- NetworkID hexutil.Uint64 `json:"networkID"`
- MaxCodeSize uint64 `json:"maxCodeSize"`
- EIP155Transition uint64 `json:"eip155Transition"`
- EIP98Transition uint64 `json:"eip98Transition"`
- EIP86Transition uint64 `json:"eip86Transition"`
- EIP140Transition uint64 `json:"eip140Transition"`
- EIP211Transition uint64 `json:"eip211Transition"`
- EIP214Transition uint64 `json:"eip214Transition"`
- EIP658Transition uint64 `json:"eip658Transition"`
+ AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
+ MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
+ MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
+ GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
+ NetworkID hexutil.Uint64 `json:"networkID"`
+ ChainID hexutil.Uint64 `json:"chainID"`
+ MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
+ MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
+ EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
+ EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
+ EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
+ EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
+ EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
+ EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
+ EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
+ EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
+ EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
+ EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
+ EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
+ EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
+ EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
+ EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
} `json:"params"`
Genesis struct {
@@ -215,22 +259,22 @@ type parityChainSpec struct {
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
- Nodes []string `json:"nodes"`
- Accounts map[common.Address]*parityChainSpecAccount `json:"accounts"`
+ Nodes []string `json:"nodes"`
+ Accounts map[common.UnprefixedAddress]*parityChainSpecAccount `json:"accounts"`
}
// parityChainSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type parityChainSpecAccount struct {
- Balance *hexutil.Big `json:"balance"`
- Nonce uint64 `json:"nonce,omitempty"`
+ Balance math2.HexOrDecimal256 `json:"balance"`
+ Nonce math2.HexOrDecimal64 `json:"nonce,omitempty"`
Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"`
}
// parityChainSpecBuiltin is the precompiled contract definition.
type parityChainSpecBuiltin struct {
Name string `json:"name,omitempty"`
- ActivateAt uint64 `json:"activate_at,omitempty"`
+ ActivateAt math2.HexOrDecimal64 `json:"activate_at,omitempty"`
Pricing *parityChainSpecPricing `json:"pricing,omitempty"`
}
@@ -265,34 +309,51 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
}
// Reconstruct the chain spec in Parity's format
spec := &parityChainSpec{
- Name: network,
- Nodes: bootnodes,
+ Name: network,
+ Nodes: bootnodes,
+ Datadir: strings.ToLower(network),
}
+ spec.Engine.Ethash.Params.BlockReward = make(map[string]string)
+ spec.Engine.Ethash.Params.DifficultyBombDelays = make(map[string]string)
+ // Frontier
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
- spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
- spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
- spec.Engine.Ethash.Params.EIP150Transition = genesis.Config.EIP150Block.Uint64()
- spec.Engine.Ethash.Params.EIP160Transition = genesis.Config.EIP155Block.Uint64()
- spec.Engine.Ethash.Params.EIP161abcTransition = genesis.Config.EIP158Block.Uint64()
- spec.Engine.Ethash.Params.EIP161dTransition = genesis.Config.EIP158Block.Uint64()
- spec.Engine.Ethash.Params.EIP649Reward = (*hexutil.Big)(ethash.ByzantiumBlockReward)
- spec.Engine.Ethash.Params.EIP100bTransition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Engine.Ethash.Params.EIP649Transition = genesis.Config.ByzantiumBlock.Uint64()
+ spec.Engine.Ethash.Params.BlockReward["0x0"] = hexutil.EncodeBig(ethash.FrontierBlockReward)
+ // Homestead
+ spec.Engine.Ethash.Params.HomesteadTransition = hexutil.Uint64(genesis.Config.HomesteadBlock.Uint64())
+
+ // Tangerine Whistle : 150
+ // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-608.md
+ spec.Params.EIP150Transition = hexutil.Uint64(genesis.Config.EIP150Block.Uint64())
+
+ // Spurious Dragon: 155, 160, 161, 170
+ // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-607.md
+ spec.Params.EIP155Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
+ spec.Params.EIP160Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
+ spec.Params.EIP161abcTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
+ spec.Params.EIP161dTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
+
+ // Byzantium
+ if num := genesis.Config.ByzantiumBlock; num != nil {
+ spec.setByzantium(num)
+ }
+ // Constantinople
+ if num := genesis.Config.ConstantinopleBlock; num != nil {
+ spec.setConstantinople(num)
+ }
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
- spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
+ spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
+ spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaxCodeSize = params.MaxCodeSize
- spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
- spec.Params.EIP98Transition = math.MaxUint64
- spec.Params.EIP86Transition = math.MaxUint64
- spec.Params.EIP140Transition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Params.EIP211Transition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Params.EIP214Transition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Params.EIP658Transition = genesis.Config.ByzantiumBlock.Uint64()
+ // geth has it set from zero
+ spec.Params.MaxCodeSizeTransition = 0
+
+ // Disable this one
+ spec.Params.EIP98Transition = math.MaxInt64
spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8))
binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce)
@@ -305,42 +366,77 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
- spec.Accounts = make(map[common.Address]*parityChainSpecAccount)
+ spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
for address, account := range genesis.Alloc {
- spec.Accounts[address] = &parityChainSpecAccount{
- Balance: (*hexutil.Big)(account.Balance),
- Nonce: account.Nonce,
+ bal := math2.HexOrDecimal256(*account.Balance)
+
+ spec.Accounts[common.UnprefixedAddress(address)] = &parityChainSpecAccount{
+ Balance: bal,
+ Nonce: math2.HexOrDecimal64(account.Nonce),
}
}
- spec.Accounts[common.BytesToAddress([]byte{1})].Builtin = &parityChainSpecBuiltin{
- Name: "ecrecover", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}},
- }
- spec.Accounts[common.BytesToAddress([]byte{2})].Builtin = &parityChainSpecBuiltin{
+ spec.setPrecompile(1, &parityChainSpecBuiltin{Name: "ecrecover",
+ Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}}})
+
+ spec.setPrecompile(2, &parityChainSpecBuiltin{
Name: "sha256", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 60, Word: 12}},
- }
- spec.Accounts[common.BytesToAddress([]byte{3})].Builtin = &parityChainSpecBuiltin{
+ })
+ spec.setPrecompile(3, &parityChainSpecBuiltin{
Name: "ripemd160", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 600, Word: 120}},
- }
- spec.Accounts[common.BytesToAddress([]byte{4})].Builtin = &parityChainSpecBuiltin{
+ })
+ spec.setPrecompile(4, &parityChainSpecBuiltin{
Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
- }
+ })
if genesis.Config.ByzantiumBlock != nil {
- spec.Accounts[common.BytesToAddress([]byte{5})].Builtin = &parityChainSpecBuiltin{
- Name: "modexp", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
- }
- spec.Accounts[common.BytesToAddress([]byte{6})].Builtin = &parityChainSpecBuiltin{
- Name: "alt_bn128_add", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
- }
- spec.Accounts[common.BytesToAddress([]byte{7})].Builtin = &parityChainSpecBuiltin{
- Name: "alt_bn128_mul", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
- }
- spec.Accounts[common.BytesToAddress([]byte{8})].Builtin = &parityChainSpecBuiltin{
- Name: "alt_bn128_pairing", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
- }
+ blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64())
+ spec.setPrecompile(5, &parityChainSpecBuiltin{
+ Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
+ })
+ spec.setPrecompile(6, &parityChainSpecBuiltin{
+ Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
+ })
+ spec.setPrecompile(7, &parityChainSpecBuiltin{
+ Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
+ })
+ spec.setPrecompile(8, &parityChainSpecBuiltin{
+ Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
+ })
}
return spec, nil
}
+func (spec *parityChainSpec) setPrecompile(address byte, data *parityChainSpecBuiltin) {
+ if spec.Accounts == nil {
+ spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
+ }
+ a := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
+ if _, exist := spec.Accounts[a]; !exist {
+ spec.Accounts[a] = &parityChainSpecAccount{}
+ }
+ spec.Accounts[a].Builtin = data
+}
+
+func (spec *parityChainSpec) setByzantium(num *big.Int) {
+ spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ByzantiumBlockReward)
+ spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(3000000)
+ n := hexutil.Uint64(num.Uint64())
+ spec.Engine.Ethash.Params.EIP100bTransition = n
+ spec.Params.EIP140Transition = n
+ spec.Params.EIP211Transition = n
+ spec.Params.EIP214Transition = n
+ spec.Params.EIP658Transition = n
+}
+
+func (spec *parityChainSpec) setConstantinople(num *big.Int) {
+ spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ConstantinopleBlockReward)
+ spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(2000000)
+ n := hexutil.Uint64(num.Uint64())
+ spec.Params.EIP145Transition = n
+ spec.Params.EIP1014Transition = n
+ spec.Params.EIP1052Transition = n
+ spec.Params.EIP1283Transition = n
+}
+
// pyEthereumGenesisSpec represents the genesis specification format used by the
// Python Ethereum implementation.
type pyEthereumGenesisSpec struct {
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis_test.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis_test.go
new file mode 100644
index 00000000..83e73836
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis_test.go
@@ -0,0 +1,109 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see .
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/ethereum/go-ethereum/core"
+)
+
+// Tests the go-ethereum to Aleth chainspec conversion for the Stureby testnet.
+func TestAlethSturebyConverter(t *testing.T) {
+ blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ var genesis core.Genesis
+ if err := json.Unmarshal(blob, &genesis); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ spec, err := newAlethGenesisSpec("stureby", &genesis)
+ if err != nil {
+ t.Fatalf("failed creating chainspec: %v", err)
+ }
+
+ expBlob, err := ioutil.ReadFile("testdata/stureby_aleth.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ expspec := &alethGenesisSpec{}
+ if err := json.Unmarshal(expBlob, expspec); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ if !reflect.DeepEqual(expspec, spec) {
+ t.Errorf("chainspec mismatch")
+ c := spew.ConfigState{
+ DisablePointerAddresses: true,
+ SortKeys: true,
+ }
+ exp := strings.Split(c.Sdump(expspec), "\n")
+ got := strings.Split(c.Sdump(spec), "\n")
+ for i := 0; i < len(exp) && i < len(got); i++ {
+ if exp[i] != got[i] {
+ fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
+ }
+ }
+ }
+}
+
+// Tests the go-ethereum to Parity chainspec conversion for the Stureby testnet.
+func TestParitySturebyConverter(t *testing.T) {
+ blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ var genesis core.Genesis
+ if err := json.Unmarshal(blob, &genesis); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ spec, err := newParityChainSpec("Stureby", &genesis, []string{})
+ if err != nil {
+ t.Fatalf("failed creating chainspec: %v", err)
+ }
+
+ expBlob, err := ioutil.ReadFile("testdata/stureby_parity.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ expspec := &parityChainSpec{}
+ if err := json.Unmarshal(expBlob, expspec); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ expspec.Nodes = []string{}
+
+ if !reflect.DeepEqual(expspec, spec) {
+ t.Errorf("chainspec mismatch")
+ c := spew.ConfigState{
+ DisablePointerAddresses: true,
+ SortKeys: true,
+ }
+ exp := strings.Split(c.Sdump(expspec), "\n")
+ got := strings.Split(c.Sdump(spec), "\n")
+ for i := 0; i < len(exp) && i < len(got); i++ {
+ if exp[i] != got[i] {
+ fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_dashboard.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_dashboard.go
index d22bd811..cb3ed6e7 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_dashboard.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_dashboard.go
@@ -640,7 +640,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
files[filepath.Join(workdir, network+".json")] = genesis
if conf.Genesis.Config.Ethash != nil {
- cppSpec, err := newCppEthereumGenesisSpec(network, conf.Genesis)
+ cppSpec, err := newAlethGenesisSpec(network, conf.Genesis)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_ethstats.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_ethstats.go
index a7d99a29..58ecb839 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_ethstats.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_ethstats.go
@@ -43,7 +43,8 @@ version: '2'
services:
ethstats:
build: .
- image: {{.Network}}/ethstats{{if not .VHost}}
+ image: {{.Network}}/ethstats
+ container_name: {{.Network}}_ethstats_1{{if not .VHost}}
ports:
- "{{.Port}}:3000"{{end}}
environment:
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_explorer.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_explorer.go
index e916deaf..e465fa04 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_explorer.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_explorer.go
@@ -77,6 +77,7 @@ services:
explorer:
build: .
image: {{.Network}}/explorer
+ container_name: {{.Network}}_explorer_1
ports:
- "{{.NodePort}}:{{.NodePort}}"
- "{{.NodePort}}:{{.NodePort}}/udp"{{if not .VHost}}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_faucet.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_faucet.go
index 06c9fc0f..3a06bf3c 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_faucet.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_faucet.go
@@ -56,8 +56,10 @@ services:
faucet:
build: .
image: {{.Network}}/faucet
+ container_name: {{.Network}}_faucet_1
ports:
- - "{{.EthPort}}:{{.EthPort}}"{{if not .VHost}}
+ - "{{.EthPort}}:{{.EthPort}}"
+ - "{{.EthPort}}:{{.EthPort}}/udp"{{if not .VHost}}
- "{{.ApiPort}}:8080"{{end}}
volumes:
- {{.Datadir}}:/root/.faucet
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_nginx.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_nginx.go
index 7f87661d..1b1ae61f 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_nginx.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_nginx.go
@@ -40,6 +40,7 @@ services:
nginx:
build: .
image: {{.Network}}/nginx
+ container_name: {{.Network}}_nginx_1
ports:
- "{{.Port}}:80"
volumes:
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_node.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_node.go
index 069adfe4..5d9ef465 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_node.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_node.go
@@ -55,6 +55,7 @@ services:
{{.Type}}:
build: .
image: {{.Network}}/{{.Type}}
+ container_name: {{.Network}}_{{.Type}}_1
ports:
- "{{.Port}}:{{.Port}}"
- "{{.Port}}:{{.Port}}/udp"
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_wallet.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_wallet.go
index 90812c4a..ebaa5b6a 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_wallet.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_wallet.go
@@ -57,6 +57,7 @@ services:
wallet:
build: .
image: {{.Network}}/wallet
+ container_name: {{.Network}}_wallet_1
ports:
- "{{.NodePort}}:{{.NodePort}}"
- "{{.NodePort}}:{{.NodePort}}/udp"
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/puppeth.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/puppeth.go
index f9b8fe48..c3de5f93 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/puppeth.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/puppeth.go
@@ -43,18 +43,23 @@ func main() {
Usage: "log level to emit to the screen",
},
}
- app.Action = func(c *cli.Context) error {
+ app.Before = func(c *cli.Context) error {
// Set up the logger to print everything and the random generator
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int("loglevel")), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
rand.Seed(time.Now().UnixNano())
- network := c.String("network")
- if strings.Contains(network, " ") || strings.Contains(network, "-") {
- log.Crit("No spaces or hyphens allowed in network name")
- }
- // Start the wizard and relinquish control
- makeWizard(c.String("network")).run()
return nil
}
+ app.Action = runWizard
app.Run(os.Args)
}
+
+// runWizard start the wizard and relinquish control to it.
+func runWizard(c *cli.Context) error {
+ network := c.String("network")
+ if strings.Contains(network, " ") || strings.Contains(network, "-") || strings.ToLower(network) != network {
+ log.Crit("No spaces, hyphens or capital letters allowed in network name")
+ }
+ makeWizard(c.String("network")).run()
+ return nil
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_aleth.json b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_aleth.json
new file mode 100644
index 00000000..1ef1d8ae
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_aleth.json
@@ -0,0 +1,112 @@
+{
+ "sealEngine":"Ethash",
+ "params":{
+ "accountStartNonce":"0x00",
+ "maximumExtraDataSize":"0x20",
+ "homesteadForkBlock":"0x2710",
+ "daoHardforkBlock":"0x00",
+ "EIP150ForkBlock":"0x3a98",
+ "EIP158ForkBlock":"0x59d8",
+ "byzantiumForkBlock":"0x7530",
+ "constantinopleForkBlock":"0x9c40",
+ "minGasLimit":"0x1388",
+ "maxGasLimit":"0x7fffffffffffffff",
+ "tieBreakingGas":false,
+ "gasLimitBoundDivisor":"0x0400",
+ "minimumDifficulty":"0x20000",
+ "difficultyBoundDivisor":"0x0800",
+ "durationLimit":"0x0d",
+ "blockReward":"0x4563918244F40000",
+ "networkID":"0x4cb2e",
+ "chainID":"0x4cb2e",
+ "allowFutureBlocks":false
+ },
+ "genesis":{
+ "nonce":"0x0000000000000000",
+ "difficulty":"0x20000",
+ "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "author":"0x0000000000000000000000000000000000000000",
+ "timestamp":"0x59a4e76d",
+ "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+ "gasLimit":"0x47b760"
+ },
+ "accounts":{
+ "0000000000000000000000000000000000000001":{
+ "balance":"1",
+ "precompiled":{
+ "name":"ecrecover",
+ "linear":{
+ "base":3000,
+ "word":0
+ }
+ }
+ },
+ "0000000000000000000000000000000000000002":{
+ "balance":"1",
+ "precompiled":{
+ "name":"sha256",
+ "linear":{
+ "base":60,
+ "word":12
+ }
+ }
+ },
+ "0000000000000000000000000000000000000003":{
+ "balance":"1",
+ "precompiled":{
+ "name":"ripemd160",
+ "linear":{
+ "base":600,
+ "word":120
+ }
+ }
+ },
+ "0000000000000000000000000000000000000004":{
+ "balance":"1",
+ "precompiled":{
+ "name":"identity",
+ "linear":{
+ "base":15,
+ "word":3
+ }
+ }
+ },
+ "0000000000000000000000000000000000000005":{
+ "balance":"1",
+ "precompiled":{
+ "name":"modexp",
+ "startingBlock":"0x7530"
+ }
+ },
+ "0000000000000000000000000000000000000006":{
+ "balance":"1",
+ "precompiled":{
+ "name":"alt_bn128_G1_add",
+ "startingBlock":"0x7530",
+ "linear":{
+ "base":500,
+ "word":0
+ }
+ }
+ },
+ "0000000000000000000000000000000000000007":{
+ "balance":"1",
+ "precompiled":{
+ "name":"alt_bn128_G1_mul",
+ "startingBlock":"0x7530",
+ "linear":{
+ "base":40000,
+ "word":0
+ }
+ }
+ },
+ "0000000000000000000000000000000000000008":{
+ "balance":"1",
+ "precompiled":{
+ "name":"alt_bn128_pairing_product",
+ "startingBlock":"0x7530"
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_geth.json b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_geth.json
new file mode 100644
index 00000000..c8c3b3c9
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_geth.json
@@ -0,0 +1,47 @@
+{
+ "config": {
+ "ethash":{},
+ "chainId": 314158,
+ "homesteadBlock": 10000,
+ "eip150Block": 15000,
+ "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "eip155Block": 23000,
+ "eip158Block": 23000,
+ "byzantiumBlock": 30000,
+ "constantinopleBlock": 40000
+ },
+ "nonce": "0x0",
+ "timestamp": "0x59a4e76d",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+ "gasLimit": "0x47b760",
+ "difficulty": "0x20000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase": "0x0000000000000000000000000000000000000000",
+ "alloc": {
+ "0000000000000000000000000000000000000001": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000002": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000003": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000004": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000005": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000006": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000007": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000008": {
+ "balance": "0x01"
+ }
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_parity.json b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_parity.json
new file mode 100644
index 00000000..f3fa8386
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_parity.json
@@ -0,0 +1,181 @@
+{
+ "name":"Stureby",
+ "dataDir":"stureby",
+ "engine":{
+ "Ethash":{
+ "params":{
+ "minimumDifficulty":"0x20000",
+ "difficultyBoundDivisor":"0x800",
+ "durationLimit":"0xd",
+ "blockReward":{
+ "0x0":"0x4563918244f40000",
+ "0x7530":"0x29a2241af62c0000",
+ "0x9c40":"0x1bc16d674ec80000"
+ },
+ "homesteadTransition":"0x2710",
+ "eip100bTransition":"0x7530",
+ "difficultyBombDelays":{
+ "0x7530":"0x2dc6c0",
+ "0x9c40":"0x1e8480"
+ }
+ }
+ }
+ },
+ "params":{
+ "accountStartNonce":"0x0",
+ "maximumExtraDataSize":"0x20",
+ "gasLimitBoundDivisor":"0x400",
+ "minGasLimit":"0x1388",
+ "networkID":"0x4cb2e",
+ "chainID":"0x4cb2e",
+ "maxCodeSize":"0x6000",
+ "maxCodeSizeTransition":"0x0",
+ "eip98Transition": "0x7fffffffffffffff",
+ "eip150Transition":"0x3a98",
+ "eip160Transition":"0x59d8",
+ "eip161abcTransition":"0x59d8",
+ "eip161dTransition":"0x59d8",
+ "eip155Transition":"0x59d8",
+ "eip140Transition":"0x7530",
+ "eip211Transition":"0x7530",
+ "eip214Transition":"0x7530",
+ "eip658Transition":"0x7530",
+ "eip145Transition":"0x9c40",
+ "eip1014Transition":"0x9c40",
+ "eip1052Transition":"0x9c40",
+ "eip1283Transition":"0x9c40"
+ },
+ "genesis":{
+ "seal":{
+ "ethereum":{
+ "nonce":"0x0000000000000000",
+ "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000"
+ }
+ },
+ "difficulty":"0x20000",
+ "author":"0x0000000000000000000000000000000000000000",
+ "timestamp":"0x59a4e76d",
+ "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+ "gasLimit":"0x47b760"
+ },
+ "nodes":[
+ "enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303",
+ "enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303",
+ "enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303",
+ "enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303",
+ "enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303",
+ "enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303",
+ "enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404",
+ "enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414"
+ ],
+ "accounts":{
+ "0000000000000000000000000000000000000001":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"ecrecover",
+ "pricing":{
+ "linear":{
+ "base":3000,
+ "word":0
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000002":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"sha256",
+ "pricing":{
+ "linear":{
+ "base":60,
+ "word":12
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000003":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"ripemd160",
+ "pricing":{
+ "linear":{
+ "base":600,
+ "word":120
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000004":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"identity",
+ "pricing":{
+ "linear":{
+ "base":15,
+ "word":3
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000005":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"modexp",
+ "activate_at":"0x7530",
+ "pricing":{
+ "modexp":{
+ "divisor":20
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000006":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"alt_bn128_add",
+ "activate_at":"0x7530",
+ "pricing":{
+ "linear":{
+ "base":500,
+ "word":0
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000007":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"alt_bn128_mul",
+ "activate_at":"0x7530",
+ "pricing":{
+ "linear":{
+ "base":40000,
+ "word":0
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000008":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"alt_bn128_pairing",
+ "activate_at":"0x7530",
+ "pricing":{
+ "alt_bn128_pairing":{
+ "base":100000,
+ "pair":80000
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard.go
index b88a61de..83536506 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard.go
@@ -23,6 +23,7 @@ import (
"io/ioutil"
"math/big"
"net"
+ "net/url"
"os"
"path/filepath"
"sort"
@@ -118,6 +119,47 @@ func (w *wizard) readDefaultString(def string) string {
return def
}
+// readDefaultYesNo reads a single line from stdin, trimming if from spaces and
+// interpreting it as a 'yes' or a 'no'. If an empty line is entered, the default
+// value is returned.
+func (w *wizard) readDefaultYesNo(def bool) bool {
+ for {
+ fmt.Printf("> ")
+ text, err := w.in.ReadString('\n')
+ if err != nil {
+ log.Crit("Failed to read user input", "err", err)
+ }
+ if text = strings.ToLower(strings.TrimSpace(text)); text == "" {
+ return def
+ }
+ if text == "y" || text == "yes" {
+ return true
+ }
+ if text == "n" || text == "no" {
+ return false
+ }
+ log.Error("Invalid input, expected 'y', 'yes', 'n', 'no' or empty")
+ }
+}
+
+// readURL reads a single line from stdin, trimming if from spaces and trying to
+// interpret it as a URL (http, https or file).
+func (w *wizard) readURL() *url.URL {
+ for {
+ fmt.Printf("> ")
+ text, err := w.in.ReadString('\n')
+ if err != nil {
+ log.Crit("Failed to read user input", "err", err)
+ }
+ uri, err := url.Parse(strings.TrimSpace(text))
+ if err != nil {
+ log.Error("Invalid input, expected URL", "err", err)
+ continue
+ }
+ return uri
+ }
+}
+
// readInt reads a single line from stdin, trimming if from spaces, enforcing it
// to parse into an integer.
func (w *wizard) readInt() int {
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go
index 1a01631f..8a837084 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go
@@ -137,14 +137,14 @@ func (w *wizard) deployDashboard() {
if w.conf.ethstats != "" {
fmt.Println()
fmt.Println("Include ethstats secret on dashboard (y/n)? (default = yes)")
- infos.trusted = w.readDefaultString("y") == "y"
+ infos.trusted = w.readDefaultYesNo(true)
}
// Try to deploy the dashboard container on the host
nocache := false
if existed {
fmt.Println()
fmt.Printf("Should the dashboard be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployDashboard(client, w.network, &w.conf, infos, nocache); err != nil {
log.Error("Failed to deploy dashboard container", "err", err)
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_ethstats.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_ethstats.go
index fb2529c2..58ff3efb 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_ethstats.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_ethstats.go
@@ -67,11 +67,11 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
// The user might want to clear the entire list, although generally probably not
fmt.Println()
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
- if w.readDefaultString("n") != "n" {
+ if w.readDefaultYesNo(false) {
infos.banned = nil
}
// Offer the user to explicitly add/remove certain IP addresses
@@ -106,7 +106,7 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Should the ethstats be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
trusted := make([]string, 0, len(w.servers))
for _, client := range w.servers {
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_explorer.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_explorer.go
index 413511c1..a128fb9f 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_explorer.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_explorer.go
@@ -100,7 +100,7 @@ func (w *wizard) deployExplorer() {
if existed {
fmt.Println()
fmt.Printf("Should the explorer be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployExplorer(client, w.network, chain, infos, nocache); err != nil {
log.Error("Failed to deploy explorer container", "err", err)
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_faucet.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_faucet.go
index 6f084089..9068c1d3 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_faucet.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_faucet.go
@@ -81,7 +81,7 @@ func (w *wizard) deployFaucet() {
if infos.captchaToken != "" {
fmt.Println()
fmt.Println("Reuse previous reCaptcha API authorization (y/n)? (default = yes)")
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
infos.captchaToken, infos.captchaSecret = "", ""
}
}
@@ -89,7 +89,7 @@ func (w *wizard) deployFaucet() {
// No previous authorization (or old one discarded)
fmt.Println()
fmt.Println("Enable reCaptcha protection against robots (y/n)? (default = no)")
- if w.readDefaultString("n") == "n" {
+ if !w.readDefaultYesNo(false) {
log.Warn("Users will be able to requests funds via automated scripts")
} else {
// Captcha protection explicitly requested, read the site and secret keys
@@ -132,7 +132,7 @@ func (w *wizard) deployFaucet() {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) funding account (y/n)? (default = yes)\n", key.Address.Hex())
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
infos.node.keyJSON, infos.node.keyPass = "", ""
}
}
@@ -166,7 +166,7 @@ func (w *wizard) deployFaucet() {
if existed {
fmt.Println()
fmt.Printf("Should the faucet be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployFaucet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy faucet container", "err", err)
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_genesis.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_genesis.go
index 6c4cd571..95da5bd4 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_genesis.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_genesis.go
@@ -20,9 +20,13 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "io"
"io/ioutil"
"math/big"
"math/rand"
+ "net/http"
+ "os"
+ "path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -40,11 +44,12 @@ func (w *wizard) makeGenesis() {
Difficulty: big.NewInt(524288),
Alloc: make(core.GenesisAlloc),
Config: ¶ms.ChainConfig{
- HomesteadBlock: big.NewInt(1),
- EIP150Block: big.NewInt(2),
- EIP155Block: big.NewInt(3),
- EIP158Block: big.NewInt(3),
- ByzantiumBlock: big.NewInt(4),
+ HomesteadBlock: big.NewInt(1),
+ EIP150Block: big.NewInt(2),
+ EIP155Block: big.NewInt(3),
+ EIP158Block: big.NewInt(3),
+ ByzantiumBlock: big.NewInt(4),
+ ConstantinopleBlock: big.NewInt(5),
},
}
// Figure out which consensus engine to choose
@@ -114,9 +119,13 @@ func (w *wizard) makeGenesis() {
}
break
}
- // Add a batch of precompile balances to avoid them getting deleted
- for i := int64(0); i < 256; i++ {
- genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
+ fmt.Println()
+ fmt.Println("Should the precompile-addresses (0x1 .. 0xff) be pre-funded with 1 wei? (advisable yes)")
+ if w.readDefaultYesNo(true) {
+ // Add a batch of precompile balances to avoid them getting deleted
+ for i := int64(0); i < 256; i++ {
+ genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
+ }
}
// Query the user for some custom extras
fmt.Println()
@@ -130,53 +139,130 @@ func (w *wizard) makeGenesis() {
w.conf.flush()
}
+// importGenesis imports a Geth genesis spec into puppeth.
+func (w *wizard) importGenesis() {
+ // Request the genesis JSON spec URL from the user
+ fmt.Println()
+ fmt.Println("Where's the genesis file? (local file or http/https url)")
+ url := w.readURL()
+
+ // Convert the various allowed URLs to a reader stream
+ var reader io.Reader
+
+ switch url.Scheme {
+ case "http", "https":
+ // Remote web URL, retrieve it via an HTTP client
+ res, err := http.Get(url.String())
+ if err != nil {
+ log.Error("Failed to retrieve remote genesis", "err", err)
+ return
+ }
+ defer res.Body.Close()
+ reader = res.Body
+
+ case "":
+ // Schemaless URL, interpret as a local file
+ file, err := os.Open(url.String())
+ if err != nil {
+ log.Error("Failed to open local genesis", "err", err)
+ return
+ }
+ defer file.Close()
+ reader = file
+
+ default:
+ log.Error("Unsupported genesis URL scheme", "scheme", url.Scheme)
+ return
+ }
+ // Parse the genesis file and inject it successful
+ var genesis core.Genesis
+ if err := json.NewDecoder(reader).Decode(&genesis); err != nil {
+ log.Error("Invalid genesis spec: %v", err)
+ return
+ }
+ log.Info("Imported genesis block")
+
+ w.conf.Genesis = &genesis
+ w.conf.flush()
+}
+
// manageGenesis permits the modification of chain configuration parameters in
// a genesis config and the export of the entire genesis spec.
func (w *wizard) manageGenesis() {
// Figure out whether to modify or export the genesis
fmt.Println()
fmt.Println(" 1. Modify existing fork rules")
- fmt.Println(" 2. Export genesis configuration")
+ fmt.Println(" 2. Export genesis configurations")
fmt.Println(" 3. Remove genesis configuration")
choice := w.read()
- switch {
- case choice == "1":
+ switch choice {
+ case "1":
// Fork rule updating requested, iterate over each fork
fmt.Println()
fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.Genesis.Config.HomesteadBlock)
w.conf.Genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.Genesis.Config.HomesteadBlock)
fmt.Println()
- fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
+ fmt.Printf("Which block should EIP150 (Tangerine Whistle) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
w.conf.Genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP150Block)
fmt.Println()
- fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
+ fmt.Printf("Which block should EIP155 (Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
w.conf.Genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP155Block)
fmt.Println()
- fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
+ fmt.Printf("Which block should EIP158/161 (also Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
w.conf.Genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP158Block)
fmt.Println()
fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.Genesis.Config.ByzantiumBlock)
w.conf.Genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ByzantiumBlock)
+ fmt.Println()
+ fmt.Printf("Which block should Constantinople come into effect? (default = %v)\n", w.conf.Genesis.Config.ConstantinopleBlock)
+ w.conf.Genesis.Config.ConstantinopleBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ConstantinopleBlock)
+
out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ")
fmt.Printf("Chain configuration updated:\n\n%s\n", out)
- case choice == "2":
+ case "2":
// Save whatever genesis configuration we currently have
fmt.Println()
- fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
- out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")
- if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
- log.Error("Failed to save genesis file", "err", err)
- }
- log.Info("Exported existing genesis block")
+ fmt.Printf("Which folder to save the genesis specs into? (default = current)\n")
+ fmt.Printf(" Will create %s.json, %s-aleth.json, %s-harmony.json, %s-parity.json\n", w.network, w.network, w.network, w.network)
- case choice == "3":
+ folder := w.readDefaultString(".")
+ if err := os.MkdirAll(folder, 0755); err != nil {
+ log.Error("Failed to create spec folder", "folder", folder, "err", err)
+ return
+ }
+ out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")
+
+ // Export the native genesis spec used by puppeth and Geth
+ gethJson := filepath.Join(folder, fmt.Sprintf("%s.json", w.network))
+ if err := ioutil.WriteFile((gethJson), out, 0644); err != nil {
+ log.Error("Failed to save genesis file", "err", err)
+ return
+ }
+ log.Info("Saved native genesis chain spec", "path", gethJson)
+
+ // Export the genesis spec used by Aleth (formerly C++ Ethereum)
+ if spec, err := newAlethGenesisSpec(w.network, w.conf.Genesis); err != nil {
+ log.Error("Failed to create Aleth chain spec", "err", err)
+ } else {
+ saveGenesis(folder, w.network, "aleth", spec)
+ }
+ // Export the genesis spec used by Parity
+ if spec, err := newParityChainSpec(w.network, w.conf.Genesis, []string{}); err != nil {
+ log.Error("Failed to create Parity chain spec", "err", err)
+ } else {
+ saveGenesis(folder, w.network, "parity", spec)
+ }
+ // Export the genesis spec used by Harmony (formerly EthereumJ
+ saveGenesis(folder, w.network, "harmony", w.conf.Genesis)
+
+ case "3":
// Make sure we don't have any services running
if len(w.conf.servers()) > 0 {
log.Error("Genesis reset requires all services and servers torn down")
@@ -186,8 +272,20 @@ func (w *wizard) manageGenesis() {
w.conf.Genesis = nil
w.conf.flush()
-
default:
log.Error("That's not something I can do")
+ return
}
}
+
+// saveGenesis JSON encodes an arbitrary genesis spec into a pre-defined file.
+func saveGenesis(folder, network, client string, spec interface{}) {
+ path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))
+
+ out, _ := json.Marshal(spec)
+ if err := ioutil.WriteFile(path, out, 0644); err != nil {
+ log.Error("Failed to save genesis file", "client", client, "err", err)
+ return
+ }
+ log.Info("Saved genesis chain spec", "client", client, "path", path)
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_intro.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_intro.go
index 60aa0f7f..75fb04b7 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_intro.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_intro.go
@@ -61,14 +61,14 @@ func (w *wizard) run() {
// Make sure we have a good network name to work with fmt.Println()
// Docker accepts hyphens in image names, but doesn't like it for container names
if w.network == "" {
- fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)")
+ fmt.Println("Please specify a network name to administer (no spaces, hyphens or capital letters please)")
for {
w.network = w.readString()
- if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") {
+ if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") && strings.ToLower(w.network) == w.network {
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
break
}
- log.Error("I also like to live dangerously, still no spaces or hyphens")
+ log.Error("I also like to live dangerously, still no spaces, hyphens or capital letters")
}
}
log.Info("Administering Ethereum network", "name", w.network)
@@ -131,7 +131,20 @@ func (w *wizard) run() {
case choice == "2":
if w.conf.Genesis == nil {
- w.makeGenesis()
+ fmt.Println()
+ fmt.Println("What would you like to do? (default = create)")
+ fmt.Println(" 1. Create new genesis from scratch")
+ fmt.Println(" 2. Import already existing genesis")
+
+ choice := w.read()
+ switch {
+ case choice == "" || choice == "1":
+ w.makeGenesis()
+ case choice == "2":
+ w.importGenesis()
+ default:
+ log.Error("That's not something I can do")
+ }
} else {
w.manageGenesis()
}
@@ -149,7 +162,6 @@ func (w *wizard) run() {
} else {
w.manageComponents()
}
-
default:
log.Error("That's not something I can do")
}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_nginx.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_nginx.go
index 4eeae93a..8397b7fd 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_nginx.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_nginx.go
@@ -41,12 +41,12 @@ func (w *wizard) ensureVirtualHost(client *sshClient, port int, def string) (str
// Reverse proxy is not running, offer to deploy a new one
fmt.Println()
fmt.Println("Allow sharing the port with other services (y/n)? (default = yes)")
- if w.readDefaultString("y") == "y" {
+ if w.readDefaultYesNo(true) {
nocache := false
if proxy != nil {
fmt.Println()
fmt.Printf("Should the reverse-proxy be rebuilt from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployNginx(client, w.network, port, nocache); err != nil {
log.Error("Failed to deploy reverse-proxy", "err", err)
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_node.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_node.go
index 49b10a02..e37297f6 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_node.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_node.go
@@ -126,7 +126,7 @@ func (w *wizard) deployNode(boot bool) {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) signing account (y/n)? (default = yes)\n", key.Address.Hex())
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
infos.keyJSON, infos.keyPass = "", ""
}
}
@@ -165,7 +165,7 @@ func (w *wizard) deployNode(boot bool) {
if existed {
fmt.Println()
fmt.Printf("Should the node be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployNode(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy Ethereum node container", "err", err)
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_wallet.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_wallet.go
index 7624d11e..ca1ea5bd 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_wallet.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_wallet.go
@@ -96,7 +96,7 @@ func (w *wizard) deployWallet() {
if existed {
fmt.Println()
fmt.Printf("Should the wallet be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployWallet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy wallet container", "err", err)
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access_test.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access_test.go
index e812cd8f..9357c577 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access_test.go
@@ -14,8 +14,6 @@
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see .
-// +build !windows
-
package main
import (
@@ -28,6 +26,7 @@ import (
gorand "math/rand"
"net/http"
"os"
+ "runtime"
"strings"
"testing"
"time"
@@ -37,8 +36,7 @@ import (
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
- swarm "github.com/ethereum/go-ethereum/swarm/api/client"
- swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
+ swarmapi "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
@@ -49,22 +47,41 @@ const (
var DefaultCurve = crypto.S256()
-// TestAccessPassword tests for the correct creation of an ACT manifest protected by a password.
+func TestACT(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip()
+ }
+
+ initCluster(t)
+
+ cases := []struct {
+ name string
+ f func(t *testing.T)
+ }{
+ {"Password", testPassword},
+ {"PK", testPK},
+ {"ACTWithoutBogus", testACTWithoutBogus},
+ {"ACTWithBogus", testACTWithBogus},
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, tc.f)
+ }
+}
+
+// testPassword tests for the correct creation of an ACT manifest protected by a password.
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry
// The parties participating - node (publisher), uploads to second node then disappears. Content which was uploaded
// is then fetched through 2nd node. since the tested code is not key-aware - we can just
// fetch from the 2nd node using HTTP BasicAuth
-func TestAccessPassword(t *testing.T) {
- srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
- defer srv.Close()
-
+func testPassword(t *testing.T) {
dataFilename := testutil.TempFileWithContent(t, data)
defer os.RemoveAll(dataFilename)
// upload the file with 'swarm up' and expect a hash
up := runSwarm(t,
"--bzzapi",
- srv.URL, //it doesn't matter through which node we upload content
+ cluster.Nodes[0].URL,
"up",
"--encrypt",
dataFilename)
@@ -138,16 +155,17 @@ func TestAccessPassword(t *testing.T) {
if a.Publisher != "" {
t.Fatal("should be empty")
}
- client := swarm.NewClient(srv.URL)
+
+ client := swarmapi.NewClient(cluster.Nodes[0].URL)
hash, err := client.UploadManifest(&m, false)
if err != nil {
t.Fatal(err)
}
- httpClient := &http.Client{}
+ url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash
- url := srv.URL + "/" + "bzz:/" + hash
+ httpClient := &http.Client{}
response, err := httpClient.Get(url)
if err != nil {
t.Fatal(err)
@@ -189,7 +207,7 @@ func TestAccessPassword(t *testing.T) {
//download file with 'swarm down' with wrong password
up = runSwarm(t,
"--bzzapi",
- srv.URL,
+ cluster.Nodes[0].URL,
"down",
"bzz:/"+hash,
tmp,
@@ -203,16 +221,12 @@ func TestAccessPassword(t *testing.T) {
up.ExpectExit()
}
-// TestAccessPK tests for the correct creation of an ACT manifest between two parties (publisher and grantee).
+// testPK tests for the correct creation of an ACT manifest between two parties (publisher and grantee).
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry
// The parties participating - node (publisher), uploads to second node (which is also the grantee) then disappears.
// Content which was uploaded is then fetched through the grantee's http proxy. Since the tested code is private-key aware,
// the test will fail if the proxy's given private key is not granted on the ACT.
-func TestAccessPK(t *testing.T) {
- // Setup Swarm and upload a test file to it
- cluster := newTestCluster(t, 2)
- defer cluster.Shutdown()
-
+func testPK(t *testing.T) {
dataFilename := testutil.TempFileWithContent(t, data)
defer os.RemoveAll(dataFilename)
@@ -318,7 +332,7 @@ func TestAccessPK(t *testing.T) {
if a.Publisher != pkComp {
t.Fatal("publisher key did not match")
}
- client := swarm.NewClient(cluster.Nodes[0].URL)
+ client := swarmapi.NewClient(cluster.Nodes[0].URL)
hash, err := client.UploadManifest(&m, false)
if err != nil {
@@ -344,29 +358,24 @@ func TestAccessPK(t *testing.T) {
}
}
-// TestAccessACT tests the creation of the ACT manifest end-to-end, without any bogus entries (i.e. default scenario = 3 nodes 1 unauthorized)
-func TestAccessACT(t *testing.T) {
- testAccessACT(t, 0)
+// testACTWithoutBogus tests the creation of the ACT manifest end-to-end, without any bogus entries (i.e. default scenario = 3 nodes 1 unauthorized)
+func testACTWithoutBogus(t *testing.T) {
+ testACT(t, 0)
}
-// TestAccessACTScale tests the creation of the ACT manifest end-to-end, with 1000 bogus entries (i.e. 1000 EC keys + default scenario = 3 nodes 1 unauthorized = 1003 keys in the ACT manifest)
-func TestAccessACTScale(t *testing.T) {
- testAccessACT(t, 1000)
+// testACTWithBogus tests the creation of the ACT manifest end-to-end, with 100 bogus entries (i.e. 100 EC keys + default scenario = 3 nodes 1 unauthorized = 103 keys in the ACT manifest)
+func testACTWithBogus(t *testing.T) {
+ testACT(t, 100)
}
-// TestAccessACT tests the e2e creation, uploading and downloading of an ACT access control with both EC keys AND password protection
+// testACT tests the e2e creation, uploading and downloading of an ACT access control with both EC keys AND password protection
// the test fires up a 3 node cluster, then randomly picks 2 nodes which will be acting as grantees to the data
// set and also protects the ACT with a password. the third node should fail decoding the reference as it will not be granted access.
// the third node then then tries to download using a correct password (and succeeds) then uses a wrong password and fails.
// the publisher uploads through one of the nodes then disappears.
-func testAccessACT(t *testing.T, bogusEntries int) {
- // Setup Swarm and upload a test file to it
- const clusterSize = 3
- cluster := newTestCluster(t, clusterSize)
- defer cluster.Shutdown()
-
+func testACT(t *testing.T, bogusEntries int) {
var uploadThroughNode = cluster.Nodes[0]
- client := swarm.NewClient(uploadThroughNode.URL)
+ client := swarmapi.NewClient(uploadThroughNode.URL)
r1 := gorand.New(gorand.NewSource(time.Now().UnixNano()))
nodeToSkip := r1.Intn(clusterSize) // a number between 0 and 2 (node indices in `cluster`)
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/config_test.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/config_test.go
index 02198f87..18be316e 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/config_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/config_test.go
@@ -26,14 +26,14 @@ import (
"testing"
"time"
+ "github.com/docker/docker/pkg/reexec"
+ "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm"
"github.com/ethereum/go-ethereum/swarm/api"
-
- "github.com/docker/docker/pkg/reexec"
)
-func TestDumpConfig(t *testing.T) {
+func TestConfigDump(t *testing.T) {
swarm := runSwarm(t, "dumpconfig")
defaultConf := api.NewConfig()
out, err := tomlSettings.Marshal(&defaultConf)
@@ -91,8 +91,8 @@ func TestConfigCmdLineOverrides(t *testing.T) {
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
- "--datadir", dir,
- "--ipcpath", conf.IPCPath,
+ fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@@ -189,9 +189,9 @@ func TestConfigFileOverrides(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
- "--ens-api", "",
- "--ipcpath", conf.IPCPath,
- "--datadir", dir,
+ fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
+ fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@@ -407,9 +407,9 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
- "--ens-api", "",
- "--datadir", dir,
- "--ipcpath", conf.IPCPath,
+ fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
+ fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@@ -466,7 +466,7 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
node.Shutdown()
}
-func TestValidateConfig(t *testing.T) {
+func TestConfigValidate(t *testing.T) {
for _, c := range []struct {
cfg *api.Config
err string
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/export_test.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/export_test.go
index f1bc2f26..e8671eea 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/export_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/export_test.go
@@ -43,8 +43,8 @@ func TestCLISwarmExportImport(t *testing.T) {
}
cluster := newTestCluster(t, 1)
- // generate random 10mb file
- content := testutil.RandomBytes(1, 10000000)
+ // generate random 1mb file
+ content := testutil.RandomBytes(1, 1000000)
fileName := testutil.TempFileWithContent(t, string(content))
defer os.Remove(fileName)
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds.go
index f26a8cc7..6cd971a9 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds.go
@@ -169,7 +169,6 @@ func feedUpdate(ctx *cli.Context) {
query = new(feed.Query)
query.User = signer.Address()
query.Topic = getTopic(ctx)
-
}
// Retrieve a feed update request
@@ -178,6 +177,11 @@ func feedUpdate(ctx *cli.Context) {
utils.Fatalf("Error retrieving feed status: %s", err.Error())
}
+ // Check that the provided signer matches the request to sign
+ if updateRequest.User != signer.Address() {
+ utils.Fatalf("Signer address does not match the update request")
+ }
+
// set the new data
updateRequest.SetData(data)
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds_test.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds_test.go
index fc3f72ab..4c40f62a 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds_test.go
@@ -19,7 +19,6 @@ package main
import (
"bytes"
"encoding/json"
- "fmt"
"io/ioutil"
"os"
"testing"
@@ -36,7 +35,6 @@ import (
)
func TestCLIFeedUpdate(t *testing.T) {
-
srv := swarmhttp.NewTestSwarmServer(t, func(api *api.API) swarmhttp.TestServer {
return swarmhttp.NewServer(api, "")
}, nil)
@@ -44,7 +42,6 @@ func TestCLIFeedUpdate(t *testing.T) {
defer srv.Close()
// create a private key file for signing
-
privkeyHex := "0000000000000000000000000000000000000000000000000000000000001979"
privKey, _ := crypto.HexToECDSA(privkeyHex)
address := crypto.PubkeyToAddress(privKey.PublicKey)
@@ -71,7 +68,7 @@ func TestCLIFeedUpdate(t *testing.T) {
hexData}
// create an update and expect an exit without errors
- log.Info(fmt.Sprintf("updating a feed with 'swarm feed update'"))
+ log.Info("updating a feed with 'swarm feed update'")
cmd := runSwarm(t, flags...)
cmd.ExpectExit()
@@ -118,7 +115,7 @@ func TestCLIFeedUpdate(t *testing.T) {
"--user", address.Hex(),
}
- log.Info(fmt.Sprintf("getting feed info with 'swarm feed info'"))
+ log.Info("getting feed info with 'swarm feed info'")
cmd = runSwarm(t, flags...)
_, matches := cmd.ExpectRegexp(`.*`) // regex hack to extract stdout
cmd.ExpectExit()
@@ -143,9 +140,9 @@ func TestCLIFeedUpdate(t *testing.T) {
"--topic", topic.Hex(),
}
- log.Info(fmt.Sprintf("Publishing manifest with 'swarm feed create'"))
+ log.Info("Publishing manifest with 'swarm feed create'")
cmd = runSwarm(t, flags...)
- _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`) // regex hack to extract stdout
+ _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
cmd.ExpectExit()
manifestAddress := matches[0] // read the received feed manifest
@@ -164,4 +161,36 @@ func TestCLIFeedUpdate(t *testing.T) {
if !bytes.Equal(data, retrieved) {
t.Fatalf("Received %s, expected %s", retrieved, data)
}
+
+ // test publishing a manifest for a different user
+ flags = []string{
+ "--bzzapi", srv.URL,
+ "feed", "create",
+ "--topic", topic.Hex(),
+ "--user", "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // different user
+ }
+
+ log.Info("Publishing manifest with 'swarm feed create' for a different user")
+ cmd = runSwarm(t, flags...)
+ _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
+ cmd.ExpectExit()
+
+ manifestAddress = matches[0] // read the received feed manifest
+
+ // now let's try to update that user's manifest which we don't have the private key for
+ flags = []string{
+ "--bzzapi", srv.URL,
+ "--bzzaccount", pkFileName,
+ "feed", "update",
+ "--manifest", manifestAddress,
+ hexData}
+
+ // create an update and expect an error given there is a user mismatch
+ log.Info("updating a feed with 'swarm feed update'")
+ cmd = runSwarm(t, flags...)
+ cmd.ExpectRegexp("Fatal:.*") // best way so far to detect a failure.
+ cmd.ExpectExit()
+ if cmd.ExitStatus() == 0 {
+ t.Fatal("Expected nonzero exit code when updating a manifest with the wrong user. Got 0.")
+ }
}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs.go
index b970b2e8..edeeddff 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs.go
@@ -24,7 +24,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
- "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/fuse"
"gopkg.in/urfave/cli.v1"
@@ -41,27 +41,24 @@ var fsCommand = cli.Command{
Action: mount,
CustomHelpTemplate: helpTemplate,
Name: "mount",
- Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "mount a swarm hash to a mount point",
- ArgsUsage: "swarm fs mount --ipcpath ",
+ ArgsUsage: "swarm fs mount ",
Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: unmount,
CustomHelpTemplate: helpTemplate,
Name: "unmount",
- Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "unmount a swarmfs mount",
- ArgsUsage: "swarm fs unmount --ipcpath ",
+ ArgsUsage: "swarm fs unmount ",
Description: "Unmounts a swarmfs mount residing at . This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: listMounts,
CustomHelpTemplate: helpTemplate,
Name: "list",
- Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "list swarmfs mounts",
- ArgsUsage: "swarm fs list --ipcpath ",
+ ArgsUsage: "swarm fs list",
Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
},
@@ -70,7 +67,7 @@ var fsCommand = cli.Command{
func mount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 2 {
- utils.Fatalf("Usage: swarm fs mount --ipcpath ")
+ utils.Fatalf("Usage: swarm fs mount ")
}
client, err := dialRPC(cliContext)
@@ -97,7 +94,7 @@ func unmount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 1 {
- utils.Fatalf("Usage: swarm fs unmount --ipcpath ")
+ utils.Fatalf("Usage: swarm fs unmount ")
}
client, err := dialRPC(cliContext)
if err != nil {
@@ -145,20 +142,21 @@ func listMounts(cliContext *cli.Context) {
}
func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
- var endpoint string
+ endpoint := getIPCEndpoint(ctx)
+ log.Info("IPC endpoint", "path", endpoint)
+ return rpc.Dial(endpoint)
+}
- if ctx.IsSet(utils.IPCPathFlag.Name) {
- endpoint = ctx.String(utils.IPCPathFlag.Name)
- } else {
- utils.Fatalf("swarm ipc endpoint not specified")
- }
+func getIPCEndpoint(ctx *cli.Context) string {
+ cfg := defaultNodeConfig
+ utils.SetNodeConfig(ctx, &cfg)
- if endpoint == "" {
- endpoint = node.DefaultIPCEndpoint(clientIdentifier)
- } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
+ endpoint := cfg.IPCEndpoint()
+
+ if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
// Backwards compatibility with geth < 1.5 which required
// these prefixes.
endpoint = endpoint[4:]
}
- return rpc.Dial(endpoint)
+ return endpoint
}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs_test.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs_test.go
index 3b722515..5f58d6c0 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs_test.go
@@ -20,6 +20,7 @@ package main
import (
"bytes"
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -28,20 +29,35 @@ import (
"testing"
"time"
+ "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
- colorable "github.com/mattn/go-colorable"
)
-func init() {
- log.PrintOrigins(true)
- log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
-}
-
type testFile struct {
filePath string
content string
}
+// TestCLISwarmFsDefaultIPCPath tests if the most basic fs command, i.e., list
+// can find and correctly connect to a running Swarm node on the default
+// IPCPath.
+func TestCLISwarmFsDefaultIPCPath(t *testing.T) {
+ cluster := newTestCluster(t, 1)
+ defer cluster.Shutdown()
+
+ handlingNode := cluster.Nodes[0]
+ list := runSwarm(t, []string{
+ "--datadir", handlingNode.Dir,
+ "fs",
+ "list",
+ }...)
+
+ list.WaitExit()
+ if list.Err != nil {
+ t.Fatal(list.Err)
+ }
+}
+
// TestCLISwarmFs is a high-level test of swarmfs
//
// This test fails on travis for macOS as this executable exits with code 1
@@ -65,9 +81,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
mount := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"mount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
mhash,
mountPoint,
}...)
@@ -107,9 +123,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
unmount := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"unmount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
mountPoint,
}...)
_, matches := unmount.ExpectRegexp(hashRegexp)
@@ -142,9 +158,9 @@ func TestCLISwarmFs(t *testing.T) {
//remount, check files
newMount := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"mount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
hash, // the latest hash
secondMountPoint,
}...)
@@ -178,9 +194,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
unmountSec := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"unmount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
secondMountPoint,
}...)
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/run_test.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/run_test.go
index 416fa7a5..680d238d 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/run_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/run_test.go
@@ -57,6 +57,17 @@ func init() {
})
}
+const clusterSize = 3
+
+var clusteronce sync.Once
+var cluster *testCluster
+
+func initCluster(t *testing.T) {
+ clusteronce.Do(func() {
+ cluster = newTestCluster(t, clusterSize)
+ })
+}
+
func serverFunc(api *api.API) swarmhttp.TestServer {
return swarmhttp.NewServer(api, "")
}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/feed_upload_and_sync.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
index 1371d665..2c5e3fd2 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
@@ -2,11 +2,13 @@ package main
import (
"bytes"
+ "context"
"crypto/md5"
"fmt"
"io"
"io/ioutil"
"net/http"
+ "net/http/httptrace"
"os"
"os/exec"
"strings"
@@ -16,9 +18,13 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/swarm/multihash"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/swarm/api/client"
+ "github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
colorable "github.com/mattn/go-colorable"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
@@ -27,16 +33,34 @@ const (
feedRandomDataLength = 8
)
-// TODO: retrieve with manifest + extract repeating code
func cliFeedUploadAndSync(c *cli.Context) error {
-
+ metrics.GetOrRegisterCounter("feed-and-sync", nil).Inc(1)
log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))))
+ errc := make(chan error)
+ go func() {
+ errc <- feedUploadAndSync(c)
+ }()
+
+ select {
+ case err := <-errc:
+ if err != nil {
+ metrics.GetOrRegisterCounter("feed-and-sync.fail", nil).Inc(1)
+ }
+ return err
+ case <-time.After(time.Duration(timeout) * time.Second):
+ metrics.GetOrRegisterCounter("feed-and-sync.timeout", nil).Inc(1)
+ return fmt.Errorf("timeout after %v sec", timeout)
+ }
+}
+
+// TODO: retrieve with manifest + extract repeating code
+func feedUploadAndSync(c *cli.Context) error {
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
- generateEndpoints(scheme, cluster, from, to)
+ generateEndpoints(scheme, cluster, appName, from, to)
- log.Info("generating and uploading MRUs to " + endpoints[0] + " and syncing")
+ log.Info("generating and uploading feeds to " + endpoints[0] + " and syncing")
// create a random private key to sign updates with and derive the address
pkFile, err := ioutil.TempFile("", "swarm-feed-smoke-test")
@@ -205,12 +229,12 @@ func cliFeedUploadAndSync(c *cli.Context) error {
log.Info("all endpoints synced random data successfully")
// upload test file
- log.Info("uploading to " + endpoints[0] + " and syncing")
+ seed := int(time.Now().UnixNano() / 1e6)
+ log.Info("feed uploading to "+endpoints[0]+" and syncing", "seed", seed)
- f, cleanup := generateRandomFile(filesize * 1000)
- defer cleanup()
+ randomBytes := testutil.RandomBytes(seed, filesize*1000)
- hash, err := upload(f, endpoints[0])
+ hash, err := upload(&randomBytes, endpoints[0])
if err != nil {
return err
}
@@ -218,9 +242,8 @@ func cliFeedUploadAndSync(c *cli.Context) error {
if err != nil {
return err
}
- multihashHex := hexutil.Encode(multihash.ToMultihash(hashBytes))
-
- fileHash, err := digest(f)
+ multihashHex := hexutil.Encode(hashBytes)
+ fileHash, err := digest(bytes.NewReader(randomBytes))
if err != nil {
return err
}
@@ -286,14 +309,37 @@ func cliFeedUploadAndSync(c *cli.Context) error {
}
func fetchFeed(topic string, user string, endpoint string, original []byte, ruid string) error {
+ ctx, sp := spancontext.StartSpan(context.Background(), "feed-and-sync.fetch")
+ defer sp.Finish()
+
log.Trace("sleeping", "ruid", ruid)
time.Sleep(3 * time.Second)
log.Trace("http get request (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user)
- res, err := http.Get(endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user)
+
+ var tn time.Time
+ reqUri := endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user
+ req, _ := http.NewRequest("GET", reqUri, nil)
+
+ opentracing.GlobalTracer().Inject(
+ sp.Context(),
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(req.Header))
+
+ trace := client.GetClientTrace("feed-and-sync - http get", "feed-and-sync", ruid, &tn)
+
+ req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
+ transport := http.DefaultTransport
+
+ //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+
+ tn = time.Now()
+ res, err := transport.RoundTrip(req)
if err != nil {
+ log.Error(err.Error(), "ruid", ruid)
return err
}
+
log.Trace("http get response (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user, "code", res.StatusCode, "len", res.ContentLength)
if res.StatusCode != 200 {
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go
index 97054dd4..66cecdc5 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go
@@ -17,23 +17,38 @@
package main
import (
+ "fmt"
"os"
"sort"
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ gethmetrics "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/metrics/influxdb"
+ swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
+ "github.com/ethereum/go-ethereum/swarm/tracing"
+
"github.com/ethereum/go-ethereum/log"
cli "gopkg.in/urfave/cli.v1"
)
+var (
+ gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
+)
+
var (
endpoints []string
includeLocalhost bool
cluster string
+ appName string
scheme string
filesize int
+ syncDelay int
from int
to int
verbosity int
+ timeout int
+ single bool
)
func main() {
@@ -45,10 +60,16 @@ func main() {
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "cluster-endpoint",
- Value: "testing",
- Usage: "cluster to point to (local, open or testing)",
+ Value: "prod",
+ Usage: "cluster to point to (prod or a given namespace)",
Destination: &cluster,
},
+ cli.StringFlag{
+ Name: "app",
+ Value: "swarm",
+ Usage: "application to point to (swarm or swarm-private)",
+ Destination: &appName,
+ },
cli.IntFlag{
Name: "cluster-from",
Value: 8501,
@@ -78,14 +99,42 @@ func main() {
Usage: "file size for generated random file in KB",
Destination: &filesize,
},
+ cli.IntFlag{
+ Name: "sync-delay",
+ Value: 5,
+ Usage: "duration of delay in seconds to wait for content to be synced",
+ Destination: &syncDelay,
+ },
cli.IntFlag{
Name: "verbosity",
Value: 1,
Usage: "verbosity",
Destination: &verbosity,
},
+ cli.IntFlag{
+ Name: "timeout",
+ Value: 120,
+ Usage: "timeout in seconds after which kill the process",
+ Destination: &timeout,
+ },
+ cli.BoolFlag{
+ Name: "single",
+ Usage: "whether to fetch content from a single node or from all nodes",
+ Destination: &single,
+ },
}
+ app.Flags = append(app.Flags, []cli.Flag{
+ utils.MetricsEnabledFlag,
+ swarmmetrics.MetricsInfluxDBEndpointFlag,
+ swarmmetrics.MetricsInfluxDBDatabaseFlag,
+ swarmmetrics.MetricsInfluxDBUsernameFlag,
+ swarmmetrics.MetricsInfluxDBPasswordFlag,
+ swarmmetrics.MetricsInfluxDBHostTagFlag,
+ }...)
+
+ app.Flags = append(app.Flags, tracing.Flags...)
+
app.Commands = []cli.Command{
{
Name: "upload_and_sync",
@@ -104,8 +153,38 @@ func main() {
sort.Sort(cli.FlagsByName(app.Flags))
sort.Sort(cli.CommandsByName(app.Commands))
+ app.Before = func(ctx *cli.Context) error {
+ tracing.Setup(ctx)
+ return nil
+ }
+
+ app.After = func(ctx *cli.Context) error {
+ return emitMetrics(ctx)
+ }
+
err := app.Run(os.Args)
if err != nil {
log.Error(err.Error())
+
+ os.Exit(1)
}
}
+
+func emitMetrics(ctx *cli.Context) error {
+ if gethmetrics.Enabled {
+ var (
+ endpoint = ctx.GlobalString(swarmmetrics.MetricsInfluxDBEndpointFlag.Name)
+ database = ctx.GlobalString(swarmmetrics.MetricsInfluxDBDatabaseFlag.Name)
+ username = ctx.GlobalString(swarmmetrics.MetricsInfluxDBUsernameFlag.Name)
+ password = ctx.GlobalString(swarmmetrics.MetricsInfluxDBPasswordFlag.Name)
+ hosttag = ctx.GlobalString(swarmmetrics.MetricsInfluxDBHostTagFlag.Name)
+ )
+ return influxdb.InfluxDBWithTagsOnce(gethmetrics.DefaultRegistry, endpoint, database, username, password, "swarm-smoke.", map[string]string{
+ "host": hosttag,
+ "version": gitCommit,
+ "filesize": fmt.Sprintf("%v", filesize),
+ })
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go
index 358141c7..d605f79a 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go
@@ -18,39 +18,41 @@ package main
import (
"bytes"
+ "context"
"crypto/md5"
crand "crypto/rand"
"errors"
"fmt"
"io"
"io/ioutil"
+ "math/rand"
"net/http"
+ "net/http/httptrace"
"os"
- "os/exec"
- "strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/api/client"
+ "github.com/ethereum/go-ethereum/swarm/spancontext"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
-func generateEndpoints(scheme string, cluster string, from int, to int) {
+func generateEndpoints(scheme string, cluster string, app string, from int, to int) {
if cluster == "prod" {
- cluster = ""
- } else if cluster == "local" {
- for port := from; port <= to; port++ {
- endpoints = append(endpoints, fmt.Sprintf("%s://localhost:%v", scheme, port))
+ for port := from; port < to; port++ {
+ endpoints = append(endpoints, fmt.Sprintf("%s://%v.swarm-gateways.net", scheme, port))
}
- return
} else {
- cluster = cluster + "."
- }
-
- for port := from; port <= to; port++ {
- endpoints = append(endpoints, fmt.Sprintf("%s://%v.%sswarm-gateways.net", scheme, port, cluster))
+ for port := from; port < to; port++ {
+ endpoints = append(endpoints, fmt.Sprintf("%s://%s-%v-%s.stg.swarm-gateways.net", scheme, app, port, cluster))
+ }
}
if includeLocalhost {
@@ -59,22 +61,51 @@ func generateEndpoints(scheme string, cluster string, from int, to int) {
}
func cliUploadAndSync(c *cli.Context) error {
- defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
+ log.PrintOrigins(true)
+ log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
- generateEndpoints(scheme, cluster, from, to)
+ metrics.GetOrRegisterCounter("upload-and-sync", nil).Inc(1)
- log.Info("uploading to " + endpoints[0] + " and syncing")
+ errc := make(chan error)
+ go func() {
+ errc <- uploadAndSync(c)
+ }()
- f, cleanup := generateRandomFile(filesize * 1000)
- defer cleanup()
+ select {
+ case err := <-errc:
+ if err != nil {
+ metrics.GetOrRegisterCounter("upload-and-sync.fail", nil).Inc(1)
+ }
+ return err
+ case <-time.After(time.Duration(timeout) * time.Second):
+ metrics.GetOrRegisterCounter("upload-and-sync.timeout", nil).Inc(1)
+ return fmt.Errorf("timeout after %v sec", timeout)
+ }
+}
- hash, err := upload(f, endpoints[0])
+func uploadAndSync(c *cli.Context) error {
+ defer func(now time.Time) {
+ totalTime := time.Since(now)
+
+ log.Info("total time", "time", totalTime, "kb", filesize)
+ metrics.GetOrRegisterCounter("upload-and-sync.total-time", nil).Inc(int64(totalTime))
+ }(time.Now())
+
+ generateEndpoints(scheme, cluster, appName, from, to)
+ seed := int(time.Now().UnixNano() / 1e6)
+ log.Info("uploading to "+endpoints[0]+" and syncing", "seed", seed)
+
+ randomBytes := testutil.RandomBytes(seed, filesize*1000)
+
+ t1 := time.Now()
+ hash, err := upload(&randomBytes, endpoints[0])
if err != nil {
log.Error(err.Error())
return err
}
+ metrics.GetOrRegisterCounter("upload-and-sync.upload-time", nil).Inc(int64(time.Since(t1)))
- fhash, err := digest(f)
+ fhash, err := digest(bytes.NewReader(randomBytes))
if err != nil {
log.Error(err.Error())
return err
@@ -82,23 +113,47 @@ func cliUploadAndSync(c *cli.Context) error {
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))
- time.Sleep(3 * time.Second)
+ time.Sleep(time.Duration(syncDelay) * time.Second)
wg := sync.WaitGroup{}
- for _, endpoint := range endpoints {
+ if single {
+ rand.Seed(time.Now().UTC().UnixNano())
+ randIndex := 1 + rand.Intn(len(endpoints)-1)
ruid := uuid.New()[:8]
wg.Add(1)
go func(endpoint string, ruid string) {
for {
+ start := time.Now()
err := fetch(hash, endpoint, fhash, ruid)
+ fetchTime := time.Since(start)
if err != nil {
continue
}
+ metrics.GetOrRegisterMeter("upload-and-sync.single.fetch-time", nil).Mark(int64(fetchTime))
wg.Done()
return
}
- }(endpoint, ruid)
+ }(endpoints[randIndex], ruid)
+ } else {
+ for _, endpoint := range endpoints {
+ ruid := uuid.New()[:8]
+ wg.Add(1)
+ go func(endpoint string, ruid string) {
+ for {
+ start := time.Now()
+ err := fetch(hash, endpoint, fhash, ruid)
+ fetchTime := time.Since(start)
+ if err != nil {
+ continue
+ }
+
+ metrics.GetOrRegisterMeter("upload-and-sync.each.fetch-time", nil).Mark(int64(fetchTime))
+ wg.Done()
+ return
+ }
+ }(endpoint, ruid)
+ }
}
wg.Wait()
log.Info("all endpoints synced random file successfully")
@@ -108,13 +163,33 @@ func cliUploadAndSync(c *cli.Context) error {
// fetch is getting the requested `hash` from the `endpoint` and compares it with the `original` file
func fetch(hash string, endpoint string, original []byte, ruid string) error {
+ ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch")
+ defer sp.Finish()
+
log.Trace("sleeping", "ruid", ruid)
time.Sleep(3 * time.Second)
-
log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
- res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
+
+ var tn time.Time
+ reqUri := endpoint + "/bzz:/" + hash + "/"
+ req, _ := http.NewRequest("GET", reqUri, nil)
+
+ opentracing.GlobalTracer().Inject(
+ sp.Context(),
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(req.Header))
+
+ trace := client.GetClientTrace("upload-and-sync - http get", "upload-and-sync", ruid, &tn)
+
+ req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
+ transport := http.DefaultTransport
+
+ //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+
+ tn = time.Now()
+ res, err := transport.RoundTrip(req)
if err != nil {
- log.Warn(err.Error(), "ruid", ruid)
+ log.Error(err.Error(), "ruid", ruid)
return err
}
log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
@@ -145,16 +220,19 @@ func fetch(hash string, endpoint string, original []byte, ruid string) error {
}
// upload is uploading a file `f` to `endpoint` via the `swarm up` cmd
-func upload(f *os.File, endpoint string) (string, error) {
- var out bytes.Buffer
- cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name())
- cmd.Stdout = &out
- err := cmd.Run()
- if err != nil {
- return "", err
+func upload(dataBytes *[]byte, endpoint string) (string, error) {
+ swarm := client.NewClient(endpoint)
+ f := &client.File{
+ ReadCloser: ioutil.NopCloser(bytes.NewReader(*dataBytes)),
+ ManifestEntry: api.ManifestEntry{
+ ContentType: "text/plain",
+ Mode: 0660,
+ Size: int64(len(*dataBytes)),
+ },
}
- hash := strings.TrimRight(out.String(), "\r\n")
- return hash, nil
+
+ // upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
+ return swarm.Upload(f, "", false)
}
func digest(r io.Reader) ([]byte, error) {
@@ -177,27 +255,3 @@ func generateRandomData(datasize int) ([]byte, error) {
}
return b, nil
}
-
-// generateRandomFile is creating a temporary file with the requested byte size
-func generateRandomFile(size int) (f *os.File, teardown func()) {
- // create a tmp file
- tmp, err := ioutil.TempFile("", "swarm-test")
- if err != nil {
- panic(err)
- }
-
- // callback for tmp file cleanup
- teardown = func() {
- tmp.Close()
- os.Remove(tmp.Name())
- }
-
- buf := make([]byte, size)
- _, err = crand.Read(buf)
- if err != nil {
- panic(err)
- }
- ioutil.WriteFile(tmp.Name(), buf, 0755)
-
- return tmp, teardown
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/upload_test.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/upload_test.go
index 5f984495..616486e3 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/upload_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/upload_test.go
@@ -31,8 +31,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/log"
- swarm "github.com/ethereum/go-ethereum/swarm/api/client"
- swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
+ swarmapi "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/testutil"
"github.com/mattn/go-colorable"
)
@@ -42,42 +41,50 @@ func init() {
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
-// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
+func TestSwarmUp(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip()
+ }
+
+ initCluster(t)
+
+ cases := []struct {
+ name string
+ f func(t *testing.T)
+ }{
+ {"NoEncryption", testNoEncryption},
+ {"Encrypted", testEncrypted},
+ {"RecursiveNoEncryption", testRecursiveNoEncryption},
+ {"RecursiveEncrypted", testRecursiveEncrypted},
+ {"DefaultPathAll", testDefaultPathAll},
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, tc.f)
+ }
+}
+
+// testNoEncryption tests that running 'swarm up' makes the resulting file
// available from all nodes via the HTTP API
-func TestCLISwarmUp(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip()
- }
-
- testCLISwarmUp(false, t)
-}
-func TestCLISwarmUpRecursive(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip()
- }
- testCLISwarmUpRecursive(false, t)
+func testNoEncryption(t *testing.T) {
+ testDefault(false, t)
}
-// TestCLISwarmUpEncrypted tests that running 'swarm encrypted-up' makes the resulting file
+// testEncrypted tests that running 'swarm up --encrypted' makes the resulting file
// available from all nodes via the HTTP API
-func TestCLISwarmUpEncrypted(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip()
- }
- testCLISwarmUp(true, t)
-}
-func TestCLISwarmUpEncryptedRecursive(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip()
- }
- testCLISwarmUpRecursive(true, t)
+func testEncrypted(t *testing.T) {
+ testDefault(true, t)
}
-func testCLISwarmUp(toEncrypt bool, t *testing.T) {
- log.Info("starting 3 node cluster")
- cluster := newTestCluster(t, 3)
- defer cluster.Shutdown()
+func testRecursiveNoEncryption(t *testing.T) {
+ testRecursive(false, t)
+}
+func testRecursiveEncrypted(t *testing.T) {
+ testRecursive(true, t)
+}
+
+func testDefault(toEncrypt bool, t *testing.T) {
tmpFileName := testutil.TempFileWithContent(t, data)
defer os.Remove(tmpFileName)
@@ -182,11 +189,7 @@ func testCLISwarmUp(toEncrypt bool, t *testing.T) {
}
}
-func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
- fmt.Println("starting 3 node cluster")
- cluster := newTestCluster(t, 3)
- defer cluster.Shutdown()
-
+func testRecursive(toEncrypt bool, t *testing.T) {
tmpUploadDir, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
@@ -253,7 +256,7 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
switch mode := fi.Mode(); {
case mode.IsRegular():
- if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil {
+ if file, err := swarmapi.Open(path.Join(tmpDownload, v.Name())); err != nil {
t.Fatalf("encountered an error opening the file returned from the CLI: %v", err)
} else {
ff := make([]byte, len(data))
@@ -274,22 +277,16 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
}
}
-// TestCLISwarmUpDefaultPath tests swarm recursive upload with relative and absolute
+// testDefaultPathAll tests swarm recursive upload with relative and absolute
// default paths and with encryption.
-func TestCLISwarmUpDefaultPath(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip()
- }
- testCLISwarmUpDefaultPath(false, false, t)
- testCLISwarmUpDefaultPath(false, true, t)
- testCLISwarmUpDefaultPath(true, false, t)
- testCLISwarmUpDefaultPath(true, true, t)
+func testDefaultPathAll(t *testing.T) {
+ testDefaultPath(false, false, t)
+ testDefaultPath(false, true, t)
+ testDefaultPath(true, false, t)
+ testDefaultPath(true, true, t)
}
-func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) {
- srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
- defer srv.Close()
-
+func testDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) {
tmp, err := ioutil.TempDir("", "swarm-defaultpath-test")
if err != nil {
t.Fatal(err)
@@ -312,7 +309,7 @@ func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T
args := []string{
"--bzzapi",
- srv.URL,
+ cluster.Nodes[0].URL,
"--recursive",
"--defaultpath",
defaultPath,
@@ -329,7 +326,7 @@ func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T
up.ExpectExit()
hash := matches[0]
- client := swarm.NewClient(srv.URL)
+ client := swarmapi.NewClient(cluster.Nodes[0].URL)
m, isEncrypted, err := client.DownloadManifest(hash)
if err != nil {
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go b/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go
index 429c2bbb..60e45d09 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go
@@ -140,6 +140,10 @@ var (
Name: "rinkeby",
Usage: "Rinkeby network: pre-configured proof-of-authority test network",
}
+ ConstantinopleOverrideFlag = cli.Uint64Flag{
+ Name: "override.constantinople",
+ Usage: "Manually specify constantinople fork-block, overriding the bundled setting",
+ }
DeveloperFlag = cli.BoolFlag{
Name: "dev",
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
@@ -182,6 +186,10 @@ var (
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
}
+ WhitelistFlag = cli.StringFlag{
+ Name: "whitelist",
+ Usage: "Comma separated block number-to-hash mappings to enforce (=)",
+ }
// Dashboard settings
DashboardEnabledFlag = cli.BoolFlag{
Name: metrics.DashboardEnabledFlag,
@@ -295,7 +303,12 @@ var (
CacheDatabaseFlag = cli.IntFlag{
Name: "cache.database",
Usage: "Percentage of cache memory allowance to use for database io",
- Value: 75,
+ Value: 50,
+ }
+ CacheTrieFlag = cli.IntFlag{
+ Name: "cache.trie",
+ Usage: "Percentage of cache memory allowance to use for trie caching",
+ Value: 25,
}
CacheGCFlag = cli.IntFlag{
Name: "cache.gc",
@@ -819,17 +832,12 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
// makeDatabaseHandles raises out the number of allowed file handles per process
// for Geth and returns half of the allowance to assign to the database.
func makeDatabaseHandles() int {
- limit, err := fdlimit.Current()
+ limit, err := fdlimit.Maximum()
if err != nil {
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
}
- if limit < 2048 {
- if err := fdlimit.Raise(2048); err != nil {
- Fatalf("Failed to raise file descriptor allowance: %v", err)
- }
- }
- if limit > 2048 { // cap database file descriptors even if more is available
- limit = 2048
+ if err := fdlimit.Raise(uint64(limit)); err != nil {
+ Fatalf("Failed to raise file descriptor allowance: %v", err)
}
return limit / 2 // Leave half for networking and other stuff
}
@@ -973,16 +981,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
setWS(ctx, cfg)
setNodeUserIdent(ctx, cfg)
- switch {
- case ctx.GlobalIsSet(DataDirFlag.Name):
- cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
- case ctx.GlobalBool(DeveloperFlag.Name):
- cfg.DataDir = "" // unless explicitly requested, use memory databases
- case ctx.GlobalBool(TestnetFlag.Name):
- cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
- case ctx.GlobalBool(RinkebyFlag.Name):
- cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
- }
+ setDataDir(ctx, cfg)
if ctx.GlobalIsSet(KeyStoreDirFlag.Name) {
cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name)
@@ -995,6 +994,19 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
}
}
+func setDataDir(ctx *cli.Context, cfg *node.Config) {
+ switch {
+ case ctx.GlobalIsSet(DataDirFlag.Name):
+ cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
+ case ctx.GlobalBool(DeveloperFlag.Name):
+ cfg.DataDir = "" // unless explicitly requested, use memory databases
+ case ctx.GlobalBool(TestnetFlag.Name):
+ cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
+ case ctx.GlobalBool(RinkebyFlag.Name):
+ cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
+ }
+}
+
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
if ctx.GlobalIsSet(GpoBlocksFlag.Name) {
cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name)
@@ -1068,6 +1080,29 @@ func setEthash(ctx *cli.Context, cfg *eth.Config) {
}
}
+func setWhitelist(ctx *cli.Context, cfg *eth.Config) {
+ whitelist := ctx.GlobalString(WhitelistFlag.Name)
+ if whitelist == "" {
+ return
+ }
+ cfg.Whitelist = make(map[uint64]common.Hash)
+ for _, entry := range strings.Split(whitelist, ",") {
+ parts := strings.Split(entry, "=")
+ if len(parts) != 2 {
+ Fatalf("Invalid whitelist entry: %s", entry)
+ }
+ number, err := strconv.ParseUint(parts[0], 0, 64)
+ if err != nil {
+ Fatalf("Invalid whitelist block number %s: %v", parts[0], err)
+ }
+ var hash common.Hash
+ if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
+ Fatalf("Invalid whitelist hash %s: %v", parts[1], err)
+ }
+ cfg.Whitelist[number] = hash
+ }
+}
+
// checkExclusive verifies that only a single instance of the provided flags was
// set by the user. Each flag might optionally be followed by a string type to
// specialize it further.
@@ -1133,6 +1168,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
setGPO(ctx, &cfg.GPO)
setTxPool(ctx, &cfg.TxPool)
setEthash(ctx, cfg)
+ setWhitelist(ctx, cfg)
if ctx.GlobalIsSet(SyncModeFlag.Name) {
cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
@@ -1146,7 +1182,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
}
-
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
}
@@ -1157,8 +1192,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
}
cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
+ if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
+ cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
+ }
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
- cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
+ cfg.TrieDirtyCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
if ctx.GlobalIsSet(MinerNotifyFlag.Name) {
cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",")
@@ -1368,7 +1406,6 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) {
var err error
chainDb = MakeChainDatabase(ctx, stack)
-
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
@@ -1393,12 +1430,16 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
}
cache := &core.CacheConfig{
- Disabled: ctx.GlobalString(GCModeFlag.Name) == "archive",
- TrieNodeLimit: eth.DefaultConfig.TrieCache,
- TrieTimeLimit: eth.DefaultConfig.TrieTimeout,
+ Disabled: ctx.GlobalString(GCModeFlag.Name) == "archive",
+ TrieCleanLimit: eth.DefaultConfig.TrieCleanCache,
+ TrieDirtyLimit: eth.DefaultConfig.TrieDirtyCache,
+ TrieTimeLimit: eth.DefaultConfig.TrieTimeout,
+ }
+ if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
+ cache.TrieCleanLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
- cache.TrieNodeLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
+ cache.TrieDirtyLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil)
diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go b/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go
index eae09f91..0cb72c35 100644
--- a/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go
+++ b/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go
@@ -696,7 +696,7 @@ func (c *Clique) SealHash(header *types.Header) common.Hash {
return sigHash(header)
}
-// Close implements consensus.Engine. It's a noop for clique as there is are no background threads.
+// Close implements consensus.Engine. It's a noop for clique as there are no background threads.
func (c *Clique) Close() error {
return nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/core/blockchain.go b/vendor/github.com/ethereum/go-ethereum/core/blockchain.go
index 26ac75b8..c29063a7 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/blockchain.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/blockchain.go
@@ -47,7 +47,10 @@ import (
)
var (
- blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+ blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+ blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
+ blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
+ blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
ErrNoGenesis = errors.New("Genesis not found in chain")
)
@@ -68,9 +71,10 @@ const (
// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
- Disabled bool // Whether to disable trie write caching (archive node)
- TrieNodeLimit int // Memory limit (MB) at which to flush the current in-memory trie to disk
- TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
+ Disabled bool // Whether to disable trie write caching (archive node)
+ TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
+ TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
+ TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
}
// BlockChain represents the canonical chain given a database with a genesis
@@ -140,8 +144,9 @@ type BlockChain struct {
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = &CacheConfig{
- TrieNodeLimit: 256,
- TrieTimeLimit: 5 * time.Minute,
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
}
}
bodyCache, _ := lru.New(bodyCacheLimit)
@@ -156,7 +161,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
- stateCache: state.NewDatabase(db),
+ stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
quit: make(chan struct{}),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
@@ -205,6 +210,11 @@ func (bc *BlockChain) getProcInterrupt() bool {
return atomic.LoadInt32(&bc.procInterrupt) == 1
}
+// GetVMConfig returns the block chain VM config.
+func (bc *BlockChain) GetVMConfig() *vm.Config {
+ return &bc.vmConfig
+}
+
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
@@ -393,6 +403,11 @@ func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
return state.New(root, bc.stateCache)
}
+// StateCache returns the caching database underpinning the blockchain instance.
+func (bc *BlockChain) StateCache() state.Database {
+ return bc.stateCache
+}
+
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
return bc.ResetWithGenesisBlock(bc.genesisBlock)
@@ -438,7 +453,11 @@ func (bc *BlockChain) repair(head **types.Block) error {
return nil
}
// Otherwise rewind one block and recheck state availability there
- (*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
+ block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
+ if block == nil {
+ return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
+ }
+ (*head) = block
}
}
@@ -554,6 +573,17 @@ func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
return rawdb.HasBody(bc.db, hash, number)
}
+// HasFastBlock checks if a fast block is fully present in the database or not.
+func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
+ if !bc.HasBlock(hash, number) {
+ return false
+ }
+ if bc.receiptsCache.Contains(hash) {
+ return true
+ }
+ return rawdb.HasReceipts(bc.db, hash, number)
+}
+
// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
_, err := bc.stateCache.OpenTrie(hash)
@@ -611,12 +641,10 @@ func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
if receipts, ok := bc.receiptsCache.Get(hash); ok {
return receipts.(types.Receipts)
}
-
number := rawdb.ReadHeaderNumber(bc.db, hash)
if number == nil {
return nil
}
-
receipts := rawdb.ReadReceipts(bc.db, hash, *number)
bc.receiptsCache.Add(hash, receipts)
return receipts
@@ -938,7 +966,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
// If we exceeded our memory allowance, flush matured singleton nodes to disk
var (
nodes, imgs = triedb.Size()
- limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
+ limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
)
if nodes > limit || imgs > 4*1024*1024 {
triedb.Cap(limit - ethdb.IdealBatchSize)
@@ -1020,6 +1048,18 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
return status, nil
}
+// addFutureBlock checks if the block is within the max allowed window to get
+// accepted for future processing, and returns an error if the block is too far
+// ahead and was not added.
+func (bc *BlockChain) addFutureBlock(block *types.Block) error {
+ max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
+ if block.Time().Cmp(max) > 0 {
+ return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
+ }
+ bc.futureBlocks.Add(block.Hash(), block)
+ return nil
+}
+
// InsertChain attempts to insert the given batch of blocks in to the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
@@ -1027,18 +1067,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
- n, events, logs, err := bc.insertChain(chain)
- bc.PostChainEvents(events, logs)
- return n, err
-}
-
-// insertChain will execute the actual chain insertion and event aggregation. The
-// only reason this method exists as a separate one is to make locking cleaner
-// with deferred statements.
-func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
// Sanity check that we have something meaningful to import
if len(chain) == 0 {
- return 0, nil, nil, nil
+ return 0, nil
}
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(chain); i++ {
@@ -1047,16 +1078,36 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
- return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
+ return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
}
}
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
- defer bc.wg.Done()
-
bc.chainmu.Lock()
- defer bc.chainmu.Unlock()
+ n, events, logs, err := bc.insertChain(chain, true)
+ bc.chainmu.Unlock()
+ bc.wg.Done()
+
+ bc.PostChainEvents(events, logs)
+ return n, err
+}
+
+// insertChain is the internal implementation of insertChain, which assumes that
+// 1) chains are contiguous, and 2) The chain mutex is held.
+//
+// This method is split out so that import batches that require re-injecting
+// historical blocks can do so without releasing the lock, which could lead to
+// racey behaviour. If a sidechain import is in progress, and the historic state
+// is imported, but then new canon-head is added before the actual sidechain
+// completes, then the historic state could be pruned again
+func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
+ // If the chain is terminating, don't even bother starting u
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ return 0, nil, nil, nil
+ }
+ // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
+ senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
@@ -1073,16 +1124,56 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
for i, block := range chain {
headers[i] = block.Header()
- seals[i] = true
+ seals[i] = verifySeals
}
abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
defer close(abort)
- // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
- senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
+ // Peek the error for the first block to decide the directing import logic
+ it := newInsertIterator(chain, results, bc.Validator())
- // Iterate over the blocks and insert when the verifier permits
- for i, block := range chain {
+ block, err := it.next()
+ switch {
+ // First block is pruned, insert as sidechain and reorg only if TD grows enough
+ case err == consensus.ErrPrunedAncestor:
+ return bc.insertSidechain(it)
+
+ // First block is future, shove it (and all children) to the future queue (unknown ancestor)
+ case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
+ for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ block, err = it.next()
+ }
+ stats.queued += it.processed()
+ stats.ignored += it.remaining()
+
+ // If there are any still remaining, mark as ignored
+ return it.index, events, coalescedLogs, err
+
+ // First block (and state) is known
+ // 1. We did a roll-back, and should now do a re-import
+ // 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
+ // from the canonical chain, which has not been verified.
+ case err == ErrKnownBlock:
+ // Skip all known blocks that behind us
+ current := bc.CurrentBlock().NumberU64()
+
+ for block != nil && err == ErrKnownBlock && current >= block.NumberU64() {
+ stats.ignored++
+ block, err = it.next()
+ }
+ // Falls through to the block import
+
+ // Some other error occurred, abort
+ case err != nil:
+ stats.ignored += len(it.chain)
+ bc.reportBlock(block, nil, err)
+ return it.index, events, coalescedLogs, err
+ }
+ // No validation errors for the first block (or chain prefix skipped)
+ for ; block != nil && err == nil; block, err = it.next() {
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
@@ -1091,115 +1182,53 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
- return i, events, coalescedLogs, ErrBlacklistedHash
+ return it.index, events, coalescedLogs, ErrBlacklistedHash
}
- // Wait for the block's verification to complete
- bstart := time.Now()
+ // Retrieve the parent block and it's state to execute on top
+ start := time.Now()
- err := <-results
- if err == nil {
- err = bc.Validator().ValidateBody(block)
- }
- switch {
- case err == ErrKnownBlock:
- // Block and state both already known. However if the current block is below
- // this number we did a rollback and we should reimport it nonetheless.
- if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
- stats.ignored++
- continue
- }
-
- case err == consensus.ErrFutureBlock:
- // Allow up to MaxFuture second in the future blocks. If this limit is exceeded
- // the chain is discarded and processed at a later time if given.
- max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
- if block.Time().Cmp(max) > 0 {
- return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
- }
- bc.futureBlocks.Add(block.Hash(), block)
- stats.queued++
- continue
-
- case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
- bc.futureBlocks.Add(block.Hash(), block)
- stats.queued++
- continue
-
- case err == consensus.ErrPrunedAncestor:
- // Block competing with the canonical chain, store in the db, but don't process
- // until the competitor TD goes above the canonical TD
- currentBlock := bc.CurrentBlock()
- localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
- externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
- if localTd.Cmp(externTd) > 0 {
- if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
- return i, events, coalescedLogs, err
- }
- continue
- }
- // Competitor chain beat canonical, gather all blocks from the common ancestor
- var winner []*types.Block
-
- parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
- for !bc.HasState(parent.Root()) {
- winner = append(winner, parent)
- parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
- }
- for j := 0; j < len(winner)/2; j++ {
- winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
- }
- // Import all the pruned blocks to make the state available
- bc.chainmu.Unlock()
- _, evs, logs, err := bc.insertChain(winner)
- bc.chainmu.Lock()
- events, coalescedLogs = evs, logs
-
- if err != nil {
- return i, events, coalescedLogs, err
- }
-
- case err != nil:
- bc.reportBlock(block, nil, err)
- return i, events, coalescedLogs, err
- }
- // Create a new statedb using the parent block and report an
- // error if it fails.
- var parent *types.Block
- if i == 0 {
+ parent := it.previous()
+ if parent == nil {
parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
- } else {
- parent = chain[i-1]
}
state, err := state.New(parent.Root(), bc.stateCache)
if err != nil {
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
// Process block using the parent state as reference point.
+ t0 := time.Now()
receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
+ t1 := time.Now()
if err != nil {
bc.reportBlock(block, receipts, err)
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
// Validate the state using the default validator
- err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
- if err != nil {
+ if err := bc.Validator().ValidateState(block, parent, state, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
- proctime := time.Since(bstart)
+ t2 := time.Now()
+ proctime := time.Since(start)
// Write the block to the chain and get the status.
status, err := bc.WriteBlockWithState(block, receipts, state)
+ t3 := time.Now()
if err != nil {
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
+ blockInsertTimer.UpdateSince(start)
+ blockExecutionTimer.Update(t1.Sub(t0))
+ blockValidationTimer.Update(t2.Sub(t1))
+ blockWriteTimer.Update(t3.Sub(t2))
switch status {
case CanonStatTy:
- log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
- "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
+ log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
+ "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
+ "elapsed", common.PrettyDuration(time.Since(start)),
+ "root", block.Root())
coalescedLogs = append(coalescedLogs, logs...)
- blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block
@@ -1207,78 +1236,153 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
bc.gcproc += proctime
case SideStatTy:
- log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
- common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
-
- blockInsertTimer.UpdateSince(bstart)
+ log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
+ "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
+ "root", block.Root())
events = append(events, ChainSideEvent{block})
}
+ blockInsertTimer.UpdateSince(start)
stats.processed++
stats.usedGas += usedGas
cache, _ := bc.stateCache.TrieDB().Size()
- stats.report(chain, i, cache)
+ stats.report(chain, it.index, cache)
}
+ // Any blocks remaining here? The only ones we care about are the future ones
+ if block != nil && err == consensus.ErrFutureBlock {
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ block, err = it.next()
+
+ for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ stats.queued++
+ }
+ }
+ stats.ignored += it.remaining()
+
// Append a single chain head event if we've progressed the chain
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
events = append(events, ChainHeadEvent{lastCanon})
}
- return 0, events, coalescedLogs, nil
+ return it.index, events, coalescedLogs, err
}
-// insertStats tracks and reports on block insertion.
-type insertStats struct {
- queued, processed, ignored int
- usedGas uint64
- lastIndex int
- startTime mclock.AbsTime
-}
-
-// statsReportLimit is the time limit during import and export after which we
-// always print out progress. This avoids the user wondering what's going on.
-const statsReportLimit = 8 * time.Second
-
-// report prints statistics if some number of blocks have been processed
-// or more than a few seconds have passed since the last message.
-func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
- // Fetch the timings for the batch
+// insertSidechain is called when an import batch hits upon a pruned ancestor
+// error, which happens when a sidechain with a sufficiently old fork-block is
+// found.
+//
+// The method writes all (header-and-body-valid) blocks to disk, then tries to
+// switch over to the new chain if the TD exceeded the current chain.
+func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, []*types.Log, error) {
var (
- now = mclock.Now()
- elapsed = time.Duration(now) - time.Duration(st.startTime)
+ externTd *big.Int
+ current = bc.CurrentBlock().NumberU64()
)
- // If we're at the last block of the batch or report period reached, log
- if index == len(chain)-1 || elapsed >= statsReportLimit {
- var (
- end = chain[index]
- txs = countTransactions(chain[st.lastIndex : index+1])
- )
- context := []interface{}{
- "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
- "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
- "number", end.Number(), "hash", end.Hash(),
- }
- if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
- context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
- }
- context = append(context, []interface{}{"cache", cache}...)
+ // The first sidechain block error is already verified to be ErrPrunedAncestor.
+ // Since we don't import them here, we expect ErrUnknownAncestor for the remaining
+ // ones. Any other errors means that the block is invalid, and should not be written
+ // to disk.
+ block, err := it.current(), consensus.ErrPrunedAncestor
+ for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
+ // Check the canonical state root for that number
+ if number := block.NumberU64(); current >= number {
+ if canonical := bc.GetBlockByNumber(number); canonical != nil && canonical.Root() == block.Root() {
+ // This is most likely a shadow-state attack. When a fork is imported into the
+ // database, and it eventually reaches a block height which is not pruned, we
+ // just found that the state already exist! This means that the sidechain block
+ // refers to a state which already exists in our canon chain.
+ //
+ // If left unchecked, we would now proceed importing the blocks, without actually
+ // having verified the state of the previous blocks.
+ log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
- if st.queued > 0 {
- context = append(context, []interface{}{"queued", st.queued}...)
+ // If someone legitimately side-mines blocks, they would still be imported as usual. However,
+ // we cannot risk writing unverified blocks to disk when they obviously target the pruning
+ // mechanism.
+ return it.index, nil, nil, errors.New("sidechain ghost-state attack")
+ }
}
- if st.ignored > 0 {
- context = append(context, []interface{}{"ignored", st.ignored}...)
+ if externTd == nil {
+ externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
}
- log.Info("Imported new chain segment", context...)
+ externTd = new(big.Int).Add(externTd, block.Difficulty())
- *st = insertStats{startTime: now, lastIndex: index + 1}
+ if !bc.HasBlock(block.Hash(), block.NumberU64()) {
+ start := time.Now()
+ if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
+ return it.index, nil, nil, err
+ }
+ log.Debug("Inserted sidechain block", "number", block.Number(), "hash", block.Hash(),
+ "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
+ "root", block.Root())
+ }
}
-}
-
-func countTransactions(chain []*types.Block) (c int) {
- for _, b := range chain {
- c += len(b.Transactions())
+ // At this point, we've written all sidechain blocks to database. Loop ended
+ // either on some other error or all were processed. If there was some other
+ // error, we can ignore the rest of those blocks.
+ //
+ // If the externTd was larger than our local TD, we now need to reimport the previous
+ // blocks to regenerate the required state
+ localTd := bc.GetTd(bc.CurrentBlock().Hash(), current)
+ if localTd.Cmp(externTd) > 0 {
+ log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().NumberU64(), "sidetd", externTd, "localtd", localTd)
+ return it.index, nil, nil, err
}
- return c
+ // Gather all the sidechain hashes (full blocks may be memory heavy)
+ var (
+ hashes []common.Hash
+ numbers []uint64
+ )
+ parent := bc.GetHeader(it.previous().Hash(), it.previous().NumberU64())
+ for parent != nil && !bc.HasState(parent.Root) {
+ hashes = append(hashes, parent.Hash())
+ numbers = append(numbers, parent.Number.Uint64())
+
+ parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
+ }
+ if parent == nil {
+ return it.index, nil, nil, errors.New("missing parent")
+ }
+ // Import all the pruned blocks to make the state available
+ var (
+ blocks []*types.Block
+ memory common.StorageSize
+ )
+ for i := len(hashes) - 1; i >= 0; i-- {
+ // Append the next block to our batch
+ block := bc.GetBlock(hashes[i], numbers[i])
+
+ blocks = append(blocks, block)
+ memory += block.Size()
+
+ // If memory use grew too large, import and continue. Sadly we need to discard
+ // all raised events and logs from notifications since we're too heavy on the
+ // memory here.
+ if len(blocks) >= 2048 || memory > 64*1024*1024 {
+ log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
+ if _, _, _, err := bc.insertChain(blocks, false); err != nil {
+ return 0, nil, nil, err
+ }
+ blocks, memory = blocks[:0], 0
+
+ // If the chain is terminating, stop processing blocks
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ log.Debug("Premature abort during blocks processing")
+ return 0, nil, nil, nil
+ }
+ }
+ }
+ if len(blocks) > 0 {
+ log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
+ return bc.insertChain(blocks, false)
+ }
+ return 0, nil, nil, nil
}
// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
@@ -1453,8 +1557,10 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e
bc.addBadBlock(block)
var receiptString string
- for _, receipt := range receipts {
- receiptString += fmt.Sprintf("\t%v\n", receipt)
+ for i, receipt := range receipts {
+ receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
+ i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
+ receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
}
log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
diff --git a/vendor/github.com/ethereum/go-ethereum/core/blockchain_insert.go b/vendor/github.com/ethereum/go-ethereum/core/blockchain_insert.go
new file mode 100644
index 00000000..70bea354
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/core/blockchain_insert.go
@@ -0,0 +1,143 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package core
+
+import (
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// insertStats tracks and reports on block insertion.
+type insertStats struct {
+ queued, processed, ignored int
+ usedGas uint64
+ lastIndex int
+ startTime mclock.AbsTime
+}
+
+// statsReportLimit is the time limit during import and export after which we
+// always print out progress. This avoids the user wondering what's going on.
+const statsReportLimit = 8 * time.Second
+
+// report prints statistics if some number of blocks have been processed
+// or more than a few seconds have passed since the last message.
+func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
+ // Fetch the timings for the batch
+ var (
+ now = mclock.Now()
+ elapsed = time.Duration(now) - time.Duration(st.startTime)
+ )
+ // If we're at the last block of the batch or report period reached, log
+ if index == len(chain)-1 || elapsed >= statsReportLimit {
+ // Count the number of transactions in this segment
+ var txs int
+ for _, block := range chain[st.lastIndex : index+1] {
+ txs += len(block.Transactions())
+ }
+ end := chain[index]
+
+ // Assemble the log context and send it to the logger
+ context := []interface{}{
+ "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
+ "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
+ "number", end.Number(), "hash", end.Hash(),
+ }
+ if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
+ context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
+ }
+ context = append(context, []interface{}{"cache", cache}...)
+
+ if st.queued > 0 {
+ context = append(context, []interface{}{"queued", st.queued}...)
+ }
+ if st.ignored > 0 {
+ context = append(context, []interface{}{"ignored", st.ignored}...)
+ }
+ log.Info("Imported new chain segment", context...)
+
+ // Bump the stats reported to the next section
+ *st = insertStats{startTime: now, lastIndex: index + 1}
+ }
+}
+
+// insertIterator is a helper to assist during chain import.
+type insertIterator struct {
+ chain types.Blocks
+ results <-chan error
+ index int
+ validator Validator
+}
+
+// newInsertIterator creates a new iterator based on the given blocks, which are
+// assumed to be a contiguous chain.
+func newInsertIterator(chain types.Blocks, results <-chan error, validator Validator) *insertIterator {
+ return &insertIterator{
+ chain: chain,
+ results: results,
+ index: -1,
+ validator: validator,
+ }
+}
+
+// next returns the next block in the iterator, along with any potential validation
+// error for that block. When the end is reached, it will return (nil, nil).
+func (it *insertIterator) next() (*types.Block, error) {
+ if it.index+1 >= len(it.chain) {
+ it.index = len(it.chain)
+ return nil, nil
+ }
+ it.index++
+ if err := <-it.results; err != nil {
+ return it.chain[it.index], err
+ }
+ return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index])
+}
+
+// current returns the current block that's being processed.
+func (it *insertIterator) current() *types.Block {
+ if it.index < 0 || it.index+1 >= len(it.chain) {
+ return nil
+ }
+ return it.chain[it.index]
+}
+
+// previous returns the previous block was being processed, or nil
+func (it *insertIterator) previous() *types.Block {
+ if it.index < 1 {
+ return nil
+ }
+ return it.chain[it.index-1]
+}
+
+// first returns the first block in the it.
+func (it *insertIterator) first() *types.Block {
+ return it.chain[0]
+}
+
+// remaining returns the number of remaining blocks.
+func (it *insertIterator) remaining() int {
+ return len(it.chain) - it.index
+}
+
+// processed returns the number of processed blocks.
+func (it *insertIterator) processed() int {
+ return it.index + 1
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/core/blockchain_test.go b/vendor/github.com/ethereum/go-ethereum/core/blockchain_test.go
index aef81005..5ab29e20 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/blockchain_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/blockchain_test.go
@@ -579,11 +579,11 @@ func testInsertNonceError(t *testing.T, full bool) {
blockchain.hc.engine = blockchain.engine
failRes, err = blockchain.InsertHeaderChain(headers, 1)
}
- // Check that the returned error indicates the failure.
+ // Check that the returned error indicates the failure
if failRes != failAt {
- t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
+ t.Errorf("test %d: failure (%v) index mismatch: have %d, want %d", i, err, failRes, failAt)
}
- // Check that all no blocks after the failing block have been inserted.
+ // Check that all blocks after the failing block have been inserted
for j := 0; j < i-failAt; j++ {
if full {
if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
@@ -1345,7 +1345,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
t.Fatalf("failed to insert shared chain: %v", err)
}
if _, err := chain.InsertChain(original); err != nil {
- t.Fatalf("failed to insert shared chain: %v", err)
+ t.Fatalf("failed to insert original chain: %v", err)
}
// Ensure that the state associated with the forking point is pruned away
if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil {
diff --git a/vendor/github.com/ethereum/go-ethereum/core/genesis.go b/vendor/github.com/ethereum/go-ethereum/core/genesis.go
index 6e71afd6..c96cb17a 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/genesis.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/genesis.go
@@ -151,6 +151,9 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
+ return SetupGenesisBlockWithOverride(db, genesis, nil)
+}
+func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constantinopleOverride *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@@ -178,6 +181,9 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
// Get the existing chain configuration.
newcfg := genesis.configOrDefault(stored)
+ if constantinopleOverride != nil {
+ newcfg.ConstantinopleBlock = constantinopleOverride
+ }
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg == nil {
log.Warn("Found genesis block without chain config")
diff --git a/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain.go b/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain.go
index 6660e17d..491a125c 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain.go
@@ -271,6 +271,15 @@ func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
}
}
+// HasReceipts verifies the existence of all the transaction receipts belonging
+// to a block.
+func HasReceipts(db DatabaseReader, hash common.Hash, number uint64) bool {
+ if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
+ return false
+ }
+ return true
+}
+
// ReadReceipts retrieves all the transaction receipts belonging to a block.
func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
// Retrieve the flattened receipt slice
diff --git a/vendor/github.com/ethereum/go-ethereum/core/state/database.go b/vendor/github.com/ethereum/go-ethereum/core/state/database.go
index c1b63099..f6ea144b 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/state/database.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/state/database.go
@@ -72,13 +72,19 @@ type Trie interface {
}
// NewDatabase creates a backing store for state. The returned database is safe for
-// concurrent use and retains cached trie nodes in memory. The pool is an optional
-// intermediate trie-node memory pool between the low level storage layer and the
-// high level trie abstraction.
+// concurrent use and retains a few recent expanded trie nodes in memory. To keep
+// more historical state in memory, use the NewDatabaseWithCache constructor.
func NewDatabase(db ethdb.Database) Database {
+ return NewDatabaseWithCache(db, 0)
+}
+
+// NewDatabase creates a backing store for state. The returned database is safe for
+// concurrent use and retains both a few recent expanded trie nodes in memory, as
+// well as a lot of collapsed RLP trie nodes in a large memory cache.
+func NewDatabaseWithCache(db ethdb.Database, cache int) Database {
csc, _ := lru.New(codeSizeCacheSize)
return &cachingDB{
- db: trie.NewDatabase(db),
+ db: trie.NewDatabaseWithCache(db, cache),
codeSizeCache: csc,
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go b/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go
index f6da5da2..fc35d1f2 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go
@@ -825,7 +825,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) []error {
// addTxsLocked attempts to queue a batch of transactions if they are valid,
// whilst assuming the transaction pool lock is already held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error {
- // Add the batch of transaction, tracking the accepted ones
+ // Add the batch of transactions, tracking the accepted ones
dirty := make(map[common.Address]struct{})
errs := make([]error, len(txs))
diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/block.go b/vendor/github.com/ethereum/go-ethereum/core/types/block.go
index 8a21bba1..9d11f60d 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/types/block.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/types/block.go
@@ -81,8 +81,8 @@ type Header struct {
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Time *big.Int `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData" gencodec:"required"`
- MixDigest common.Hash `json:"mixHash" gencodec:"required"`
- Nonce BlockNonce `json:"nonce" gencodec:"required"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce BlockNonce `json:"nonce"`
}
// field type overrides for gencodec
diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go b/vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go
index 1b92cd9c..59a1c9c4 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go
@@ -13,6 +13,7 @@ import (
var _ = (*headerMarshaling)(nil)
+// MarshalJSON marshals as JSON.
func (h Header) MarshalJSON() ([]byte, error) {
type Header struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
@@ -28,8 +29,8 @@ func (h Header) MarshalJSON() ([]byte, error) {
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Big `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
- MixDigest common.Hash `json:"mixHash" gencodec:"required"`
- Nonce BlockNonce `json:"nonce" gencodec:"required"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce BlockNonce `json:"nonce"`
Hash common.Hash `json:"hash"`
}
var enc Header
@@ -52,6 +53,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
return json.Marshal(&enc)
}
+// UnmarshalJSON unmarshals from JSON.
func (h *Header) UnmarshalJSON(input []byte) error {
type Header struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
@@ -67,8 +69,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Big `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
- MixDigest *common.Hash `json:"mixHash" gencodec:"required"`
- Nonce *BlockNonce `json:"nonce" gencodec:"required"`
+ MixDigest *common.Hash `json:"mixHash"`
+ Nonce *BlockNonce `json:"nonce"`
}
var dec Header
if err := json.Unmarshal(input, &dec); err != nil {
@@ -126,13 +128,11 @@ func (h *Header) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'extraData' for Header")
}
h.Extra = *dec.Extra
- if dec.MixDigest == nil {
- return errors.New("missing required field 'mixHash' for Header")
+ if dec.MixDigest != nil {
+ h.MixDigest = *dec.MixDigest
}
- h.MixDigest = *dec.MixDigest
- if dec.Nonce == nil {
- return errors.New("missing required field 'nonce' for Header")
+ if dec.Nonce != nil {
+ h.Nonce = *dec.Nonce
}
- h.Nonce = *dec.Nonce
return nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go b/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go
index 968d2219..ba4d1e9e 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go
@@ -339,6 +339,12 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
contract := NewContract(caller, to, new(big.Int), gas)
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
+ // We do an AddBalance of zero here, just in order to trigger a touch.
+ // This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium,
+ // but is the correct thing to do and matters on other networks, in tests, and potential
+ // future scenarios
+ evm.StateDB.AddBalance(addr, bigZero)
+
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
// when we're in Homestead this also counts for code storage gas errors.
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/evm/json_logger.go b/vendor/github.com/ethereum/go-ethereum/core/vm/logger_json.go
similarity index 81%
rename from vendor/github.com/ethereum/go-ethereum/cmd/evm/json_logger.go
rename to vendor/github.com/ethereum/go-ethereum/core/vm/logger_json.go
index 50cb4f0e..ac3c4075 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/evm/json_logger.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/vm/logger_json.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see .
-package main
+package vm
import (
"encoding/json"
@@ -24,17 +24,16 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/core/vm"
)
type JSONLogger struct {
encoder *json.Encoder
- cfg *vm.LogConfig
+ cfg *LogConfig
}
// NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects
// into the provided stream.
-func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
+func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger {
return &JSONLogger{json.NewEncoder(writer), cfg}
}
@@ -43,8 +42,8 @@ func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create
}
// CaptureState outputs state information on the logger.
-func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
- log := vm.StructLog{
+func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
+ log := StructLog{
Pc: pc,
Op: op,
Gas: gas,
@@ -65,7 +64,7 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cos
}
// CaptureFault outputs state information on the logger.
-func (l *JSONLogger) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
+func (l *JSONLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
return nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/api.go b/vendor/github.com/ethereum/go-ethereum/eth/api.go
index 3ec3afb8..816b9cd3 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/api.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/api.go
@@ -444,16 +444,16 @@ func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Bloc
if startBlock.Number().Uint64() >= endBlock.Number().Uint64() {
return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64())
}
+ triedb := api.eth.BlockChain().StateCache().TrieDB()
- oldTrie, err := trie.NewSecure(startBlock.Root(), trie.NewDatabase(api.eth.chainDb), 0)
+ oldTrie, err := trie.NewSecure(startBlock.Root(), triedb, 0)
if err != nil {
return nil, err
}
- newTrie, err := trie.NewSecure(endBlock.Root(), trie.NewDatabase(api.eth.chainDb), 0)
+ newTrie, err := trie.NewSecure(endBlock.Root(), triedb, 0)
if err != nil {
return nil, err
}
-
diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))
iter := trie.NewIterator(diff)
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/api_backend.go b/vendor/github.com/ethereum/go-ethereum/eth/api_backend.go
index 8748d444..a48815e0 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/api_backend.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/api_backend.go
@@ -125,12 +125,12 @@ func (b *EthAPIBackend) GetTd(blockHash common.Hash) *big.Int {
return b.eth.blockchain.GetTdByHash(blockHash)
}
-func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) {
+func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) {
state.SetBalance(msg.From(), math.MaxBig256)
vmError := func() error { return nil }
context := core.NewEVMContext(msg, header, b.eth.BlockChain(), nil)
- return vm.NewEVM(context, state, b.eth.chainConfig, vmCfg), vmError, nil
+ return vm.NewEVM(context, state, b.eth.chainConfig, *b.eth.blockchain.GetVMConfig()), vmError, nil
}
func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go b/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go
index 80552ada..0b8f8aa0 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go
@@ -17,11 +17,13 @@
package eth
import (
+ "bufio"
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
+ "os"
"runtime"
"sync"
"time"
@@ -60,6 +62,13 @@ type TraceConfig struct {
Reexec *uint64
}
+// StdTraceConfig holds extra parameters to standard-json trace functions.
+type StdTraceConfig struct {
+ *vm.LogConfig
+ Reexec *uint64
+ TxHash common.Hash
+}
+
// txTraceResult is the result of a single transaction trace.
type txTraceResult struct {
Result interface{} `json:"result,omitempty"` // Trace results produced by the tracer
@@ -138,7 +147,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
// Ensure we have a valid starting state before doing any work
origin := start.NumberU64()
- database := state.NewDatabase(api.eth.ChainDb())
+ database := state.NewDatabaseWithCache(api.eth.ChainDb(), 16) // Chain tracing will probably start at genesis
if number := start.NumberU64(); number > 0 {
start = api.eth.blockchain.GetBlock(start.ParentHash(), start.NumberU64()-1)
@@ -366,7 +375,7 @@ func (api *PrivateDebugAPI) TraceBlockByNumber(ctx context.Context, number rpc.B
func (api *PrivateDebugAPI) TraceBlockByHash(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
block := api.eth.blockchain.GetBlockByHash(hash)
if block == nil {
- return nil, fmt.Errorf("block #%x not found", hash)
+ return nil, fmt.Errorf("block %#x not found", hash)
}
return api.traceBlock(ctx, block, config)
}
@@ -391,13 +400,41 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(ctx context.Context, file string,
return api.TraceBlock(ctx, blob, config)
}
-// TraceBadBlock returns the structured logs created during the execution of a block
-// within the blockchain 'badblocks' cache
-func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, index int, config *TraceConfig) ([]*txTraceResult, error) {
- if blocks := api.eth.blockchain.BadBlocks(); index < len(blocks) {
- return api.traceBlock(ctx, blocks[index], config)
+// TraceBadBlockByHash returns the structured logs created during the execution of
+// EVM against a block pulled from the pool of bad ones and returns them as a JSON
+// object.
+func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
+ blocks := api.eth.blockchain.BadBlocks()
+ for _, block := range blocks {
+ if block.Hash() == hash {
+ return api.traceBlock(ctx, block, config)
+ }
}
- return nil, fmt.Errorf("index out of range")
+ return nil, fmt.Errorf("bad block %#x not found", hash)
+}
+
+// StandardTraceBlockToFile dumps the structured logs created during the
+// execution of EVM to the local file system and returns a list of files
+// to the caller.
+func (api *PrivateDebugAPI) StandardTraceBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
+ block := api.eth.blockchain.GetBlockByHash(hash)
+ if block == nil {
+ return nil, fmt.Errorf("block %#x not found", hash)
+ }
+ return api.standardTraceBlockToFile(ctx, block, config)
+}
+
+// StandardTraceBadBlockToFile dumps the structured logs created during the
+// execution of EVM against a block pulled from the pool of bad ones to the
+// local file system and returns a list of files to the caller.
+func (api *PrivateDebugAPI) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
+ blocks := api.eth.blockchain.BadBlocks()
+ for _, block := range blocks {
+ if block.Hash() == hash {
+ return api.standardTraceBlockToFile(ctx, block, config)
+ }
+ }
+ return nil, fmt.Errorf("bad block %#x not found", hash)
}
// traceBlock configures a new tracer according to the provided configuration, and
@@ -410,7 +447,7 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
}
parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
- return nil, fmt.Errorf("parent %x not found", block.ParentHash())
+ return nil, fmt.Errorf("parent %#x not found", block.ParentHash())
}
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
@@ -481,6 +518,106 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
return results, nil
}
+// standardTraceBlockToFile configures a new tracer which uses standard JSON output,
+// and traces either a full block or an individual transaction. The return value will
+// be one filename per transaction traced.
+func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) {
+ // If we're tracing a single transaction, make sure it's present
+ if config != nil && config.TxHash != (common.Hash{}) {
+ var exists bool
+ for _, tx := range block.Transactions() {
+ if exists = (tx.Hash() == config.TxHash); exists {
+ break
+ }
+ }
+ if !exists {
+ return nil, fmt.Errorf("transaction %#x not found in block", config.TxHash)
+ }
+ }
+ // Create the parent state database
+ if err := api.eth.engine.VerifyHeader(api.eth.blockchain, block.Header(), true); err != nil {
+ return nil, err
+ }
+ parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
+ if parent == nil {
+ return nil, fmt.Errorf("parent %#x not found", block.ParentHash())
+ }
+ reexec := defaultTraceReexec
+ if config != nil && config.Reexec != nil {
+ reexec = *config.Reexec
+ }
+ statedb, err := api.computeStateDB(parent, reexec)
+ if err != nil {
+ return nil, err
+ }
+ // Retrieve the tracing configurations, or use default values
+ var (
+ logConfig vm.LogConfig
+ txHash common.Hash
+ )
+ if config != nil {
+ if config.LogConfig != nil {
+ logConfig = *config.LogConfig
+ }
+ txHash = config.TxHash
+ }
+ logConfig.Debug = true
+
+ // Execute transaction, either tracing all or just the requested one
+ var (
+ signer = types.MakeSigner(api.config, block.Number())
+ dumps []string
+ )
+ for i, tx := range block.Transactions() {
+ // Prepare the trasaction for un-traced execution
+ var (
+ msg, _ = tx.AsMessage(signer)
+ vmctx = core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)
+
+ vmConf vm.Config
+ dump *os.File
+ err error
+ )
+ // If the transaction needs tracing, swap out the configs
+ if tx.Hash() == txHash || txHash == (common.Hash{}) {
+ // Generate a unique temporary file to dump it into
+ prefix := fmt.Sprintf("block_%#x-%d-%#x-", block.Hash().Bytes()[:4], i, tx.Hash().Bytes()[:4])
+
+ dump, err = ioutil.TempFile(os.TempDir(), prefix)
+ if err != nil {
+ return nil, err
+ }
+ dumps = append(dumps, dump.Name())
+
+ // Swap out the noop logger to the standard tracer
+ vmConf = vm.Config{
+ Debug: true,
+ Tracer: vm.NewJSONLogger(&logConfig, bufio.NewWriter(dump)),
+ EnablePreimageRecording: true,
+ }
+ }
+ // Execute the transaction and flush any traces to disk
+ vmenv := vm.NewEVM(vmctx, statedb, api.config, vmConf)
+ _, _, _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
+
+ if dump != nil {
+ dump.Close()
+ log.Info("Wrote standard trace", "file", dump.Name())
+ }
+ if err != nil {
+ return dumps, err
+ }
+ // Finalize the state so any modifications are written to the trie
+ statedb.Finalise(true)
+
+ // If we've traced the transaction we were looking for, abort
+ if tx.Hash() == txHash {
+ break
+ }
+ }
+ return dumps, nil
+}
+
// computeStateDB retrieves the state database associated with a certain block.
// If no state is locally available for the given block, a number of blocks are
// attempted to be reexecuted to generate the desired state.
@@ -492,7 +629,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
}
// Otherwise try to reexec blocks until we find a state or reach our limit
origin := block.NumberU64()
- database := state.NewDatabase(api.eth.ChainDb())
+ database := state.NewDatabaseWithCache(api.eth.ChainDb(), 16)
for i := uint64(0); i < reexec; i++ {
block = api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
@@ -506,7 +643,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
if err != nil {
switch err.(type) {
case *trie.MissingNodeError:
- return nil, errors.New("required historical state unavailable")
+ return nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec)
default:
return nil, err
}
@@ -520,7 +657,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
for block.NumberU64() < origin {
// Print progress logs if long enough time elapsed
if time.Since(logged) > 8*time.Second {
- log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "elapsed", time.Since(start))
+ log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "remaining", origin-block.NumberU64()-1, "elapsed", time.Since(start))
logged = time.Now()
}
// Retrieve the next block to regenerate and process it
@@ -529,15 +666,15 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
}
_, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{})
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err)
}
// Finalize the state so any modifications are written to the trie
- root, err := statedb.Commit(true)
+ root, err := statedb.Commit(api.eth.blockchain.Config().IsEIP158(block.Number()))
if err != nil {
return nil, err
}
if err := statedb.Reset(root); err != nil {
- return nil, err
+ return nil, fmt.Errorf("state reset after block %d failed: %v", block.NumberU64(), err)
}
database.TrieDB().Reference(root, common.Hash{})
if proot != (common.Hash{}) {
@@ -556,7 +693,7 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, hash common.Ha
// Retrieve the transaction and assemble its EVM context
tx, blockHash, _, index := rawdb.ReadTransaction(api.eth.ChainDb(), hash)
if tx == nil {
- return nil, fmt.Errorf("transaction %x not found", hash)
+ return nil, fmt.Errorf("transaction %#x not found", hash)
}
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
@@ -636,11 +773,11 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree
// Create the parent state database
block := api.eth.blockchain.GetBlockByHash(blockHash)
if block == nil {
- return nil, vm.Context{}, nil, fmt.Errorf("block %x not found", blockHash)
+ return nil, vm.Context{}, nil, fmt.Errorf("block %#x not found", blockHash)
}
parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
- return nil, vm.Context{}, nil, fmt.Errorf("parent %x not found", block.ParentHash())
+ return nil, vm.Context{}, nil, fmt.Errorf("parent %#x not found", block.ParentHash())
}
statedb, err := api.computeStateDB(parent, reexec)
if err != nil {
@@ -659,10 +796,10 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree
// Not yet the searched for transaction, execute on top of the current state
vmenv := vm.NewEVM(context, statedb, api.config, vm.Config{})
if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
- return nil, vm.Context{}, nil, fmt.Errorf("tx %x failed: %v", tx.Hash(), err)
+ return nil, vm.Context{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
statedb.Finalise(true)
}
- return nil, vm.Context{}, nil, fmt.Errorf("tx index %d out of range for block %x", txIndex, blockHash)
+ return nil, vm.Context{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, blockHash)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/backend.go b/vendor/github.com/ethereum/go-ethereum/eth/backend.go
index b555b064..354fc17d 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/backend.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/backend.go
@@ -118,7 +118,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if err != nil {
return nil, err
}
- chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis)
+ chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.ConstantinopleOverride)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr
}
@@ -154,7 +154,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
EWASMInterpreter: config.EWASMInterpreter,
EVMInterpreter: config.EVMInterpreter,
}
- cacheConfig = &core.CacheConfig{Disabled: config.NoPruning, TrieNodeLimit: config.TrieCache, TrieTimeLimit: config.TrieTimeout}
+ cacheConfig = &core.CacheConfig{Disabled: config.NoPruning, TrieCleanLimit: config.TrieCleanCache, TrieDirtyLimit: config.TrieDirtyCache, TrieTimeLimit: config.TrieTimeout}
)
eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, eth.chainConfig, eth.engine, vmConfig, eth.shouldPreserve)
if err != nil {
@@ -173,7 +173,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
}
eth.txPool = core.NewTxPool(config.TxPool, eth.chainConfig, eth.blockchain)
- if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.SyncMode, config.NetworkId, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb); err != nil {
+ if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.SyncMode, config.NetworkId, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb, config.Whitelist); err != nil {
return nil, err
}
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/config.go b/vendor/github.com/ethereum/go-ethereum/eth/config.go
index e32c01a7..7c041d1a 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/config.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/config.go
@@ -43,15 +43,16 @@ var DefaultConfig = Config{
DatasetsInMem: 1,
DatasetsOnDisk: 2,
},
- NetworkId: 1,
- LightPeers: 100,
- DatabaseCache: 768,
- TrieCache: 256,
- TrieTimeout: 60 * time.Minute,
- MinerGasFloor: 8000000,
- MinerGasCeil: 8000000,
- MinerGasPrice: big.NewInt(params.GWei),
- MinerRecommit: 3 * time.Second,
+ NetworkId: 1,
+ LightPeers: 100,
+ DatabaseCache: 512,
+ TrieCleanCache: 256,
+ TrieDirtyCache: 256,
+ TrieTimeout: 60 * time.Minute,
+ MinerGasFloor: 8000000,
+ MinerGasCeil: 8000000,
+ MinerGasPrice: big.NewInt(params.GWei),
+ MinerRecommit: 3 * time.Second,
TxPool: core.DefaultTxPoolConfig,
GPO: gasprice.Config{
@@ -86,6 +87,9 @@ type Config struct {
SyncMode downloader.SyncMode
NoPruning bool
+ // Whitelist of required block number -> hash values to accept
+ Whitelist map[uint64]common.Hash `toml:"-"`
+
// Light client options
LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests
LightPeers int `toml:",omitempty"` // Maximum number of LES client peers
@@ -94,7 +98,8 @@ type Config struct {
SkipBcVersionCheck bool `toml:"-"`
DatabaseHandles int `toml:"-"`
DatabaseCache int
- TrieCache int
+ TrieCleanCache int
+ TrieDirtyCache int
TrieTimeout time.Duration
// Mining-related options
@@ -124,8 +129,12 @@ type Config struct {
// Type of the EWASM interpreter ("" for default)
EWASMInterpreter string
+
// Type of the EVM interpreter ("" for default)
EVMInterpreter string
+
+ // Constantinople block override (TODO: remove after the fork)
+ ConstantinopleOverride *big.Int
}
type configMarshaling struct {
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go b/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go
index 56c54c8e..3a177ab9 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go
@@ -99,6 +99,7 @@ type Downloader struct {
mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle)
mux *event.TypeMux // Event multiplexer to announce sync operation events
+ genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT)
queue *queue // Scheduler for selecting the hashes to download
peers *peerSet // Set of active peers from which download can proceed
stateDB ethdb.Database
@@ -181,6 +182,9 @@ type BlockChain interface {
// HasBlock verifies a block's presence in the local chain.
HasBlock(common.Hash, uint64) bool
+ // HasFastBlock verifies a fast block's presence in the local chain.
+ HasFastBlock(common.Hash, uint64) bool
+
// GetBlockByHash retrieves a block from the local chain.
GetBlockByHash(common.Hash) *types.Block
@@ -430,7 +434,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
}
height := latest.Number.Uint64()
- origin, err := d.findAncestor(p, height)
+ origin, err := d.findAncestor(p, latest)
if err != nil {
return err
}
@@ -587,41 +591,107 @@ func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
}
}
+// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
+// common ancestor.
+// It returns parameters to be used for peer.RequestHeadersByNumber:
+// from - starting block number
+// count - number of headers to request
+// skip - number of headers to skip
+// and also returns 'max', the last block which is expected to be returned by the remote peers,
+// given the (from,count,skip)
+func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
+ var (
+ from int
+ count int
+ MaxCount = MaxHeaderFetch / 16
+ )
+ // requestHead is the highest block that we will ask for. If requestHead is not offset,
+ // the highest block that we will get is 16 blocks back from head, which means we
+ // will fetch 14 or 15 blocks unnecessarily in the case the height difference
+ // between us and the peer is 1-2 blocks, which is most common
+ requestHead := int(remoteHeight) - 1
+ if requestHead < 0 {
+ requestHead = 0
+ }
+ // requestBottom is the lowest block we want included in the query
+ // Ideally, we want to include just below own head
+ requestBottom := int(localHeight - 1)
+ if requestBottom < 0 {
+ requestBottom = 0
+ }
+ totalSpan := requestHead - requestBottom
+ span := 1 + totalSpan/MaxCount
+ if span < 2 {
+ span = 2
+ }
+ if span > 16 {
+ span = 16
+ }
+
+ count = 1 + totalSpan/span
+ if count > MaxCount {
+ count = MaxCount
+ }
+ if count < 2 {
+ count = 2
+ }
+ from = requestHead - (count-1)*span
+ if from < 0 {
+ from = 0
+ }
+ max := from + (count-1)*span
+ return int64(from), count, span - 1, uint64(max)
+}
+
// findAncestor tries to locate the common ancestor link of the local chain and
// a remote peers blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
-func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
+func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
// Figure out the valid ancestor range to prevent rewrite attacks
- floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()
+ var (
+ floor = int64(-1)
+ localHeight uint64
+ remoteHeight = remoteHeader.Number.Uint64()
+ )
+ switch d.mode {
+ case FullSync:
+ localHeight = d.blockchain.CurrentBlock().NumberU64()
+ case FastSync:
+ localHeight = d.blockchain.CurrentFastBlock().NumberU64()
+ default:
+ localHeight = d.lightchain.CurrentHeader().Number.Uint64()
+ }
+ p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
+ if localHeight >= MaxForkAncestry {
+ // We're above the max reorg threshold, find the earliest fork point
+ floor = int64(localHeight - MaxForkAncestry)
- if d.mode == FullSync {
- ceil = d.blockchain.CurrentBlock().NumberU64()
- } else if d.mode == FastSync {
- ceil = d.blockchain.CurrentFastBlock().NumberU64()
+ // If we're doing a light sync, ensure the floor doesn't go below the CHT, as
+ // all headers before that point will be missing.
+ if d.mode == LightSync {
+ // If we dont know the current CHT position, find it
+ if d.genesis == 0 {
+ header := d.lightchain.CurrentHeader()
+ for header != nil {
+ d.genesis = header.Number.Uint64()
+ if floor >= int64(d.genesis)-1 {
+ break
+ }
+ header = d.lightchain.GetHeaderByHash(header.ParentHash)
+ }
+ }
+ // We already know the "genesis" block number, cap floor to that
+ if floor < int64(d.genesis)-1 {
+ floor = int64(d.genesis) - 1
+ }
+ }
}
- if ceil >= MaxForkAncestry {
- floor = int64(ceil - MaxForkAncestry)
- }
- p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
+ from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
- // Request the topmost blocks to short circuit binary ancestor lookup
- head := ceil
- if head > height {
- head = height
- }
- from := int64(head) - int64(MaxHeaderFetch)
- if from < 0 {
- from = 0
- }
- // Span out with 15 block gaps into the future to catch bad head reports
- limit := 2 * MaxHeaderFetch / 16
- count := 1 + int((int64(ceil)-from)/16)
- if count > limit {
- count = limit
- }
- go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false)
+ p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
+ go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)
// Wait for the remote response to the head fetch
number, hash := uint64(0), common.Hash{}
@@ -647,9 +717,10 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
return 0, errEmptyHeaderSet
}
// Make sure the peer's reply conforms to the request
- for i := 0; i < len(headers); i++ {
- if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
- p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
+ for i, header := range headers {
+ expectNumber := from + int64(i)*int64((skip+1))
+ if number := header.Number.Int64(); number != expectNumber {
+ p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
return 0, errInvalidChain
}
}
@@ -657,20 +728,24 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
finished = true
for i := len(headers) - 1; i >= 0; i-- {
// Skip any headers that underflow/overflow our requested set
- if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
+ if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
continue
}
// Otherwise check if we already know the header or not
h := headers[i].Hash()
n := headers[i].Number.Uint64()
- if (d.mode == FullSync && d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && d.lightchain.HasHeader(h, n)) {
- number, hash = n, h
- // If every header is known, even future ones, the peer straight out lied about its head
- if number > height && i == limit-1 {
- p.log.Warn("Lied about chain head", "reported", height, "found", number)
- return 0, errStallingPeer
- }
+ var known bool
+ switch d.mode {
+ case FullSync:
+ known = d.blockchain.HasBlock(h, n)
+ case FastSync:
+ known = d.blockchain.HasFastBlock(h, n)
+ default:
+ known = d.lightchain.HasHeader(h, n)
+ }
+ if known {
+ number, hash = n, h
break
}
}
@@ -694,10 +769,12 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
return number, nil
}
// Ancestor not found, we need to binary search over our chain
- start, end := uint64(0), head
+ start, end := uint64(0), remoteHeight
if floor > 0 {
start = uint64(floor)
}
+ p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)
+
for start+1 < end {
// Split our chain interval in two, and request the hash to cross check
check := (start + end) / 2
@@ -730,7 +807,17 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
// Modify the search interval based on the response
h := headers[0].Hash()
n := headers[0].Number.Uint64()
- if (d.mode == FullSync && !d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && !d.lightchain.HasHeader(h, n)) {
+
+ var known bool
+ switch d.mode {
+ case FullSync:
+ known = d.blockchain.HasBlock(h, n)
+ case FastSync:
+ known = d.blockchain.HasFastBlock(h, n)
+ default:
+ known = d.lightchain.HasHeader(h, n)
+ }
+ if !known {
end = check
break
}
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader_test.go b/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader_test.go
index 1fe02d88..1a42965d 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader_test.go
@@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"math/big"
+ "strings"
"sync"
"sync/atomic"
"testing"
@@ -114,6 +115,15 @@ func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
return dl.GetBlockByHash(hash) != nil
}
+// HasFastBlock checks if a block is present in the testers canonical chain.
+func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ _, ok := dl.ownReceipts[hash]
+ return ok
+}
+
// GetHeader retrieves a header from the testers canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
dl.lock.RLock()
@@ -234,6 +244,7 @@ func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
dl.ownHeaders[block.Hash()] = block.Header()
}
dl.ownBlocks[block.Hash()] = block
+ dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
}
@@ -374,28 +385,28 @@ func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
+ // Mark this method as a helper to report errors at callsite, not in here
+ t.Helper()
+
assertOwnForkedChain(t, tester, 1, []int{length})
}
// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
- // Initialize the counters for the first fork
- headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks
+ // Mark this method as a helper to report errors at callsite, not in here
+ t.Helper()
+
+ // Initialize the counters for the first fork
+ headers, blocks, receipts := lengths[0], lengths[0], lengths[0]
- if receipts < 0 {
- receipts = 1
- }
// Update the counters for each subsequent fork
for _, length := range lengths[1:] {
headers += length - common
blocks += length - common
- receipts += length - common - fsMinFullBlocks
+ receipts += length - common
}
- switch tester.downloader.mode {
- case FullSync:
- receipts = 1
- case LightSync:
+ if tester.downloader.mode == LightSync {
blocks, receipts = 1, 1
}
if hs := len(tester.ownHeaders); hs != headers {
@@ -1149,7 +1160,9 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
}
func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
+ // Mark this method as a helper to report errors at callsite, not in here
t.Helper()
+
p := d.Progress()
p.KnownStates, p.PulledStates = 0, 0
want.KnownStates, want.PulledStates = 0, 0
@@ -1479,3 +1492,78 @@ func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int
}
return nil
}
+
+func TestRemoteHeaderRequestSpan(t *testing.T) {
+ testCases := []struct {
+ remoteHeight uint64
+ localHeight uint64
+ expected []int
+ }{
+ // Remote is way higher. We should ask for the remote head and go backwards
+ {1500, 1000,
+ []int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
+ },
+ {15000, 13006,
+ []int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
+ },
+ //Remote is pretty close to us. We don't have to fetch as many
+ {1200, 1150,
+ []int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
+ },
+ // Remote is equal to us (so on a fork with higher td)
+ // We should get the closest couple of ancestors
+ {1500, 1500,
+ []int{1497, 1499},
+ },
+ // We're higher than the remote! Odd
+ {1000, 1500,
+ []int{997, 999},
+ },
+ // Check some weird edgecases that it behaves somewhat rationally
+ {0, 1500,
+ []int{0, 2},
+ },
+ {6000000, 0,
+ []int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
+ },
+ {0, 0,
+ []int{0, 2},
+ },
+ }
+ reqs := func(from, count, span int) []int {
+ var r []int
+ num := from
+ for len(r) < count {
+ r = append(r, num)
+ num += span + 1
+ }
+ return r
+ }
+ for i, tt := range testCases {
+ from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
+ data := reqs(int(from), count, span)
+
+ if max != uint64(data[len(data)-1]) {
+ t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
+ }
+ failed := false
+ if len(data) != len(tt.expected) {
+ failed = true
+ t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
+ } else {
+ for j, n := range data {
+ if n != tt.expected[j] {
+ failed = true
+ break
+ }
+ }
+ }
+ if failed {
+ res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
+ exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
+ fmt.Printf("got: %v\n", res)
+ fmt.Printf("exp: %v\n", exp)
+ t.Errorf("test %d: wrong values", i)
+ }
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go b/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go
index 863cc8de..7c339538 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go
@@ -325,7 +325,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
}
// Make sure no duplicate requests are executed
if _, ok := q.blockTaskPool[hash]; ok {
- log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
+ log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
continue
}
if _, ok := q.receiptTaskPool[hash]; ok {
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/gen_config.go b/vendor/github.com/ethereum/go-ethereum/eth/gen_config.go
index d401a917..2777aa9e 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/gen_config.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/gen_config.go
@@ -28,7 +28,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
SkipBcVersionCheck bool `toml:"-"`
DatabaseHandles int `toml:"-"`
DatabaseCache int
- TrieCache int
+ TrieCleanCache int
+ TrieDirtyCache int
TrieTimeout time.Duration
Etherbase common.Address `toml:",omitempty"`
MinerNotify []string `toml:",omitempty"`
@@ -43,6 +44,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
GPO gasprice.Config
EnablePreimageRecording bool
DocRoot string `toml:"-"`
+ EWASMInterpreter string
+ EVMInterpreter string
}
var enc Config
enc.Genesis = c.Genesis
@@ -54,7 +57,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.SkipBcVersionCheck = c.SkipBcVersionCheck
enc.DatabaseHandles = c.DatabaseHandles
enc.DatabaseCache = c.DatabaseCache
- enc.TrieCache = c.TrieCache
+ enc.TrieCleanCache = c.TrieCleanCache
+ enc.TrieDirtyCache = c.TrieDirtyCache
enc.TrieTimeout = c.TrieTimeout
enc.Etherbase = c.Etherbase
enc.MinerNotify = c.MinerNotify
@@ -69,6 +73,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.GPO = c.GPO
enc.EnablePreimageRecording = c.EnablePreimageRecording
enc.DocRoot = c.DocRoot
+ enc.EWASMInterpreter = c.EWASMInterpreter
+ enc.EVMInterpreter = c.EVMInterpreter
return &enc, nil
}
@@ -84,7 +90,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
SkipBcVersionCheck *bool `toml:"-"`
DatabaseHandles *int `toml:"-"`
DatabaseCache *int
- TrieCache *int
+ TrieCleanCache *int
+ TrieDirtyCache *int
TrieTimeout *time.Duration
Etherbase *common.Address `toml:",omitempty"`
MinerNotify []string `toml:",omitempty"`
@@ -99,6 +106,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
GPO *gasprice.Config
EnablePreimageRecording *bool
DocRoot *string `toml:"-"`
+ EWASMInterpreter *string
+ EVMInterpreter *string
}
var dec Config
if err := unmarshal(&dec); err != nil {
@@ -131,8 +140,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.DatabaseCache != nil {
c.DatabaseCache = *dec.DatabaseCache
}
- if dec.TrieCache != nil {
- c.TrieCache = *dec.TrieCache
+ if dec.TrieCleanCache != nil {
+ c.TrieCleanCache = *dec.TrieCleanCache
+ }
+ if dec.TrieDirtyCache != nil {
+ c.TrieDirtyCache = *dec.TrieDirtyCache
}
if dec.TrieTimeout != nil {
c.TrieTimeout = *dec.TrieTimeout
@@ -176,5 +188,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.DocRoot != nil {
c.DocRoot = *dec.DocRoot
}
+ if dec.EWASMInterpreter != nil {
+ c.EWASMInterpreter = *dec.EWASMInterpreter
+ }
+ if dec.EVMInterpreter != nil {
+ c.EVMInterpreter = *dec.EVMInterpreter
+ }
return nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/handler.go b/vendor/github.com/ethereum/go-ethereum/eth/handler.go
index bd227a84..b42612a5 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/handler.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/handler.go
@@ -88,6 +88,8 @@ type ProtocolManager struct {
txsSub event.Subscription
minedBlockSub *event.TypeMuxSubscription
+ whitelist map[uint64]common.Hash
+
// channels for fetcher, syncer, txsyncLoop
newPeerCh chan *peer
txsyncCh chan *txsync
@@ -101,7 +103,7 @@ type ProtocolManager struct {
// NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the Ethereum network.
-func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
+func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database, whitelist map[uint64]common.Hash) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
networkID: networkID,
@@ -110,6 +112,7 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
blockchain: blockchain,
chainconfig: config,
peers: newPeerSet(),
+ whitelist: whitelist,
newPeerCh: make(chan *peer),
noMorePeers: make(chan struct{}),
txsyncCh: make(chan *txsync),
@@ -307,7 +310,13 @@ func (pm *ProtocolManager) handle(p *peer) error {
}
}()
}
- // main loop. handle incoming messages.
+ // If we have any explicit whitelist block hashes, request them
+ for number := range pm.whitelist {
+ if err := p.RequestHeadersByNumber(number, 1, 0, false); err != nil {
+ return err
+ }
+ }
+ // Handle incoming messages until the connection is torn down
for {
if err := pm.handleMsg(p); err != nil {
p.Log().Debug("Ethereum message handling failed", "err", err)
@@ -466,6 +475,14 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
p.Log().Debug("Verified to be on the same side of the DAO fork")
return nil
}
+ // Otherwise if it's a whitelisted block, validate against the set
+ if want, ok := pm.whitelist[headers[0].Number.Uint64()]; ok {
+ if hash := headers[0].Hash(); want != hash {
+ p.Log().Info("Whitelist mismatch, dropping peer", "number", headers[0].Number.Uint64(), "hash", hash, "want", want)
+ return errors.New("whitelist block mismatch")
+ }
+ p.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want)
+ }
// Irrelevant of the fork checks, send the header to the fetcher just in case
headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
}
@@ -658,7 +675,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
p.SetHead(trueHead, trueTD)
// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
- // a singe block (as the true TD is below the propagated block), however this
+ // a single block (as the true TD is below the propagated block), however this
// scenario should easily be covered by the fetcher.
currentBlock := pm.blockchain.CurrentBlock()
if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/handler_test.go b/vendor/github.com/ethereum/go-ethereum/eth/handler_test.go
index 7811cd48..9fffd958 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/handler_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/handler_test.go
@@ -478,7 +478,7 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
- pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db)
+ pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db, nil)
if err != nil {
t.Fatalf("failed to start test protocol manager: %v", err)
}
@@ -559,7 +559,7 @@ func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
- pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db)
+ pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db, nil)
if err != nil {
t.Fatalf("failed to start test protocol manager: %v", err)
}
@@ -585,7 +585,7 @@ func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
}
}(peer)
}
- timeoutCh := time.NewTimer(time.Millisecond * 100).C
+ timeout := time.After(300 * time.Millisecond)
var receivedCount int
outer:
for {
@@ -597,7 +597,7 @@ outer:
if receivedCount == totalPeers {
break outer
}
- case <-timeoutCh:
+ case <-timeout:
break outer
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/helper_test.go b/vendor/github.com/ethereum/go-ethereum/eth/helper_test.go
index 4e38a129..b18a02ba 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/helper_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/helper_test.go
@@ -66,7 +66,7 @@ func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func
panic(err)
}
- pm, err := NewProtocolManager(gspec.Config, mode, DefaultConfig.NetworkId, evmux, &testTxPool{added: newtx}, engine, blockchain, db)
+ pm, err := NewProtocolManager(gspec.Config, mode, DefaultConfig.NetworkId, evmux, &testTxPool{added: newtx}, engine, blockchain, db, nil)
if err != nil {
return nil, nil, err
}
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go
index 04dd6fe8..addd3288 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go
@@ -1,14 +1,14 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
-// 4byte_tracer.js
-// bigram_tracer.js
-// call_tracer.js
-// evmdis_tracer.js
-// noop_tracer.js
-// opcount_tracer.js
-// prestate_tracer.js
-// trigram_tracer.js
-// unigram_tracer.js
+// 4byte_tracer.js (2.933kB)
+// bigram_tracer.js (1.712kB)
+// call_tracer.js (8.596kB)
+// evmdis_tracer.js (4.194kB)
+// noop_tracer.js (1.271kB)
+// opcount_tracer.js (1.372kB)
+// prestate_tracer.js (3.892kB)
+// trigram_tracer.js (1.788kB)
+// unigram_tracer.js (1.51kB)
package tracers
@@ -28,7 +28,7 @@ import (
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
- return nil, fmt.Errorf("Read %q: %v", name, err)
+ return nil, fmt.Errorf("read %q: %v", name, err)
}
var buf bytes.Buffer
@@ -36,7 +36,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
clErr := gz.Close()
if err != nil {
- return nil, fmt.Errorf("Read %q: %v", name, err)
+ return nil, fmt.Errorf("read %q: %v", name, err)
}
if clErr != nil {
return nil, err
@@ -197,7 +197,7 @@ func opcount_tracerJs() (*asset, error) {
return a, nil
}
-var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x57\xdd\x6f\x1b\xb9\x11\x7f\xde\xfd\x2b\xa6\x7e\x91\x84\x53\x56\xce\x15\xb8\x02\x72\x5d\x60\xa3\x28\x89\x00\x9d\x6d\x48\x4a\x5d\xf7\x70\x0f\x5c\x72\x76\xc5\x13\x45\x2e\x48\xae\x3e\x10\xf8\x7f\x2f\x86\xfb\x21\xcb\x67\x27\x6e\xeb\x27\x2f\x39\xfc\xcd\xf7\x6f\x46\xa3\x11\x4c\x4c\x79\xb4\xb2\x58\x7b\xf8\xf9\xf2\xfd\xdf\x60\xb5\x46\x28\xcc\x3b\xf4\x6b\xb4\x58\x6d\x21\xad\xfc\xda\x58\x17\x8f\x46\xb0\x5a\x4b\x07\xb9\x54\x08\xd2\x41\xc9\xac\x07\x93\x83\x7f\x26\xaf\x64\x66\x99\x3d\x26\xf1\x68\x54\xbf\x79\xf1\x9a\x10\x72\x8b\x08\xce\xe4\x7e\xcf\x2c\x8e\xe1\x68\x2a\xe0\x4c\x83\x45\x21\x9d\xb7\x32\xab\x3c\x82\xf4\xc0\xb4\x18\x19\x0b\x5b\x23\x64\x7e\x24\x48\xe9\xa1\xd2\x02\x6d\x50\xed\xd1\x6e\x5d\x6b\xc7\xe7\x9b\xaf\x30\x47\xe7\xd0\xc2\x67\xd4\x68\x99\x82\xbb\x2a\x53\x92\xc3\x5c\x72\xd4\x0e\x81\x39\x28\xe9\xc4\xad\x51\x40\x16\xe0\xe8\xe1\x27\x32\x65\xd9\x98\x02\x9f\x4c\xa5\x05\xf3\xd2\xe8\x21\xa0\x24\xcb\x61\x87\xd6\x49\xa3\xe1\xaf\xad\xaa\x06\x70\x08\xc6\x12\x48\x9f\x79\x72\xc0\x82\x29\xe9\xdd\x00\x98\x3e\x82\x62\xfe\xf4\xf4\x0d\x01\x39\xf9\x2d\x40\xea\xa0\x66\x6d\x4a\x04\xbf\x66\x9e\xbc\xde\x4b\xa5\x20\x43\xa8\x1c\xe6\x95\x1a\x12\x5a\x56\x79\xb8\x9f\xad\xbe\xdc\x7e\x5d\x41\x7a\xf3\x00\xf7\xe9\x62\x91\xde\xac\x1e\xae\x60\x2f\xfd\xda\x54\x1e\x70\x87\x35\x94\xdc\x96\x4a\xa2\x80\x3d\xb3\x96\x69\x7f\x04\x93\x13\xc2\xaf\xd3\xc5\xe4\x4b\x7a\xb3\x4a\x3f\xcc\xe6\xb3\xd5\x03\x18\x0b\x9f\x66\xab\x9b\xe9\x72\x09\x9f\x6e\x17\x90\xc2\x5d\xba\x58\xcd\x26\x5f\xe7\xe9\x02\xee\xbe\x2e\xee\x6e\x97\xd3\x04\x96\x48\x56\x21\xbd\xff\x71\xcc\xf3\x90\x3d\x8b\x20\xd0\x33\xa9\x5c\x1b\x89\x07\x53\x81\x5b\x9b\x4a\x09\x58\xb3\x1d\x82\x45\x8e\x72\x87\x02\x18\x70\x53\x1e\xdf\x9c\x54\xc2\x62\xca\xe8\x22\xf8\xfc\x6a\x41\xc2\x2c\x07\x6d\xfc\x10\x1c\x22\xfc\x7d\xed\x7d\x39\x1e\x8d\xf6\xfb\x7d\x52\xe8\x2a\x31\xb6\x18\xa9\x1a\xce\x8d\xfe\x91\xc4\x84\x59\x5a\x74\x9e\x79\x5c\x59\xc6\xd1\x82\xa9\x7c\x59\x79\x07\xae\xca\x73\xc9\x25\x6a\x0f\x52\xe7\xc6\x6e\x43\xa5\x80\x37\xc0\x2d\x32\x8f\xc0\x40\x19\xce\x14\xe0\x01\x79\x15\xee\xea\x48\x87\x72\xb5\x4c\x3b\xc6\xc3\x69\x6e\xcd\x96\x7c\xad\x9c\xa7\x7f\x9c\xc3\x6d\xa6\x50\x40\x81\x1a\x9d\x74\x90\x29\xc3\x37\x49\xfc\x2d\x8e\x9e\x18\x43\x75\x12\x3c\x6c\x84\x42\x6d\xec\xb1\x67\x11\xb2\x4a\x2a\x21\x75\x91\xc4\x51\x2b\x3d\x06\x5d\x29\x35\x8c\x03\x84\x32\x66\x53\x95\x29\xe7\xa6\x0a\xb6\xff\x81\xdc\xd7\x60\xae\x44\x2e\x73\x2a\x0e\xd6\xdd\x7a\x13\xae\x3a\xbd\x26\x23\xf9\x24\x8e\xce\x60\xc6\x90\x57\x3a\xb8\xd3\x67\x42\xd8\x21\x88\x6c\xf0\x2d\x8e\xa2\x1d\xb3\x84\x05\xd7\xe0\xcd\x17\x3c\x84\xcb\xc1\x55\x1c\x45\x32\x87\xbe\x5f\x4b\x97\xb4\xc0\xbf\x31\xce\x7f\x87\xeb\xeb\xeb\xd0\xd4\xb9\xd4\x28\x06\x40\x10\xd1\x4b\x62\xf5\x4d\x94\x31\xc5\x34\xc7\x31\xf4\x2e\x0f\x3d\xf8\x09\x44\x96\x14\xe8\x3f\xd4\xa7\xb5\xb2\xc4\x9b\xa5\xb7\x52\x17\xfd\xf7\xbf\x0c\x86\xe1\x95\x36\xe1\x0d\x34\xe2\x37\xa6\x13\xae\xef\xb9\x11\xe1\xba\xb1\xb9\x96\x9a\x18\xd1\x08\x35\x52\xce\x1b\xcb\x0a\x1c\xc3\xb7\x47\xfa\x7e\x24\xaf\x1e\xe3\xe8\xf1\x2c\xca\xcb\x5a\xe8\x95\x28\x37\x10\x80\xda\xdb\xae\xce\x0b\x49\x9d\xfa\x34\x01\x01\xef\x7b\x49\x58\xb6\xa6\x3c\x4b\xc2\x06\x8f\x3f\xce\x04\x5d\x48\x71\xe8\x2e\x36\x78\x1c\x5c\xc5\xaf\xa6\x28\x69\x8c\xfe\x4d\x8a\xc3\xcb\xf9\x22\xc0\x1d\x53\x1d\x60\x1d\xbf\x25\x21\x9c\xec\x1a\x04\xdd\x41\x07\xc9\xfe\xe5\x1a\x2e\x2e\x0f\x97\xff\xe7\xdf\x45\x63\xc1\x0b\x25\xf3\xcc\xec\x37\x98\xf6\x78\x9e\x4f\x8b\xae\x52\x9e\xda\x4e\xea\x9d\xd9\x10\x81\xae\x29\x4f\x4a\x85\xd4\x98\x92\xaa\xc6\xd5\x0c\x96\x21\x6a\x90\x1e\x2d\x23\x0a\x37\x3b\xb4\x34\xbd\xc0\xa2\xaf\xac\x76\x5d\x3a\x73\xa9\x99\x6a\x81\x9b\xec\x7b\xcb\x78\xdd\xbb\xf5\xf9\x93\x9c\x72\x7f\x08\xd9\x0c\x3e\x8e\x46\x90\x7a\x20\x3f\xa1\x34\x52\xfb\x21\xec\x11\x34\xa2\x20\x02\x12\x28\x2a\xee\x03\x5e\x6f\xc7\x54\x85\xbd\x9a\x64\x88\xaa\xc3\x53\x53\xd1\x44\x7a\x42\x42\xc3\x60\xe0\xd6\xec\xc2\xa8\xcd\x18\xdf\x40\xd3\xf8\xc6\xca\x42\xea\xb8\x89\xe9\x59\xd3\x93\x45\x09\x01\x07\xb3\x42\xcd\x50\xee\xe9\xe4\x43\xc8\x7f\x26\x8b\x99\xf6\xcf\x8a\xa8\x8e\x7c\xfb\x74\xf0\x7b\xd2\x34\x71\xe2\x88\x78\xfb\x3f\x0f\x86\xf0\xfe\x97\xae\x32\xbd\x21\x28\xf8\x31\x98\x37\xaf\x43\xc5\xcf\x2b\xe2\xe5\x67\x41\x0d\x31\xc9\x4f\x41\x6b\xe2\xaa\x8c\xd2\x51\xfb\x19\xe2\x78\xce\x26\x57\xdf\xc1\x3d\xf7\xad\xc5\x6d\x42\x93\x30\x21\x5e\x07\xad\x53\xf4\x11\xb9\xc5\x2d\x4d\x17\xca\x02\x67\x4a\xa1\xed\x39\x08\xdc\x35\x6c\xca\x29\xe4\x0b\xb7\xa5\x3f\xb6\x33\xc7\x33\x5b\xa0\x77\x3f\x36\x2c\xe0\xbc\x7b\xd7\x52\x71\x08\xc5\xb1\x44\xb8\xbe\x86\xde\x64\x31\x4d\x57\xd3\x5e\xd3\x4c\xa3\x11\xdc\x63\xd8\xc8\x32\x25\x33\xa1\x8e\x20\x50\xa1\xc7\xda\x2e\xa3\x43\x88\x3a\x6a\x1a\xd2\x6a\x45\x4b\x0f\x1e\xa4\xf3\x52\x17\x50\x33\xd6\x9e\xe6\x7b\x03\x17\x7a\x84\xb3\xca\x51\xb5\x3e\x1b\x86\xde\xd0\x66\x63\x91\xf8\x8d\xe6\x50\x68\x37\xa6\x64\xb7\x09\xe5\xd2\x3a\x0f\xa5\x62\x1c\x13\xc2\xeb\x8c\x79\x3d\xbf\x0d\x33\x93\xea\x45\x68\xc1\x00\x74\x1a\xb4\x4c\xd1\xa0\x26\xf5\x0e\xfa\x2d\xc6\x20\x8e\x22\xdb\x4a\x3f\xc1\xbe\x3a\x51\x82\xf3\x58\x3e\x25\x04\x5a\x70\x70\x87\x44\xe5\x81\x0d\xea\xa1\x4c\xba\xfe\xf9\x6b\xb3\x05\xa0\x4b\xe2\x88\xde\x3d\xe9\x6b\x65\x8a\xf3\xbe\x16\x75\x58\x78\x65\x2d\xe5\xbf\x1b\x05\x39\xf5\xf8\x1f\x95\xf3\x14\x53\x4b\xe1\x69\xd8\xe2\x25\xb2\x0e\xd4\x4c\x53\x7f\xf0\xe7\x21\x4a\xf3\x33\xcc\x2b\x52\xd7\x4c\xcb\x7a\xab\x2c\x8d\x47\xed\x25\x53\xea\x48\x79\xd8\x5b\x5a\xa7\x68\x81\x1a\x82\x93\x24\x15\x18\x27\x88\x4a\xcd\x55\x25\xea\x32\x08\x75\xdc\xe0\xb9\x60\xf3\xf9\x1e\xb6\x45\xe7\x58\x81\x09\x55\x52\x2e\x0f\xcd\x26\xab\xa1\x57\x93\x5c\x7f\xd0\x4b\x3a\x23\xcf\x29\x46\x99\x22\x69\x8b\x8c\xb8\x3a\x15\xc2\xa2\x73\xfd\x41\xc3\x39\x5d\x66\xef\xd7\xa8\x29\xf8\xa0\x71\x0f\xdd\x8a\xc4\x38\xa7\x95\x51\x0c\x81\x09\x41\xd4\xf6\x6c\x9d\x89\xa3\xc8\xed\xa5\xe7\x6b\x08\x9a\x4c\x79\xea\xc5\x41\x53\xff\x9c\x39\x84\x8b\xe9\xbf\x56\x93\xdb\x8f\xd3\xc9\xed\xdd\xc3\xc5\x18\xce\xce\x96\xb3\x7f\x4f\xbb\xb3\x0f\xe9\x3c\xbd\x99\x4c\x2f\xc6\xa7\x39\x74\xee\x90\x37\xad\x0b\xa4\xd0\x79\xc6\x37\x49\x89\xb8\xe9\x5f\x9e\xf3\xc0\xc9\xc1\x28\xca\x2c\xb2\xcd\xd5\xc9\x98\xba\x41\x1b\x1d\x2d\xe5\xc2\x35\xbc\x1a\xac\xab\xd7\xad\x99\x34\xf2\xfd\x96\xc8\x4f\x2b\x51\xa0\x8a\xef\xda\x91\xce\xe7\x9d\xe7\xf4\x41\xe1\xe8\x0e\x3e\x4e\xe7\xd3\xcf\xe9\x6a\x7a\x26\xb5\x5c\xa5\xab\xd9\xa4\x3e\xfa\xaf\x43\xf4\xfe\xcd\x21\xea\x2d\x97\xab\xdb\xc5\xb4\x37\x6e\xbe\xe6\xb7\xe9\xc7\xde\x9f\x14\x36\x7b\xd3\xf7\x8a\xcc\x9b\x7b\x63\xc5\xff\x92\xab\x27\xbb\x43\xce\x5e\x5a\x1d\x02\x09\x71\x5f\x3d\xfb\x89\x00\x4c\xb7\xfc\x91\xd7\x3f\x93\xa2\xf0\xfe\x45\xc6\x78\x8c\x1f\xe3\xff\x04\x00\x00\xff\xff\xb5\x44\x89\xaf\xbc\x0f\x00\x00")
+var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdf\x6f\x1a\x49\x12\x7e\x9e\xf9\x2b\x4a\xfb\x02\x68\xc9\x90\xec\x49\x7b\x12\x3e\x9f\x34\x21\x24\x41\x62\x6d\x0b\xc8\xf9\x7c\xab\x7d\xe8\xe9\xae\x19\x7a\x69\xba\x47\xfd\x03\x8c\x22\xff\xef\xa7\xea\x99\x01\xc3\xda\x49\xee\xde\xcc\x74\xf5\x57\x55\x5f\x55\x7d\x5d\x1e\x8d\x60\x62\xea\x83\x95\xd5\xda\xc3\x2f\x6f\xdf\xfd\x1d\x56\x6b\x84\xca\xbc\x41\xbf\x46\x8b\x61\x0b\x79\xf0\x6b\x63\x5d\x3a\x1a\xc1\x6a\x2d\x1d\x94\x52\x21\x48\x07\x35\xb3\x1e\x4c\x09\xfe\xc2\x5e\xc9\xc2\x32\x7b\xc8\xd2\xd1\xa8\xb9\xf3\xe2\x31\x21\x94\x16\x11\x9c\x29\xfd\x9e\x59\x1c\xc3\xc1\x04\xe0\x4c\x83\x45\x21\x9d\xb7\xb2\x08\x1e\x41\x7a\x60\x5a\x8c\x8c\x85\xad\x11\xb2\x3c\x10\xa4\xf4\x10\xb4\x40\x1b\x5d\x7b\xb4\x5b\xd7\xc5\xf1\xe9\xe6\x0b\xcc\xd1\x39\xb4\xf0\x09\x35\x5a\xa6\xe0\x2e\x14\x4a\x72\x98\x4b\x8e\xda\x21\x30\x07\x35\x7d\x71\x6b\x14\x50\x44\x38\xba\xf8\x91\x42\x59\xb6\xa1\xc0\x47\x13\xb4\x60\x5e\x1a\x3d\x04\x94\x14\x39\xec\xd0\x3a\x69\x34\xfc\xad\x73\xd5\x02\x0e\xc1\x58\x02\xe9\x33\x4f\x09\x58\x30\x35\xdd\x1b\x00\xd3\x07\x50\xcc\x9f\xae\xfe\x00\x21\xa7\xbc\x05\x48\x1d\xdd\xac\x4d\x8d\xe0\xd7\xcc\x53\xd6\x7b\xa9\x14\x14\x08\xc1\x61\x19\xd4\x90\xd0\x8a\xe0\xe1\x7e\xb6\xfa\x7c\xfb\x65\x05\xf9\xcd\x03\xdc\xe7\x8b\x45\x7e\xb3\x7a\xb8\x82\xbd\xf4\x6b\x13\x3c\xe0\x0e\x1b\x28\xb9\xad\x95\x44\x01\x7b\x66\x2d\xd3\xfe\x00\xa6\x24\x84\xdf\xa6\x8b\xc9\xe7\xfc\x66\x95\xbf\x9f\xcd\x67\xab\x07\x30\x16\x3e\xce\x56\x37\xd3\xe5\x12\x3e\xde\x2e\x20\x87\xbb\x7c\xb1\x9a\x4d\xbe\xcc\xf3\x05\xdc\x7d\x59\xdc\xdd\x2e\xa7\x19\x2c\x91\xa2\x42\xba\xff\x7d\xce\xcb\x58\x3d\x8b\x20\xd0\x33\xa9\x5c\xc7\xc4\x83\x09\xe0\xd6\x26\x28\x01\x6b\xb6\x43\xb0\xc8\x51\xee\x50\x00\x03\x6e\xea\xc3\x0f\x17\x95\xb0\x98\x32\xba\x8a\x39\xbf\xda\x90\x30\x2b\x41\x1b\x3f\x04\x87\x08\xff\x58\x7b\x5f\x8f\x47\xa3\xfd\x7e\x9f\x55\x3a\x64\xc6\x56\x23\xd5\xc0\xb9\xd1\x3f\xb3\x94\x30\x6b\x8b\xce\x33\x8f\x2b\xcb\x38\x5a\x30\xc1\xd7\xc1\x3b\x70\xa1\x2c\x25\x97\xa8\x3d\x48\x5d\x1a\xbb\x8d\x9d\x02\xde\x00\xb7\xc8\x3c\x02\x03\x65\x38\x53\x80\x8f\xc8\x43\x3c\x6b\x98\x8e\xed\x6a\x99\x76\x8c\xc7\xaf\xa5\x35\x5b\xca\x35\x38\x4f\x7f\x38\x87\xdb\x42\xa1\x80\x0a\x35\x3a\xe9\xa0\x50\x86\x6f\xb2\xf4\x6b\x9a\x3c\x0b\x86\xfa\x24\x66\xd8\x1a\xc5\xde\xd8\x63\xcf\x22\x14\x41\x2a\x21\x75\x95\xa5\x49\x67\x3d\x06\x1d\x94\x1a\xa6\x11\x42\x19\xb3\x09\x75\xce\xb9\x09\x31\xf6\x3f\x91\xfb\x06\xcc\xd5\xc8\x65\x49\xcd\xc1\x8e\xa7\xde\xc4\xa3\xa3\x5f\x53\x90\x7d\x96\x26\x67\x30\x63\x28\x83\x8e\xe9\xf4\x99\x10\x76\x08\xa2\x18\x7c\x4d\x93\x64\xc7\x2c\x61\xc1\x35\x78\xf3\x19\x1f\xe3\xe1\xe0\x2a\x4d\x12\x59\x42\xdf\xaf\xa5\xcb\x3a\xe0\xdf\x19\xe7\x7f\xc0\xf5\xf5\x75\x1c\xea\x52\x6a\x14\x03\x20\x88\xe4\x25\xb3\xe6\x24\x29\x98\x62\x9a\xe3\x18\x7a\x6f\x1f\x7b\xf0\x33\x88\x22\xab\xd0\xbf\x6f\xbe\x36\xce\x32\x6f\x96\xde\x4a\x5d\xf5\xdf\xfd\x3a\x18\xc6\x5b\xda\xc4\x3b\xd0\x9a\xdf\x98\xa3\x71\x73\xce\x8d\x88\xc7\x6d\xcc\x8d\xd5\xc4\x88\xd6\xa8\xb5\x72\xde\x58\x56\xe1\x18\xbe\x3e\xd1\xef\x27\xca\xea\x29\x4d\x9e\xce\x58\x5e\x36\x46\xaf\xb0\xdc\x42\x00\x6a\x6f\x8f\x7d\x5e\x49\x9a\xd4\xe7\x05\x88\x78\xdf\x2a\xc2\xb2\x0b\xe5\xa2\x08\x1b\x3c\x7c\xbf\x12\x74\x20\xc5\xe3\xf1\x60\x83\x87\xc1\x55\xfa\x6a\x89\xb2\x36\xe8\xdf\xa5\x78\xfc\xd1\x7a\x5d\xdc\x39\xe3\x75\x49\x56\xa7\x78\x07\x83\x0b\x1e\x2d\xba\xa0\x3c\xb5\xbb\xd4\x3b\xb3\x21\xe1\x5a\x13\x3f\x4a\x45\x4a\x4c\x4d\xd5\x72\x8d\x72\x14\x88\x1a\xa4\x47\xcb\x48\x3a\xcd\x0e\x2d\xbd\x1a\x60\xd1\x07\xab\xdd\x91\xc6\x52\x6a\xa6\x3a\xe0\x96\x75\x6f\x19\x6f\x66\xa6\xf9\xfe\x8c\x4b\xee\x1f\x23\x8b\x31\xbb\xd1\x08\x72\x0f\x94\x22\xd4\x46\x6a\x3f\x84\x3d\x82\x46\x14\x34\xf8\x02\x45\xe0\x3e\xe2\xf5\x76\x4c\x05\xec\x35\xc3\x4d\x12\x19\xaf\x9a\x40\x2f\xc1\xb3\xe1\x1f\xc6\x00\xb7\x66\x17\x9f\xb8\x82\xf1\x0d\xb4\x03\x67\xac\xac\xa4\x4e\x5b\x3a\xcf\x86\x8d\x22\xca\x08\x38\x86\x15\x6b\x45\x45\xa4\x2f\xef\x99\x82\x6b\x28\x64\x35\xd3\xfe\xa2\x78\x0d\xe9\xdd\xd5\xc1\x1f\x59\x3b\x3c\x99\x23\xc1\xeb\xff\x32\x18\xc2\xbb\x5f\x8f\x1d\xe1\x0d\x41\xc1\xf7\xc1\xbc\x79\x1d\x2a\xbd\x6c\x86\x97\xaf\x45\x37\x34\xc1\x3f\x47\xaf\x99\x0b\x05\x95\xa3\xc9\x33\xf2\x78\x3e\xc5\x57\xdf\xc0\x3d\xcf\xad\xc3\x6d\xa9\xc9\x98\x10\xaf\x83\x36\x25\xfa\x80\xdc\xe2\x96\x54\x9d\xaa\xc0\x99\x52\x68\x7b\x0e\xa2\x66\x0c\xdb\x76\x8a\xf5\xc2\x6d\xed\x0f\x9d\xd6\x7b\x66\x2b\xf4\xee\xfb\x81\x45\x9c\x37\x6f\x3a\x09\x8c\x54\x1c\x6a\x84\xeb\x6b\xe8\x4d\x16\xd3\x7c\x35\xed\xb5\x63\x34\x1a\xc1\x3d\xc6\x4d\xa8\x50\xb2\x10\xea\x00\x02\x15\x7a\x6c\xe2\x32\x3a\x52\x74\x94\x84\x21\xad\x34\xb4\x6c\xe0\xa3\x74\x5e\xea\x0a\x1a\xa5\xd8\xd3\xbb\xda\xc2\xc5\x19\xe1\x2c\x38\xea\xd6\x8b\x47\xc8\x1b\xda\x28\x2c\x92\xae\x90\xfe\xc7\x71\x63\x4a\x1e\x37\x90\x52\x5a\xe7\xa1\x56\x8c\x63\x46\x78\xc7\x60\x5e\xaf\x6f\x3b\xc9\xe4\x7a\x11\x47\x30\x02\x9d\x1e\x38\xa6\xe8\x81\x24\xf7\x0e\xfa\x1d\xc6\x20\x4d\x12\xdb\x59\x3f\xc3\xbe\x3a\x49\x82\xf3\x58\x3f\x17\x04\x5a\x2c\x70\x87\x24\xa1\x51\x0d\x9a\xc7\x90\x7c\xfd\xeb\xb7\xf6\xf5\x45\x97\xa5\x09\xdd\x7b\x36\xd7\xca\x54\xe7\x73\x2d\x1a\x5a\x78\xb0\x96\xea\x7f\x94\xe0\x92\x66\xfc\xcf\xe0\x3c\x71\x6a\x89\x9e\x56\x2d\x5e\x12\xc9\x28\x89\xf4\xda\x0e\xfe\x2a\x86\xf4\x6e\xc5\x77\x82\xdc\xb5\xaf\x54\xb3\xcd\xd5\xc6\xa3\xf6\x92\x29\x75\xa0\x3a\xec\x2d\xad\x31\xb4\xb8\x0c\xc1\x49\xb2\x8a\x8a\x13\x4d\xa5\xe6\x2a\x88\xa6\x0d\x62\x1f\xb7\x78\x2e\xc6\x7c\xbe\xff\x6c\xd1\x39\x56\x61\x46\x9d\x54\xca\xc7\x76\x83\xd4\xd0\x6b\x44\xae\x3f\xe8\x65\xc7\x20\xcf\x25\x46\x99\x2a\xeb\x9a\x8c\x64\x3a\x17\xc2\xa2\x73\xfd\x41\xab\x39\xc7\xca\xde\xaf\x51\x13\xf9\xa0\x71\x0f\xc7\xd5\x84\x71\x4e\xab\x9a\x18\x02\x13\x82\xa4\xed\x62\x8d\x48\x93\xc4\xed\xa5\xe7\x6b\x88\x9e\x4c\x7d\x9a\xc5\x41\xdb\xff\x9c\x39\x84\x9f\xa6\xff\x5e\x4d\x6e\x3f\x4c\x27\xb7\x77\x0f\x3f\x8d\xe1\xec\xdb\x72\xf6\x9f\xe9\xf1\xdb\xfb\x7c\x9e\xdf\x4c\xa6\x3f\x8d\xe3\xdb\xfc\x42\x42\xde\x74\x29\x90\x43\xe7\x19\xdf\x64\x35\xe2\xa6\xff\xf6\x5c\x07\x4e\x09\x26\x49\x61\x91\x6d\xae\x4e\xc1\x34\x03\xda\xfa\xe8\x24\x17\xae\xe1\x55\xb2\xae\x5e\x8f\x66\xd2\xda\xf7\x3b\x21\x3f\xad\x22\x51\x2a\xbe\x19\x47\x3e\x9f\x1f\x33\xa7\x1f\x44\xc7\xf1\xc3\x87\xe9\x7c\xfa\x29\x5f\x4d\xcf\xac\x96\xab\x7c\x35\x9b\x34\x9f\xfe\x67\x8a\xde\xfd\x30\x45\xbd\xe5\x72\x75\xbb\x98\xf6\xc6\xed\xaf\xf9\x6d\xfe\xa1\xf7\x17\x87\xed\xbe\xf2\xad\x26\xf3\xe6\xde\x58\xf1\xff\xd4\xea\xd9\xee\x50\xb2\x97\x56\x87\x28\x42\xdc\x87\x8b\xd5\x1c\x98\xee\xf4\xa3\x6c\xfe\x3d\x49\xe2\xfd\x17\x15\xe3\x29\x7d\x4a\xff\x1b\x00\x00\xff\xff\x7c\xdb\x3f\x79\x34\x0f\x00\x00")
func prestate_tracerJsBytes() ([]byte, error) {
return bindataRead(
@@ -213,7 +213,7 @@ func prestate_tracerJs() (*asset, error) {
}
info := bindataFileInfo{name: "prestate_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd0, 0xd5, 0x5, 0x92, 0xed, 0xf4, 0x69, 0x2e, 0x14, 0x48, 0x35, 0x67, 0xcc, 0xf2, 0x3e, 0xc7, 0xf, 0x18, 0x22, 0x7a, 0x4d, 0x6f, 0x31, 0xad, 0x3c, 0x92, 0x77, 0xb4, 0x1, 0x2a, 0xd3, 0x7c}}
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd, 0xb0, 0x72, 0x28, 0xc7, 0x27, 0x97, 0x4d, 0xe, 0xbf, 0x29, 0xe1, 0xa8, 0xd7, 0x52, 0x13, 0xa1, 0x19, 0xc3, 0xfb, 0x8d, 0x5b, 0xcb, 0xdd, 0xa5, 0xd7, 0x98, 0x34, 0x6a, 0xbf, 0x33, 0x6c}}
return a, nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js
index 99f71d2c..56aa2b21 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js
+++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js
@@ -40,10 +40,7 @@
var idx = toHex(key);
if (this.prestate[acc].storage[idx] === undefined) {
- var val = toHex(db.getState(addr, key));
- if (val != "0x0000000000000000000000000000000000000000000000000000000000000000") {
- this.prestate[acc].storage[idx] = toHex(db.getState(addr, key));
- }
+ this.prestate[acc].storage[idx] = toHex(db.getState(addr, key));
}
},
diff --git a/vendor/github.com/ethereum/go-ethereum/interfaces.go b/vendor/github.com/ethereum/go-ethereum/interfaces.go
index 26b0fcbc..be783440 100644
--- a/vendor/github.com/ethereum/go-ethereum/interfaces.go
+++ b/vendor/github.com/ethereum/go-ethereum/interfaces.go
@@ -146,7 +146,7 @@ type FilterQuery struct {
// {{A}} matches topic A in first position
// {{}, {B}} matches any topic in first position, B in second position
// {{A}, {B}} matches topic A in first position, B in second position
- // {{A, B}}, {C, D}} matches topic (A OR B) in first position, (C OR D) in second position
+ // {{A, B}, {C, D}} matches topic (A OR B) in first position, (C OR D) in second position
Topics [][]common.Hash
}
diff --git a/vendor/github.com/ethereum/go-ethereum/internal/cmdtest/test_cmd.go b/vendor/github.com/ethereum/go-ethereum/internal/cmdtest/test_cmd.go
index 20e82ec2..f8b1b43c 100644
--- a/vendor/github.com/ethereum/go-ethereum/internal/cmdtest/test_cmd.go
+++ b/vendor/github.com/ethereum/go-ethereum/internal/cmdtest/test_cmd.go
@@ -27,6 +27,7 @@ import (
"regexp"
"strings"
"sync"
+ "syscall"
"testing"
"text/template"
"time"
@@ -50,6 +51,8 @@ type TestCmd struct {
stdout *bufio.Reader
stdin io.WriteCloser
stderr *testlogger
+ // Err will contain the process exit error or interrupt signal error
+ Err error
}
// Run exec's the current binary using name as argv[0] which will trigger the
@@ -182,11 +185,25 @@ func (tt *TestCmd) ExpectExit() {
}
func (tt *TestCmd) WaitExit() {
- tt.cmd.Wait()
+ tt.Err = tt.cmd.Wait()
}
func (tt *TestCmd) Interrupt() {
- tt.cmd.Process.Signal(os.Interrupt)
+ tt.Err = tt.cmd.Process.Signal(os.Interrupt)
+}
+
+// ExitStatus exposes the process' OS exit code
+// It will only return a valid value after the process has finished.
+func (tt *TestCmd) ExitStatus() int {
+ if tt.Err != nil {
+ exitErr := tt.Err.(*exec.ExitError)
+ if exitErr != nil {
+ if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
+ return status.ExitStatus()
+ }
+ }
+ }
+ return 0
}
// StderrText returns any stderr output written so far.
diff --git a/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go b/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go
index 0c751c32..656555b3 100644
--- a/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go
+++ b/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go
@@ -339,7 +339,7 @@ func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool {
return fetchKeystore(s.am).Lock(addr) == nil
}
-// signTransactions sets defaults and signs the given transaction
+// signTransaction sets defaults and signs the given transaction
// NOTE: the caller needs to ensure that the nonceLock is held, if applicable,
// and release it after the transaction has been submitted to the tx pool
func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args *SendTxArgs, passwd string) (*types.Transaction, error) {
@@ -683,7 +683,7 @@ type CallArgs struct {
Data hexutil.Bytes `json:"data"`
}
-func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config, timeout time.Duration) ([]byte, uint64, bool, error) {
+func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, timeout time.Duration) ([]byte, uint64, bool, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
@@ -724,7 +724,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
defer cancel()
// Get a new instance of the EVM.
- evm, vmError, err := s.b.GetEVM(ctx, msg, state, header, vmCfg)
+ evm, vmError, err := s.b.GetEVM(ctx, msg, state, header)
if err != nil {
return nil, 0, false, err
}
@@ -748,7 +748,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
// Call executes the given transaction on the state for the given block number.
// It doesn't make and changes in the state/blockchain and is useful to execute and retrieve values.
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) {
- result, _, _, err := s.doCall(ctx, args, blockNr, vm.Config{}, 5*time.Second)
+ result, _, _, err := s.doCall(ctx, args, blockNr, 5*time.Second)
return (hexutil.Bytes)(result), err
}
@@ -777,7 +777,7 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h
executable := func(gas uint64) bool {
args.Gas = hexutil.Uint64(gas)
- _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, vm.Config{}, 0)
+ _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, 0)
if err != nil || failed {
return false
}
diff --git a/vendor/github.com/ethereum/go-ethereum/internal/ethapi/backend.go b/vendor/github.com/ethereum/go-ethereum/internal/ethapi/backend.go
index c9ffe230..e23ee03b 100644
--- a/vendor/github.com/ethereum/go-ethereum/internal/ethapi/backend.go
+++ b/vendor/github.com/ethereum/go-ethereum/internal/ethapi/backend.go
@@ -53,7 +53,7 @@ type Backend interface {
GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
GetTd(blockHash common.Hash) *big.Int
- GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error)
+ GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error)
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription
diff --git a/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go b/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go
index a5f31965..06bfcef6 100644
--- a/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go
+++ b/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go
@@ -384,6 +384,18 @@ web3._extend({
params: 1,
inputFormatter: [null]
}),
+ new web3._extend.Method({
+ name: 'standardTraceBadBlockToFile',
+ call: 'debug_standardTraceBadBlockToFile',
+ params: 2,
+ inputFormatter: [null, null]
+ }),
+ new web3._extend.Method({
+ name: 'standardTraceBlockToFile',
+ call: 'debug_standardTraceBlockToFile',
+ params: 2,
+ inputFormatter: [null, null]
+ }),
new web3._extend.Method({
name: 'traceBlockByNumber',
call: 'debug_traceBlockByNumber',
diff --git a/vendor/github.com/ethereum/go-ethereum/les/api_backend.go b/vendor/github.com/ethereum/go-ethereum/les/api_backend.go
index aa748a4e..75313962 100644
--- a/vendor/github.com/ethereum/go-ethereum/les/api_backend.go
+++ b/vendor/github.com/ethereum/go-ethereum/les/api_backend.go
@@ -105,10 +105,10 @@ func (b *LesApiBackend) GetTd(hash common.Hash) *big.Int {
return b.eth.blockchain.GetTdByHash(hash)
}
-func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) {
+func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) {
state.SetBalance(msg.From(), math.MaxBig256)
context := core.NewEVMContext(msg, header, b.eth.blockchain, nil)
- return vm.NewEVM(context, state, b.eth.chainConfig, vmCfg), state.Error, nil
+ return vm.NewEVM(context, state, b.eth.chainConfig, vm.Config{}), state.Error, nil
}
func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
diff --git a/vendor/github.com/ethereum/go-ethereum/les/backend.go b/vendor/github.com/ethereum/go-ethereum/les/backend.go
index a3474a68..d0db7101 100644
--- a/vendor/github.com/ethereum/go-ethereum/les/backend.go
+++ b/vendor/github.com/ethereum/go-ethereum/les/backend.go
@@ -82,7 +82,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
if err != nil {
return nil, err
}
- chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis)
+ chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.ConstantinopleOverride)
if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
return nil, genesisErr
}
diff --git a/vendor/github.com/ethereum/go-ethereum/les/fetcher.go b/vendor/github.com/ethereum/go-ethereum/les/fetcher.go
index f0d3b188..2615f69d 100644
--- a/vendor/github.com/ethereum/go-ethereum/les/fetcher.go
+++ b/vendor/github.com/ethereum/go-ethereum/les/fetcher.go
@@ -141,36 +141,39 @@ func (f *lightFetcher) syncLoop() {
s := requesting
requesting = false
var (
- rq *distReq
- reqID uint64
+ rq *distReq
+ reqID uint64
+ syncing bool
)
if !f.syncing && !(newAnnounce && s) {
- rq, reqID = f.nextRequest()
+ rq, reqID, syncing = f.nextRequest()
}
- syncing := f.syncing
f.lock.Unlock()
if rq != nil {
requesting = true
- _, ok := <-f.pm.reqDist.queue(rq)
- if !ok {
+ if _, ok := <-f.pm.reqDist.queue(rq); ok {
+ if syncing {
+ f.lock.Lock()
+ f.syncing = true
+ f.lock.Unlock()
+ } else {
+ go func() {
+ time.Sleep(softRequestTimeout)
+ f.reqMu.Lock()
+ req, ok := f.requested[reqID]
+ if ok {
+ req.timeout = true
+ f.requested[reqID] = req
+ }
+ f.reqMu.Unlock()
+ // keep starting new requests while possible
+ f.requestChn <- false
+ }()
+ }
+ } else {
f.requestChn <- false
}
-
- if !syncing {
- go func() {
- time.Sleep(softRequestTimeout)
- f.reqMu.Lock()
- req, ok := f.requested[reqID]
- if ok {
- req.timeout = true
- f.requested[reqID] = req
- }
- f.reqMu.Unlock()
- // keep starting new requests while possible
- f.requestChn <- false
- }()
- }
}
case reqID := <-f.timeoutChn:
f.reqMu.Lock()
@@ -209,6 +212,7 @@ func (f *lightFetcher) syncLoop() {
f.checkSyncedHeaders(p)
f.syncing = false
f.lock.Unlock()
+ f.requestChn <- false
}
}
}
@@ -405,7 +409,7 @@ func (f *lightFetcher) requestedID(reqID uint64) bool {
// nextRequest selects the peer and announced head to be requested next, amount
// to be downloaded starting from the head backwards is also returned
-func (f *lightFetcher) nextRequest() (*distReq, uint64) {
+func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
var (
bestHash common.Hash
bestAmount uint64
@@ -427,14 +431,12 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64) {
}
}
if bestTd == f.maxConfirmedTd {
- return nil, 0
+ return nil, 0, false
}
- f.syncing = bestSyncing
-
var rq *distReq
reqID := genReqID()
- if f.syncing {
+ if bestSyncing {
rq = &distReq{
getCost: func(dp distPeer) uint64 {
return 0
@@ -500,7 +502,7 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64) {
},
}
}
- return rq, reqID
+ return rq, reqID, bestSyncing
}
// deliverHeaders delivers header download request responses for processing
diff --git a/vendor/github.com/ethereum/go-ethereum/les/flowcontrol/control.go b/vendor/github.com/ethereum/go-ethereum/les/flowcontrol/control.go
index d50eb809..8ef4ba51 100644
--- a/vendor/github.com/ethereum/go-ethereum/les/flowcontrol/control.go
+++ b/vendor/github.com/ethereum/go-ethereum/les/flowcontrol/control.go
@@ -82,7 +82,6 @@ func (peer *ClientNode) RequestProcessed(cost uint64) (bv, realCost uint64) {
time := mclock.Now()
peer.recalcBV(time)
peer.bufValue -= cost
- peer.recalcBV(time)
rcValue, rcost := peer.cm.processed(peer.cmNode, time)
if rcValue < peer.params.BufLimit {
bv := peer.params.BufLimit - rcValue
diff --git a/vendor/github.com/ethereum/go-ethereum/les/serverpool.go b/vendor/github.com/ethereum/go-ethereum/les/serverpool.go
index 0fe6e49b..52b54b37 100644
--- a/vendor/github.com/ethereum/go-ethereum/les/serverpool.go
+++ b/vendor/github.com/ethereum/go-ethereum/les/serverpool.go
@@ -683,7 +683,7 @@ func (e *poolEntry) DecodeRLP(s *rlp.Stream) error {
}
func encodePubkey64(pub *ecdsa.PublicKey) []byte {
- return crypto.FromECDSAPub(pub)[:1]
+ return crypto.FromECDSAPub(pub)[1:]
}
func decodePubkey64(b []byte) (*ecdsa.PublicKey, error) {
diff --git a/vendor/github.com/ethereum/go-ethereum/light/postprocess.go b/vendor/github.com/ethereum/go-ethereum/light/postprocess.go
index 1cfd7535..dd1b74a7 100644
--- a/vendor/github.com/ethereum/go-ethereum/light/postprocess.go
+++ b/vendor/github.com/ethereum/go-ethereum/light/postprocess.go
@@ -159,7 +159,7 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *co
diskdb: db,
odr: odr,
trieTable: trieTable,
- triedb: trie.NewDatabase(trieTable),
+ triedb: trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
sectionSize: size,
}
return core.NewChainIndexer(db, ethdb.NewTable(db, "chtIndex-"), backend, size, confirms, time.Millisecond*100, "cht")
@@ -281,7 +281,7 @@ func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uin
diskdb: db,
odr: odr,
trieTable: trieTable,
- triedb: trie.NewDatabase(trieTable),
+ triedb: trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
parentSize: parentSize,
size: size,
}
diff --git a/vendor/github.com/ethereum/go-ethereum/light/trie.go b/vendor/github.com/ethereum/go-ethereum/light/trie.go
index c07e9946..ab4e18b4 100644
--- a/vendor/github.com/ethereum/go-ethereum/light/trie.go
+++ b/vendor/github.com/ethereum/go-ethereum/light/trie.go
@@ -108,7 +108,7 @@ func (t *odrTrie) TryGet(key []byte) ([]byte, error) {
func (t *odrTrie) TryUpdate(key, value []byte) error {
key = crypto.Keccak256(key)
return t.do(key, func() error {
- return t.trie.TryDelete(key)
+ return t.trie.TryUpdate(key, value)
})
}
diff --git a/vendor/github.com/ethereum/go-ethereum/light/trie_test.go b/vendor/github.com/ethereum/go-ethereum/light/trie_test.go
index 51ce9017..5b5fce31 100644
--- a/vendor/github.com/ethereum/go-ethereum/light/trie_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/light/trie_test.go
@@ -64,7 +64,7 @@ func diffTries(t1, t2 state.Trie) error {
spew.Dump(i2)
return fmt.Errorf("tries have different keys %x, %x", i1.Key, i2.Key)
}
- if !bytes.Equal(i2.Value, i2.Value) {
+ if !bytes.Equal(i1.Value, i2.Value) {
return fmt.Errorf("tries differ at key %x", i1.Key)
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/influxdb/influxdb.go b/vendor/github.com/ethereum/go-ethereum/metrics/influxdb/influxdb.go
index 31a5c21b..c4ef9272 100644
--- a/vendor/github.com/ethereum/go-ethereum/metrics/influxdb/influxdb.go
+++ b/vendor/github.com/ethereum/go-ethereum/metrics/influxdb/influxdb.go
@@ -58,6 +58,34 @@ func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, userna
rep.run()
}
+// InfluxDBWithTagsOnce runs once an InfluxDB reporter and post the given metrics.Registry with the specified tags
+func InfluxDBWithTagsOnce(r metrics.Registry, url, database, username, password, namespace string, tags map[string]string) error {
+ u, err := uurl.Parse(url)
+ if err != nil {
+ return fmt.Errorf("Unable to parse InfluxDB. url: %s, err: %v", url, err)
+ }
+
+ rep := &reporter{
+ reg: r,
+ url: *u,
+ database: database,
+ username: username,
+ password: password,
+ namespace: namespace,
+ tags: tags,
+ cache: make(map[string]int64),
+ }
+ if err := rep.makeClient(); err != nil {
+ return fmt.Errorf("Unable to make InfluxDB client. err: %v", err)
+ }
+
+ if err := rep.send(); err != nil {
+ return fmt.Errorf("Unable to send to InfluxDB. err: %v", err)
+ }
+
+ return nil
+}
+
func (r *reporter) makeClient() (err error) {
r.client, err = client.NewClient(client.Config{
URL: r.url,
diff --git a/vendor/github.com/ethereum/go-ethereum/miner/worker.go b/vendor/github.com/ethereum/go-ethereum/miner/worker.go
index 8579c5c8..48473796 100644
--- a/vendor/github.com/ethereum/go-ethereum/miner/worker.go
+++ b/vendor/github.com/ethereum/go-ethereum/miner/worker.go
@@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
@@ -692,7 +691,7 @@ func (w *worker) updateSnapshot() {
func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
snap := w.current.state.Snapshot()
- receipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, vm.Config{})
+ receipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig())
if err != nil {
w.current.state.RevertToSnapshot(snap)
return nil, err
diff --git a/vendor/github.com/ethereum/go-ethereum/mobile/big.go b/vendor/github.com/ethereum/go-ethereum/mobile/big.go
index dd7b1587..86ea9324 100644
--- a/vendor/github.com/ethereum/go-ethereum/mobile/big.go
+++ b/vendor/github.com/ethereum/go-ethereum/mobile/big.go
@@ -84,6 +84,13 @@ func (bi *BigInt) SetString(x string, base int) {
// BigInts represents a slice of big ints.
type BigInts struct{ bigints []*big.Int }
+// NewBigInts creates a slice of uninitialized big numbers.
+func NewBigInts(size int) *BigInts {
+ return &BigInts{
+ bigints: make([]*big.Int, size),
+ }
+}
+
// Size returns the number of big ints in the slice.
func (bi *BigInts) Size() int {
return len(bi.bigints)
diff --git a/vendor/github.com/ethereum/go-ethereum/node/config.go b/vendor/github.com/ethereum/go-ethereum/node/config.go
index 8f10f4f6..7b32a590 100644
--- a/vendor/github.com/ethereum/go-ethereum/node/config.go
+++ b/vendor/github.com/ethereum/go-ethereum/node/config.go
@@ -24,6 +24,7 @@ import (
"path/filepath"
"runtime"
"strings"
+ "sync"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -152,6 +153,10 @@ type Config struct {
// Logger is a custom logger to use with the p2p.Server.
Logger log.Logger `toml:",omitempty"`
+
+ staticNodesWarning bool
+ trustedNodesWarning bool
+ oldGethResourceWarning bool
}
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
@@ -263,8 +268,8 @@ var isOldGethResource = map[string]bool{
"chaindata": true,
"nodes": true,
"nodekey": true,
- "static-nodes.json": true,
- "trusted-nodes.json": true,
+ "static-nodes.json": false, // no warning for these because they have their
+ "trusted-nodes.json": false, // own separate warning.
}
// ResolvePath resolves path in the instance directory.
@@ -277,13 +282,15 @@ func (c *Config) ResolvePath(path string) string {
}
// Backwards-compatibility: ensure that data directory files created
// by geth 1.4 are used if they exist.
- if c.name() == "geth" && isOldGethResource[path] {
+ if warn, isOld := isOldGethResource[path]; isOld {
oldpath := ""
- if c.Name == "geth" {
+ if c.name() == "geth" {
oldpath = filepath.Join(c.DataDir, path)
}
if oldpath != "" && common.FileExist(oldpath) {
- // TODO: print warning
+ if warn {
+ c.warnOnce(&c.oldGethResourceWarning, "Using deprecated resource file %s, please move this file to the 'geth' subdirectory of datadir.", oldpath)
+ }
return oldpath
}
}
@@ -337,17 +344,17 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
// StaticNodes returns a list of node enode URLs configured as static nodes.
func (c *Config) StaticNodes() []*enode.Node {
- return c.parsePersistentNodes(c.ResolvePath(datadirStaticNodes))
+ return c.parsePersistentNodes(&c.staticNodesWarning, c.ResolvePath(datadirStaticNodes))
}
// TrustedNodes returns a list of node enode URLs configured as trusted nodes.
func (c *Config) TrustedNodes() []*enode.Node {
- return c.parsePersistentNodes(c.ResolvePath(datadirTrustedNodes))
+ return c.parsePersistentNodes(&c.trustedNodesWarning, c.ResolvePath(datadirTrustedNodes))
}
// parsePersistentNodes parses a list of discovery node URLs loaded from a .json
// file from within the data directory.
-func (c *Config) parsePersistentNodes(path string) []*enode.Node {
+func (c *Config) parsePersistentNodes(w *bool, path string) []*enode.Node {
// Short circuit if no node config is present
if c.DataDir == "" {
return nil
@@ -355,10 +362,12 @@ func (c *Config) parsePersistentNodes(path string) []*enode.Node {
if _, err := os.Stat(path); err != nil {
return nil
}
+ c.warnOnce(w, "Found deprecated node list file %s, please use the TOML config file instead.", path)
+
// Load the nodes from the config file.
var nodelist []string
if err := common.LoadJSON(path, &nodelist); err != nil {
- log.Error(fmt.Sprintf("Can't load node file %s: %v", path, err))
+ log.Error(fmt.Sprintf("Can't load node list file: %v", err))
return nil
}
// Interpret the list as a discovery node array
@@ -440,3 +449,20 @@ func makeAccountManager(conf *Config) (*accounts.Manager, string, error) {
}
return accounts.NewManager(backends...), ephemeral, nil
}
+
+var warnLock sync.Mutex
+
+func (c *Config) warnOnce(w *bool, format string, args ...interface{}) {
+ warnLock.Lock()
+ defer warnLock.Unlock()
+
+ if *w {
+ return
+ }
+ l := c.Logger
+ if l == nil {
+ l = log.Root()
+ }
+ l.Warn(fmt.Sprintf(format, args...))
+ *w = true
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/node/node.go b/vendor/github.com/ethereum/go-ethereum/node/node.go
index 85299dba..c35a5097 100644
--- a/vendor/github.com/ethereum/go-ethereum/node/node.go
+++ b/vendor/github.com/ethereum/go-ethereum/node/node.go
@@ -287,7 +287,7 @@ func (n *Node) startInProc(apis []rpc.API) error {
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
return err
}
- n.log.Debug("InProc registered", "service", api.Service, "namespace", api.Namespace)
+ n.log.Debug("InProc registered", "namespace", api.Namespace)
}
n.inprocHandler = handler
return nil
@@ -322,7 +322,7 @@ func (n *Node) stopIPC() {
n.ipcListener.Close()
n.ipcListener = nil
- n.log.Info("IPC endpoint closed", "endpoint", n.ipcEndpoint)
+ n.log.Info("IPC endpoint closed", "url", n.ipcEndpoint)
}
if n.ipcHandler != nil {
n.ipcHandler.Stop()
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go b/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go
index afd4c9a2..9f7f1d41 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go
@@ -434,7 +434,7 @@ func (tab *Table) loadSeedNodes() {
for i := range seeds {
seed := seeds[i]
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }}
- log.Debug("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
+ log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
tab.add(seed)
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go
index a6cabf08..cdeb28dd 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go
@@ -567,12 +567,11 @@ loop:
net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
if n.state != nil && n.state.canQuery {
return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
- } else {
- if n.state == unknown {
- net.ping(n, n.addr())
- }
- return nil
}
+ if n.state == unknown {
+ net.ping(n, n.addr())
+ }
+ return nil
})
case <-statsDump.C:
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go
index 06a1a584..770406a2 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go
@@ -16,29 +16,32 @@
package protocols
-import "github.com/ethereum/go-ethereum/metrics"
+import (
+ "time"
+
+ "github.com/ethereum/go-ethereum/metrics"
+)
//define some metrics
var (
- //NOTE: these metrics just define the interfaces and are currently *NOT persisted* over sessions
//All metrics are cumulative
//total amount of units credited
- mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", nil)
+ mBalanceCredit metrics.Counter
//total amount of units debited
- mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", nil)
+ mBalanceDebit metrics.Counter
//total amount of bytes credited
- mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", nil)
+ mBytesCredit metrics.Counter
//total amount of bytes debited
- mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", nil)
+ mBytesDebit metrics.Counter
//total amount of credited messages
- mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", nil)
+ mMsgCredit metrics.Counter
//total amount of debited messages
- mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", nil)
+ mMsgDebit metrics.Counter
//how many times local node had to drop remote peers
- mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", nil)
+ mPeerDrops metrics.Counter
//how many times local node overdrafted and dropped
- mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", nil)
+ mSelfDrops metrics.Counter
)
//Prices defines how prices are being passed on to the accounting instance
@@ -105,6 +108,26 @@ func NewAccounting(balance Balance, po Prices) *Accounting {
return ah
}
+//SetupAccountingMetrics creates a separate registry for p2p accounting metrics;
+//this registry should be independent of any other metrics as it persists at different endpoints.
+//It also instantiates the given metrics and starts the persisting go-routine which
+//at the passed interval writes the metrics to a LevelDB
+func SetupAccountingMetrics(reportInterval time.Duration, path string) *AccountingMetrics {
+ //create an empty registry
+ registry := metrics.NewRegistry()
+ //instantiate the metrics
+ mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", registry)
+ mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", registry)
+ mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", registry)
+ mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", registry)
+ mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", registry)
+ mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", registry)
+ mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", registry)
+ mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", registry)
+ //create the DB and start persisting
+ return NewAccountingMetrics(registry, reportInterval, path)
+}
+
//Implement Hook.Send
// Send takes a peer, a size and a msg and
// - calculates the cost for the local node sending a msg of size to peer using the Prices interface
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting_simulation_test.go b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting_simulation_test.go
index 65b737ab..e90a1d81 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting_simulation_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting_simulation_test.go
@@ -20,7 +20,10 @@ import (
"context"
"flag"
"fmt"
+ "io/ioutil"
"math/rand"
+ "os"
+ "path/filepath"
"reflect"
"sync"
"testing"
@@ -66,6 +69,13 @@ func init() {
func TestAccountingSimulation(t *testing.T) {
//setup the balances objects for every node
bal := newBalances(*nodes)
+ //setup the metrics system or tests will fail trying to write metrics
+ dir, err := ioutil.TempDir("", "account-sim")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+ SetupAccountingMetrics(1*time.Second, filepath.Join(dir, "metrics.db"))
//define the node.Service for this test
services := adapters.Services{
"accounting": func(ctx *adapters.ServiceContext) (node.Service, error) {
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/protocol.go b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/protocol.go
index 7dddd852..b16720dd 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/protocol.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/protocol.go
@@ -381,7 +381,7 @@ func (p *Peer) handleIncoming(handle func(ctx context.Context, msg interface{})
// * arguments
// * context
// * the local handshake to be sent to the remote peer
-// * funcion to be called on the remote handshake (can be nil)
+// * function to be called on the remote handshake (can be nil)
// * expects a remote handshake back of the same type
// * the dialing peer needs to send the handshake first and then waits for remote
// * the listening peer waits for the remote handshake and then sends it
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/reporter.go b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/reporter.go
new file mode 100644
index 00000000..215d4fe3
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/reporter.go
@@ -0,0 +1,147 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package protocols
+
+import (
+ "encoding/binary"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+//AccountMetrics abstracts away the metrics DB and
+//the reporter to persist metrics
+type AccountingMetrics struct {
+ reporter *reporter
+}
+
+//Close will be called when the node is being shutdown
+//for a graceful cleanup
+func (am *AccountingMetrics) Close() {
+ close(am.reporter.quit)
+ am.reporter.db.Close()
+}
+
+//reporter is an internal structure used to write p2p accounting related
+//metrics to a LevelDB. It will periodically write the accrued metrics to the DB.
+type reporter struct {
+ reg metrics.Registry //the registry for these metrics (independent of other metrics)
+ interval time.Duration //duration at which the reporter will persist metrics
+ db *leveldb.DB //the actual DB
+ quit chan struct{} //quit the reporter loop
+}
+
+//NewMetricsDB creates a new LevelDB instance used to persist metrics defined
+//inside p2p/protocols/accounting.go
+func NewAccountingMetrics(r metrics.Registry, d time.Duration, path string) *AccountingMetrics {
+ var val = make([]byte, 8)
+ var err error
+
+ //Create the LevelDB
+ db, err := leveldb.OpenFile(path, nil)
+ if err != nil {
+ log.Error(err.Error())
+ return nil
+ }
+
+ //Check for all defined metrics that there is a value in the DB
+ //If there is, assign it to the metric. This means that the node
+ //has been running before and that metrics have been persisted.
+ metricsMap := map[string]metrics.Counter{
+ "account.balance.credit": mBalanceCredit,
+ "account.balance.debit": mBalanceDebit,
+ "account.bytes.credit": mBytesCredit,
+ "account.bytes.debit": mBytesDebit,
+ "account.msg.credit": mMsgCredit,
+ "account.msg.debit": mMsgDebit,
+ "account.peerdrops": mPeerDrops,
+ "account.selfdrops": mSelfDrops,
+ }
+ //iterate the map and get the values
+ for key, metric := range metricsMap {
+ val, err = db.Get([]byte(key), nil)
+ //until the first time a value is being written,
+ //this will return an error.
+ //it could be beneficial though to log errors later,
+ //but that would require a different logic
+ if err == nil {
+ metric.Inc(int64(binary.BigEndian.Uint64(val)))
+ }
+ }
+
+ //create the reporter
+ rep := &reporter{
+ reg: r,
+ interval: d,
+ db: db,
+ quit: make(chan struct{}),
+ }
+
+ //run the go routine
+ go rep.run()
+
+ m := &AccountingMetrics{
+ reporter: rep,
+ }
+
+ return m
+}
+
+//run is the goroutine which periodically sends the metrics to the configured LevelDB
+func (r *reporter) run() {
+ intervalTicker := time.NewTicker(r.interval)
+
+ for {
+ select {
+ case <-intervalTicker.C:
+ //at each tick send the metrics
+ if err := r.save(); err != nil {
+ log.Error("unable to send metrics to LevelDB", "err", err)
+ //If there is an error in writing, exit the routine; we assume here that the error is
+ //severe and don't attempt to write again.
+ //Also, this should prevent leaking when the node is stopped
+ return
+ }
+ case <-r.quit:
+ //graceful shutdown
+ return
+ }
+ }
+}
+
+//send the metrics to the DB
+func (r *reporter) save() error {
+ //create a LevelDB Batch
+ batch := leveldb.Batch{}
+ //for each metric in the registry (which is independent)...
+ r.reg.Each(func(name string, i interface{}) {
+ metric, ok := i.(metrics.Counter)
+ if ok {
+ //assuming every metric here to be a Counter (separate registry)
+ //...create a snapshot...
+ ms := metric.Snapshot()
+ byteVal := make([]byte, 8)
+ binary.BigEndian.PutUint64(byteVal, uint64(ms.Count()))
+ //...and save the value to the DB
+ batch.Put([]byte(name), byteVal)
+ }
+ })
+ return r.db.Write(&batch, nil)
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/reporter_test.go b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/reporter_test.go
new file mode 100644
index 00000000..b9f06e67
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/reporter_test.go
@@ -0,0 +1,77 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package protocols
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+)
+
+//TestReporter tests that the metrics being collected for p2p accounting
+//are being persisted and available after restart of a node.
+//It simulates restarting by just recreating the DB as if the node had restarted.
+func TestReporter(t *testing.T) {
+ //create a test directory
+ dir, err := ioutil.TempDir("", "reporter-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ //setup the metrics
+ log.Debug("Setting up metrics first time")
+ reportInterval := 5 * time.Millisecond
+ metrics := SetupAccountingMetrics(reportInterval, filepath.Join(dir, "test.db"))
+ log.Debug("Done.")
+
+ //do some metrics
+ mBalanceCredit.Inc(12)
+ mBytesCredit.Inc(34)
+ mMsgDebit.Inc(9)
+
+ //give the reporter time to write the metrics to DB
+ time.Sleep(20 * time.Millisecond)
+
+ //set the metrics to nil - this effectively simulates the node having shut down...
+ mBalanceCredit = nil
+ mBytesCredit = nil
+ mMsgDebit = nil
+ //close the DB also, or we can't create a new one
+ metrics.Close()
+
+ //setup the metrics again
+ log.Debug("Setting up metrics second time")
+ metrics = SetupAccountingMetrics(reportInterval, filepath.Join(dir, "test.db"))
+ defer metrics.Close()
+ log.Debug("Done.")
+
+ //now check the metrics, they should have the same value as before "shutdown"
+ if mBalanceCredit.Count() != 12 {
+ t.Fatalf("Expected counter to be %d, but is %d", 12, mBalanceCredit.Count())
+ }
+ if mBytesCredit.Count() != 34 {
+ t.Fatalf("Expected counter to be %d, but is %d", 23, mBytesCredit.Count())
+ }
+ if mMsgDebit.Count() != 9 {
+ t.Fatalf("Expected counter to be %d, but is %d", 9, mMsgDebit.Count())
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/server.go b/vendor/github.com/ethereum/go-ethereum/p2p/server.go
index 66786086..566f01ff 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/server.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/server.go
@@ -22,7 +22,6 @@ import (
"crypto/ecdsa"
"encoding/hex"
"errors"
- "fmt"
"net"
"sort"
"sync"
@@ -391,7 +390,7 @@ type sharedUDPConn struct {
func (s *sharedUDPConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) {
packet, ok := <-s.unhandled
if !ok {
- return 0, nil, fmt.Errorf("Connection was closed")
+ return 0, nil, errors.New("Connection was closed")
}
l := len(packet.Data)
if l > len(b) {
@@ -425,7 +424,7 @@ func (srv *Server) Start() (err error) {
// static fields
if srv.PrivateKey == nil {
- return fmt.Errorf("Server.PrivateKey must be set to a non-nil key")
+ return errors.New("Server.PrivateKey must be set to a non-nil key")
}
if srv.newTransport == nil {
srv.newTransport = newRLPX
@@ -903,7 +902,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
if dialDest != nil {
dialPubkey = new(ecdsa.PublicKey)
if err := dialDest.Load((*enode.Secp256k1)(dialPubkey)); err != nil {
- return fmt.Errorf("dial destination doesn't have a secp256k1 public key")
+ return errors.New("dial destination doesn't have a secp256k1 public key")
}
}
// Run the encryption handshake.
@@ -937,7 +936,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
return err
}
if id := c.node.ID(); !bytes.Equal(crypto.Keccak256(phs.ID), id[:]) {
- clog.Trace("Wrong devp2p handshake identity", "phsid", fmt.Sprintf("%x", phs.ID))
+ clog.Trace("Wrong devp2p handshake identity", "phsid", hex.EncodeToString(phs.ID))
return DiscUnexpectedIdentity
}
c.caps, c.name = phs.Caps, phs.Name
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go
index 92ccfde8..ab9f582c 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go
@@ -20,6 +20,7 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
"sync"
"time"
@@ -705,8 +706,11 @@ func (net *Network) snapshot(addServices []string, removeServices []string) (*Sn
return snap, nil
}
+var snapshotLoadTimeout = 120 * time.Second
+
// Load loads a network snapshot
func (net *Network) Load(snap *Snapshot) error {
+ // Start nodes.
for _, n := range snap.Nodes {
if _, err := net.NewNodeWithConfig(n.Node.Config); err != nil {
return err
@@ -718,6 +722,69 @@ func (net *Network) Load(snap *Snapshot) error {
return err
}
}
+
+ // Prepare connection events counter.
+ allConnected := make(chan struct{}) // closed when all connections are established
+ done := make(chan struct{}) // ensures that the event loop goroutine is terminated
+ defer close(done)
+
+ // Subscribe to event channel.
+ // It needs to be done outside of the event loop goroutine (created below)
+ // to ensure that the event channel is blocking before connect calls are made.
+ events := make(chan *Event)
+ sub := net.Events().Subscribe(events)
+ defer sub.Unsubscribe()
+
+ go func() {
+ // Expected number of connections.
+ total := len(snap.Conns)
+ // Set of all established connections from the snapshot, not other connections.
+ // Key array element 0 is the connection One field value, and element 1 connection Other field.
+ connections := make(map[[2]enode.ID]struct{}, total)
+
+ for {
+ select {
+ case e := <-events:
+ // Ignore control events as they do not represent
+ // connect or disconnect (Up) state change.
+ if e.Control {
+ continue
+ }
+ // Detect only connection events.
+ if e.Type != EventTypeConn {
+ continue
+ }
+ connection := [2]enode.ID{e.Conn.One, e.Conn.Other}
+ // Nodes are still not connected or have been disconnected.
+ if !e.Conn.Up {
+ // Delete the connection from the set of established connections.
+ // This will prevent false positive in case disconnections happen.
+ delete(connections, connection)
+ log.Warn("load snapshot: unexpected disconnection", "one", e.Conn.One, "other", e.Conn.Other)
+ continue
+ }
+ // Check that the connection is from the snapshot.
+ for _, conn := range snap.Conns {
+ if conn.One == e.Conn.One && conn.Other == e.Conn.Other {
+ // Add the connection to the set of established connections.
+ connections[connection] = struct{}{}
+ if len(connections) == total {
+ // Signal that all nodes are connected.
+ close(allConnected)
+ return
+ }
+
+ break
+ }
+ }
+ case <-done:
+ // Load function returned, terminate this goroutine.
+ return
+ }
+ }
+ }()
+
+ // Start connecting.
for _, conn := range snap.Conns {
if !net.GetNode(conn.One).Up || !net.GetNode(conn.Other).Up {
@@ -729,6 +796,14 @@ func (net *Network) Load(snap *Snapshot) error {
return err
}
}
+
+ select {
+ // Wait until all connections from the snapshot are established.
+ case <-allConnected:
+ // Make sure that we do not wait forever.
+ case <-time.After(snapshotLoadTimeout):
+ return errors.New("snapshot connections not established")
+ }
return nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/params/config.go b/vendor/github.com/ethereum/go-ethereum/params/config.go
index 007e4a66..2935ef1f 100644
--- a/vendor/github.com/ethereum/go-ethereum/params/config.go
+++ b/vendor/github.com/ethereum/go-ethereum/params/config.go
@@ -42,17 +42,17 @@ var (
EIP155Block: big.NewInt(2675000),
EIP158Block: big.NewInt(2675000),
ByzantiumBlock: big.NewInt(4370000),
- ConstantinopleBlock: nil,
+ ConstantinopleBlock: big.NewInt(7080000),
Ethash: new(EthashConfig),
}
// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
MainnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "mainnet",
- SectionIndex: 203,
- SectionHead: common.HexToHash("0xc9e05fc67c6a9815adc8072eb18805b53da53a9a6a273e05541e1b7542cf937a"),
- CHTRoot: common.HexToHash("0xb85f42447d59f7c3e6679b9a37ed983593fd52efd6251b883592662e95769d5b"),
- BloomRoot: common.HexToHash("0xf93d50cb4c49b403c6fd33cd60896d3b36184275be0a51bae4df5e8844ac624c"),
+ SectionIndex: 208,
+ SectionHead: common.HexToHash("0x5e9f7696c397d9df8f3b1abda857753575c6f5cff894e1a3d9e1a2af1bd9d6ac"),
+ CHTRoot: common.HexToHash("0x954a63134f6897f015f026387c59c98c4dae7b336610ff5a143455aac9153e9d"),
+ BloomRoot: common.HexToHash("0x8006c5e44b14d90d7cc9cd5fa1cb48cf53697ee3bbbf4b76fdfa70b0242500a9"),
}
// TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network.
@@ -73,10 +73,10 @@ var (
// TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
TestnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "testnet",
- SectionIndex: 134,
- SectionHead: common.HexToHash("0x17053ecbe045bebefaa01e7716cc85a4e22647e181416cc1098ccbb73a088931"),
- CHTRoot: common.HexToHash("0x4d2b86422e46ed76f0e3f50f06632c409f809c8375e53c8bc0f782bcb93dd49a"),
- BloomRoot: common.HexToHash("0xccba62232ee56c2967afc58f136a47ba7dc545ae586e6be666430d94516306c7"),
+ SectionIndex: 139,
+ SectionHead: common.HexToHash("0x9fad89a5e3b993c8339b9cf2cbbeb72cd08774ea6b71b105b3dd880420c618f4"),
+ CHTRoot: common.HexToHash("0xc815833881989c5d2035147e1a79a33d22cbc5313e104ff01e6ab405bd28b317"),
+ BloomRoot: common.HexToHash("0xd94ee9f3c480858f53ec5d059aebdbb2e8d904702f100875ee59ec5f366e841d"),
}
// RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network.
@@ -90,7 +90,7 @@ var (
EIP155Block: big.NewInt(3),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(1035301),
- ConstantinopleBlock: nil,
+ ConstantinopleBlock: big.NewInt(3660663),
Clique: &CliqueConfig{
Period: 15,
Epoch: 30000,
@@ -100,10 +100,10 @@ var (
// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
RinkebyTrustedCheckpoint = &TrustedCheckpoint{
Name: "rinkeby",
- SectionIndex: 100,
- SectionHead: common.HexToHash("0xf18f9b43e16f37b12e68818536ffe455ff18d676274ffdd856a8520ed61bb514"),
- CHTRoot: common.HexToHash("0x473f5d603b1fedad75d97fd58692130b9ac9ade1aca01eb9363d79bd1c43c791"),
- BloomRoot: common.HexToHash("0xa39ced3ddbb87e909c7531df2afb6414bea9c9a60ab94da9c6b467535f05326e"),
+ SectionIndex: 105,
+ SectionHead: common.HexToHash("0xec8147d43f936258aaf1b9b9ec91b0a853abf7109f436a23649be809ea43d507"),
+ CHTRoot: common.HexToHash("0xd92703b444846a3db928e87e450770e5d5cbe193131dc8f7c4cf18b4de925a75"),
+ BloomRoot: common.HexToHash("0xff45a6f807138a2cde0cea0c209d9ce5ad8e43ccaae5a7c41af801bb72a1ef96"),
}
// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
@@ -111,16 +111,16 @@ var (
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
+ AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Clique consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
+ AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
- TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
+ TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
TestRules = TestChainConfig.Rules(new(big.Int))
)
diff --git a/vendor/github.com/ethereum/go-ethereum/params/version.go b/vendor/github.com/ethereum/go-ethereum/params/version.go
index 1abbd1a7..ba9ab202 100644
--- a/vendor/github.com/ethereum/go-ethereum/params/version.go
+++ b/vendor/github.com/ethereum/go-ethereum/params/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 8 // Minor version component of the current release
- VersionPatch = 18 // Patch version component of the current release
+ VersionPatch = 20 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/client_example_test.go b/vendor/github.com/ethereum/go-ethereum/rpc/client_example_test.go
index 9c21c12d..3bb8717b 100644
--- a/vendor/github.com/ethereum/go-ethereum/rpc/client_example_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/rpc/client_example_test.go
@@ -25,7 +25,7 @@ import (
"github.com/ethereum/go-ethereum/rpc"
)
-// In this example, our client whishes to track the latest 'block number'
+// In this example, our client wishes to track the latest 'block number'
// known to the server. The server supports two methods:
//
// eth_getBlockByNumber("latest", {})
diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/doc.go b/vendor/github.com/ethereum/go-ethereum/rpc/doc.go
index 9a6c4abb..c60381b5 100644
--- a/vendor/github.com/ethereum/go-ethereum/rpc/doc.go
+++ b/vendor/github.com/ethereum/go-ethereum/rpc/doc.go
@@ -32,7 +32,7 @@ An example method:
func (s *CalcService) Add(a, b int) (int, error)
When the returned error isn't nil the returned integer is ignored and the error is
-send back to the client. Otherwise the returned integer is send back to the client.
+sent back to the client. Otherwise the returned integer is sent back to the client.
Optional arguments are supported by accepting pointer values as arguments. E.g.
if we want to do the addition in an optional finite field we can accept a mod
diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/ipc.go b/vendor/github.com/ethereum/go-ethereum/rpc/ipc.go
index b05e503d..4cce1cf7 100644
--- a/vendor/github.com/ethereum/go-ethereum/rpc/ipc.go
+++ b/vendor/github.com/ethereum/go-ethereum/rpc/ipc.go
@@ -29,12 +29,12 @@ func (srv *Server) ServeListener(l net.Listener) error {
for {
conn, err := l.Accept()
if netutil.IsTemporaryError(err) {
- log.Warn("RPC accept error", "err", err)
+ log.Warn("IPC accept error", "err", err)
continue
} else if err != nil {
return err
}
- log.Trace("Accepted connection", "addr", conn.RemoteAddr())
+ log.Trace("IPC accepted connection")
go srv.ServeCodec(NewJSONCodec(conn), OptionMethodInvocation|OptionSubscriptions)
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/signer/core/api.go b/vendor/github.com/ethereum/go-ethereum/signer/core/api.go
index 2b96cdb5..e9a33578 100644
--- a/vendor/github.com/ethereum/go-ethereum/signer/core/api.go
+++ b/vendor/github.com/ethereum/go-ethereum/signer/core/api.go
@@ -82,7 +82,7 @@ type SignerUI interface {
// OnSignerStartup is invoked when the signer boots, and tells the UI info about external API location and version
// information
OnSignerStartup(info StartupInfo)
- // OnInputRequried is invoked when clef requires user input, for example master password or
+ // OnInputRequired is invoked when clef requires user input, for example master password or
// pin-code for unlocking hardware wallets
OnInputRequired(info UserInputRequest) (UserInputResponse, error)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/OWNERS b/vendor/github.com/ethereum/go-ethereum/swarm/OWNERS
index d4204e08..4b9ca96e 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/OWNERS
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/OWNERS
@@ -7,7 +7,6 @@ swarm
├── fuse ────────────────── @jmozah, @holisticode
├── grafana_dashboards ──── @nonsense
├── metrics ─────────────── @nonsense, @holisticode
-├── multihash ───────────── @nolash
├── network ─────────────── ethersphere
│ ├── bitvector ───────── @zelig, @janos, @gbalint
│ ├── priorityqueue ───── @zelig, @janos, @gbalint
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go
index 7bb63196..33a8e353 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go
@@ -42,7 +42,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/log"
- "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
@@ -417,7 +416,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
return reader, mimeType, status, nil, err
}
// get the data of the update
- _, rsrcData, err := a.feed.GetContent(entry.Feed)
+ _, contentAddr, err := a.feed.GetContent(entry.Feed)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
@@ -425,23 +424,23 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
return reader, mimeType, status, nil, err
}
- // extract multihash
- decodedMultihash, err := multihash.FromMultihash(rsrcData)
- if err != nil {
+ // extract content hash
+ if len(contentAddr) != storage.AddressLength {
apiGetInvalid.Inc(1)
status = http.StatusUnprocessableEntity
- log.Warn("invalid multihash in feed update", "err", err)
- return reader, mimeType, status, nil, err
+ errorMessage := fmt.Sprintf("invalid swarm hash in feed update. Expected %d bytes. Got %d", storage.AddressLength, len(contentAddr))
+ log.Warn(errorMessage)
+ return reader, mimeType, status, nil, errors.New(errorMessage)
}
- manifestAddr = storage.Address(decodedMultihash)
- log.Trace("feed update contains multihash", "key", manifestAddr)
+ manifestAddr = storage.Address(contentAddr)
+ log.Trace("feed update contains swarm hash", "key", manifestAddr)
- // get the manifest the multihash digest points to
+ // get the manifest the swarm hash points to
trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
- log.Warn(fmt.Sprintf("loadManifestTrie (feed update multihash) error: %v", err))
+ log.Warn(fmt.Sprintf("loadManifestTrie (feed update) error: %v", err))
return reader, mimeType, status, nil, err
}
@@ -451,8 +450,8 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
if entry == nil {
status = http.StatusNotFound
apiGetNotFound.Inc(1)
- err = fmt.Errorf("manifest (feed update multihash) entry for '%s' not found", path)
- log.Trace("manifest (feed update multihash) entry not found", "key", manifestAddr, "path", path)
+ err = fmt.Errorf("manifest (feed update) entry for '%s' not found", path)
+ log.Trace("manifest (feed update) entry not found", "key", manifestAddr, "path", path)
return reader, mimeType, status, nil, err
}
}
@@ -472,7 +471,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
// no entry found
status = http.StatusNotFound
apiGetNotFound.Inc(1)
- err = fmt.Errorf("manifest entry for '%s' not found", path)
+ err = fmt.Errorf("Not found: could not find resource '%s'", path)
log.Trace("manifest entry not found", "key", contentAddr, "path", path)
}
return
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go
index d9837ca7..f793ca8b 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go
@@ -19,6 +19,7 @@ package client
import (
"archive/tar"
"bytes"
+ "context"
"encoding/json"
"errors"
"fmt"
@@ -26,6 +27,7 @@ import (
"io/ioutil"
"mime/multipart"
"net/http"
+ "net/http/httptrace"
"net/textproto"
"net/url"
"os"
@@ -33,9 +35,14 @@ import (
"regexp"
"strconv"
"strings"
+ "time"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
+ "github.com/pborman/uuid"
)
var (
@@ -474,6 +481,11 @@ type UploadFn func(file *File) error
// TarUpload uses the given Uploader to upload files to swarm as a tar stream,
// returning the resulting manifest hash
func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, toEncrypt bool) (string, error) {
+ ctx, sp := spancontext.StartSpan(context.Background(), "api.client.tarupload")
+ defer sp.Finish()
+
+ var tn time.Time
+
reqR, reqW := io.Pipe()
defer reqR.Close()
addr := hash
@@ -489,6 +501,12 @@ func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, t
if err != nil {
return "", err
}
+
+ trace := GetClientTrace("swarm api client - upload tar", "api.client.uploadtar", uuid.New()[:8], &tn)
+
+ req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
+ transport := http.DefaultTransport
+
req.Header.Set("Content-Type", "application/x-tar")
if defaultPath != "" {
q := req.URL.Query()
@@ -529,8 +547,8 @@ func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, t
}
reqW.CloseWithError(err)
}()
-
- res, err := http.DefaultClient.Do(req)
+ tn = time.Now()
+ res, err := transport.RoundTrip(req)
if err != nil {
return "", err
}
@@ -728,3 +746,57 @@ func (c *Client) GetFeedRequest(query *feed.Query, manifestAddressOrDomain strin
}
return &metadata, nil
}
+
+func GetClientTrace(traceMsg, metricPrefix, ruid string, tn *time.Time) *httptrace.ClientTrace {
+ trace := &httptrace.ClientTrace{
+ GetConn: func(_ string) {
+ log.Trace(traceMsg+" - http get", "event", "GetConn", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".getconn", nil).Update(time.Since(*tn))
+ },
+ GotConn: func(_ httptrace.GotConnInfo) {
+ log.Trace(traceMsg+" - http get", "event", "GotConn", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".gotconn", nil).Update(time.Since(*tn))
+ },
+ PutIdleConn: func(err error) {
+ log.Trace(traceMsg+" - http get", "event", "PutIdleConn", "ruid", ruid, "err", err)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".putidle", nil).Update(time.Since(*tn))
+ },
+ GotFirstResponseByte: func() {
+ log.Trace(traceMsg+" - http get", "event", "GotFirstResponseByte", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".firstbyte", nil).Update(time.Since(*tn))
+ },
+ Got100Continue: func() {
+ log.Trace(traceMsg, "event", "Got100Continue", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".got100continue", nil).Update(time.Since(*tn))
+ },
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ log.Trace(traceMsg, "event", "DNSStart", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsstart", nil).Update(time.Since(*tn))
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ log.Trace(traceMsg, "event", "DNSDone", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsdone", nil).Update(time.Since(*tn))
+ },
+ ConnectStart: func(network, addr string) {
+ log.Trace(traceMsg, "event", "ConnectStart", "ruid", ruid, "network", network, "addr", addr)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".connectstart", nil).Update(time.Since(*tn))
+ },
+ ConnectDone: func(network, addr string, err error) {
+ log.Trace(traceMsg, "event", "ConnectDone", "ruid", ruid, "network", network, "addr", addr, "err", err)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".connectdone", nil).Update(time.Since(*tn))
+ },
+ WroteHeaders: func() {
+ log.Trace(traceMsg, "event", "WroteHeaders(request)", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".wroteheaders", nil).Update(time.Since(*tn))
+ },
+ Wait100Continue: func() {
+ log.Trace(traceMsg, "event", "Wait100Continue", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".wait100continue", nil).Update(time.Since(*tn))
+ },
+ WroteRequest: func(_ httptrace.WroteRequestInfo) {
+ log.Trace(traceMsg, "event", "WroteRequest", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".wroterequest", nil).Update(time.Since(*tn))
+ },
+ }
+ return trace
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client_test.go
index 76b34939..39f6e479 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client_test.go
@@ -25,13 +25,13 @@ import (
"sort"
"testing"
+ "github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/swarm/api"
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
- "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
)
@@ -368,58 +368,99 @@ func newTestSigner() (*feed.GenericSigner, error) {
return feed.NewGenericSigner(privKey), nil
}
-// test the transparent resolving of multihash feed updates with bzz:// scheme
+// Test the transparent resolving of feed updates with bzz:// scheme
//
-// first upload data, and store the multihash to the resulting manifest in a feed update
-// retrieving the update with the multihash should return the manifest pointing directly to the data
+// First upload data to bzz:, and store the Swarm hash to the resulting manifest in a feed update.
+// This effectively uses a feed to store a pointer to content rather than the content itself
+// Retrieving the update with the Swarm hash should return the manifest pointing directly to the data
// and raw retrieve of that hash should return the data
-func TestClientCreateFeedMultihash(t *testing.T) {
+func TestClientBzzWithFeed(t *testing.T) {
signer, _ := newTestSigner()
+ // Initialize a Swarm test server
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
- client := NewClient(srv.URL)
+ swarmClient := NewClient(srv.URL)
defer srv.Close()
- // add the data our multihash aliased manifest will point to
- databytes := []byte("bar")
+ // put together some data for our test:
+ dataBytes := []byte(`
+ //
+ // Create some data our manifest will point to. Data that could be very big and wouldn't fit in a feed update.
+ // So what we are going to do is upload it to Swarm bzz:// and obtain a **manifest hash** pointing to it:
+ //
+ // MANIFEST HASH --> DATA
+ //
+ // Then, we store that **manifest hash** into a Swarm Feed update. Once we have done this,
+ // we can use the **feed manifest hash** in bzz:// instead, this way: bzz://feed-manifest-hash.
+ //
+ // FEED MANIFEST HASH --> MANIFEST HASH --> DATA
+ //
+ // Given that we can update the feed at any time with a new **manifest hash** but the **feed manifest hash**
+ // stays constant, we have effectively created a fixed address to changing content. (Applause)
+ //
+ // FEED MANIFEST HASH (the same) --> MANIFEST HASH(2) --> DATA(2)
+ //
+ `)
- swarmHash, err := client.UploadRaw(bytes.NewReader(databytes), int64(len(databytes)), false)
- if err != nil {
- t.Fatalf("Error uploading raw test data: %s", err)
+ // Create a virtual File out of memory containing the above data
+ f := &File{
+ ReadCloser: ioutil.NopCloser(bytes.NewReader(dataBytes)),
+ ManifestEntry: api.ManifestEntry{
+ ContentType: "text/plain",
+ Mode: 0660,
+ Size: int64(len(dataBytes)),
+ },
}
- s := common.FromHex(swarmHash)
- mh := multihash.ToMultihash(s)
+ // upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
+ manifestAddressHex, err := swarmClient.Upload(f, "", false)
+ if err != nil {
+ t.Fatalf("Error creating manifest: %s", err)
+ }
- // our feed topic
- topic, _ := feed.NewTopic("foo.eth", nil)
+ // convert the hex-encoded manifest hash to a 32-byte slice
+ manifestAddress := common.FromHex(manifestAddressHex)
- createRequest := feed.NewFirstRequest(topic)
+ if len(manifestAddress) != storage.AddressLength {
+ t.Fatalf("Something went wrong. Got a hash of an unexpected length. Expected %d bytes. Got %d", storage.AddressLength, len(manifestAddress))
+ }
- createRequest.SetData(mh)
- if err := createRequest.Sign(signer); err != nil {
+ // Now create a **feed manifest**. For that, we need a topic:
+ topic, _ := feed.NewTopic("interesting topic indeed", nil)
+
+ // Build a feed request to update data
+ request := feed.NewFirstRequest(topic)
+
+ // Put the 32-byte address of the manifest into the feed update
+ request.SetData(manifestAddress)
+
+ // Sign the update
+ if err := request.Sign(signer); err != nil {
t.Fatalf("Error signing update: %s", err)
}
- feedManifestHash, err := client.CreateFeedWithManifest(createRequest)
-
+ // Publish the update and at the same time request a **feed manifest** to be created
+ feedManifestAddressHex, err := swarmClient.CreateFeedWithManifest(request)
if err != nil {
t.Fatalf("Error creating feed manifest: %s", err)
}
- correctManifestAddrHex := "bb056a5264c295c2b0f613c8409b9c87ce9d71576ace02458160df4cc894210b"
- if feedManifestHash != correctManifestAddrHex {
- t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, feedManifestHash)
+ // Check we have received the exact **feed manifest** to be expected
+ // given the topic and user signing the updates:
+ correctFeedManifestAddrHex := "747c402e5b9dc715a25a4393147512167bab018a007fad7cdcd9adc7fce1ced2"
+ if feedManifestAddressHex != correctFeedManifestAddrHex {
+ t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctFeedManifestAddrHex, feedManifestAddressHex)
}
// Check we get a not found error when trying to get feed updates with a made-up manifest
- _, err = client.QueryFeed(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
+ _, err = swarmClient.QueryFeed(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
if err != ErrNoFeedUpdatesFound {
t.Fatalf("Expected to receive ErrNoFeedUpdatesFound error. Got: %s", err)
}
- reader, err := client.QueryFeed(nil, correctManifestAddrHex)
+ // If we query the feed directly we should get **manifest hash** back:
+ reader, err := swarmClient.QueryFeed(nil, correctFeedManifestAddrHex)
if err != nil {
t.Fatalf("Error retrieving feed updates: %s", err)
}
@@ -428,10 +469,27 @@ func TestClientCreateFeedMultihash(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if !bytes.Equal(mh, gotData) {
- t.Fatalf("Expected: %v, got %v", mh, gotData)
+
+ //Check that indeed the **manifest hash** is retrieved
+ if !bytes.Equal(manifestAddress, gotData) {
+ t.Fatalf("Expected: %v, got %v", manifestAddress, gotData)
}
+ // Now the final test we were looking for: Use bzz:// and that should resolve all manifests
+ // and return the original data directly:
+ f, err = swarmClient.Download(feedManifestAddressHex, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ gotData, err = ioutil.ReadAll(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check that we get back the original data:
+ if !bytes.Equal(dataBytes, gotData) {
+ t.Fatalf("Expected: %v, got %v", manifestAddress, gotData)
+ }
}
// TestClientCreateUpdateFeed will check that feeds can be created and updated via the HTTP client.
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go
index ccc040c5..f7f819ea 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go
@@ -5,6 +5,7 @@ import (
"net/http"
"runtime/debug"
"strings"
+ "time"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
@@ -73,9 +74,15 @@ func ParseURI(h http.Handler) http.Handler {
func InitLoggingResponseWriter(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ tn := time.Now()
+
writer := newLoggingResponseWriter(w)
h.ServeHTTP(writer, r)
- log.Debug("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode)
+
+ ts := time.Since(tn)
+ log.Info("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode, "time", ts*time.Millisecond)
+ metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).Update(ts)
+ metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.%d.time", r.Method, writer.statusCode), nil).Update(ts)
})
}
@@ -88,6 +95,7 @@ func InstrumentOpenTracing(h http.Handler) http.Handler {
}
spanName := fmt.Sprintf("http.%s.%s", r.Method, uri.Scheme)
ctx, sp := spancontext.StartSpan(r.Context(), spanName)
+
defer sp.Finish()
h.ServeHTTP(w, r.WithContext(ctx))
})
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server_test.go
index 1ef3deec..e82762ce 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server_test.go
@@ -45,7 +45,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
- "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/ethereum/go-ethereum/swarm/testutil"
@@ -69,60 +68,91 @@ func newTestSigner() (*feed.GenericSigner, error) {
return feed.NewGenericSigner(privKey), nil
}
-// test the transparent resolving of multihash-containing feed updates with bzz:// scheme
+// Test the transparent resolving of feed updates with bzz:// scheme
//
-// first upload data, and store the multihash to the resulting manifest in a feed update
-// retrieving the update with the multihash should return the manifest pointing directly to the data
+// First upload data to bzz:, and store the Swarm hash to the resulting manifest in a feed update.
+// This effectively uses a feed to store a pointer to content rather than the content itself
+// Retrieving the update with the Swarm hash should return the manifest pointing directly to the data
// and raw retrieve of that hash should return the data
-func TestBzzFeedMultihash(t *testing.T) {
+func TestBzzWithFeed(t *testing.T) {
signer, _ := newTestSigner()
+ // Initialize Swarm test server
srv := NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
- // add the data our multihash aliased manifest will point to
- databytes := "bar"
- testBzzUrl := fmt.Sprintf("%s/bzz:/", srv.URL)
- resp, err := http.Post(testBzzUrl, "text/plain", bytes.NewReader([]byte(databytes)))
+ // put together some data for our test:
+ dataBytes := []byte(`
+ //
+ // Create some data our manifest will point to. Data that could be very big and wouldn't fit in a feed update.
+ // So what we are going to do is upload it to Swarm bzz:// and obtain a **manifest hash** pointing to it:
+ //
+ // MANIFEST HASH --> DATA
+ //
+ // Then, we store that **manifest hash** into a Swarm Feed update. Once we have done this,
+ // we can use the **feed manifest hash** in bzz:// instead, this way: bzz://feed-manifest-hash.
+ //
+ // FEED MANIFEST HASH --> MANIFEST HASH --> DATA
+ //
+ // Given that we can update the feed at any time with a new **manifest hash** but the **feed manifest hash**
+ // stays constant, we have effectively created a fixed address to changing content. (Applause)
+ //
+ // FEED MANIFEST HASH (the same) --> MANIFEST HASH(2) --> DATA(2) ...
+ //
+ `)
+
+ // POST data to bzz and get back a content-addressed **manifest hash** pointing to it.
+ resp, err := http.Post(fmt.Sprintf("%s/bzz:/", srv.URL), "text/plain", bytes.NewReader([]byte(dataBytes)))
if err != nil {
t.Fatal(err)
}
+
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
- b, err := ioutil.ReadAll(resp.Body)
+ manifestAddressHex, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
- s := common.FromHex(string(b))
- mh := multihash.ToMultihash(s)
- log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
+ manifestAddress := common.FromHex(string(manifestAddressHex))
- topic, _ := feed.NewTopic("foo.eth", nil)
+ log.Info("added data", "manifest", string(manifestAddressHex))
+
+ // At this point we have uploaded the data and have a manifest pointing to it
+ // Now store that manifest address in a feed update.
+ // We also want a feed manifest, so we can use it to refer to the feed.
+
+ // First, create a topic for our feed:
+ topic, _ := feed.NewTopic("interesting topic indeed", nil)
+
+ // Create a feed update request:
updateRequest := feed.NewFirstRequest(topic)
- updateRequest.SetData(mh)
+ // Store the **manifest address** as data into the feed update.
+ updateRequest.SetData(manifestAddress)
+ // Sign the update
if err := updateRequest.Sign(signer); err != nil {
t.Fatal(err)
}
- log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
+ log.Info("added data", "data", common.ToHex(manifestAddress))
- testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL))
+ // Build the feed update http request:
+ feedUpdateURL, err := url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL))
if err != nil {
t.Fatal(err)
}
- query := testUrl.Query()
+ query := feedUpdateURL.Query()
body := updateRequest.AppendValues(query) // this adds all query parameters and returns the data to be posted
- query.Set("manifest", "1") // indicate we want a manifest back
- testUrl.RawQuery = query.Encode()
+ query.Set("manifest", "1") // indicate we want a feed manifest back
+ feedUpdateURL.RawQuery = query.Encode()
- // create the multihash update
- resp, err = http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body))
+ // submit the feed update request to Swarm
+ resp, err = http.Post(feedUpdateURL.String(), "application/octet-stream", bytes.NewReader(body))
if err != nil {
t.Fatal(err)
}
@@ -130,24 +160,25 @@ func TestBzzFeedMultihash(t *testing.T) {
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
- b, err = ioutil.ReadAll(resp.Body)
+
+ feedManifestAddressHex, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
- rsrcResp := &storage.Address{}
- err = json.Unmarshal(b, rsrcResp)
+ feedManifestAddress := &storage.Address{}
+ err = json.Unmarshal(feedManifestAddressHex, feedManifestAddress)
if err != nil {
- t.Fatalf("data %s could not be unmarshaled: %v", b, err)
+ t.Fatalf("data %s could not be unmarshaled: %v", feedManifestAddressHex, err)
}
- correctManifestAddrHex := "bb056a5264c295c2b0f613c8409b9c87ce9d71576ace02458160df4cc894210b"
- if rsrcResp.Hex() != correctManifestAddrHex {
- t.Fatalf("Response feed manifest address mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
+ correctManifestAddrHex := "747c402e5b9dc715a25a4393147512167bab018a007fad7cdcd9adc7fce1ced2"
+ if feedManifestAddress.Hex() != correctManifestAddrHex {
+ t.Fatalf("Response feed manifest address mismatch, expected '%s', got '%s'", correctManifestAddrHex, feedManifestAddress.Hex())
}
// get bzz manifest transparent feed update resolve
- testBzzUrl = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp)
- resp, err = http.Get(testBzzUrl)
+ getBzzURL := fmt.Sprintf("%s/bzz:/%s", srv.URL, feedManifestAddress)
+ resp, err = http.Get(getBzzURL)
if err != nil {
t.Fatal(err)
}
@@ -155,12 +186,12 @@ func TestBzzFeedMultihash(t *testing.T) {
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
- b, err = ioutil.ReadAll(resp.Body)
+ retrievedData, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
- if !bytes.Equal(b, []byte(databytes)) {
- t.Fatalf("retrieved data mismatch, expected %x, got %x", databytes, b)
+ if !bytes.Equal(retrievedData, []byte(dataBytes)) {
+ t.Fatalf("retrieved data mismatch, expected %x, got %x", dataBytes, retrievedData)
}
}
@@ -245,7 +276,8 @@ func TestBzzFeed(t *testing.T) {
t.Fatalf("Expected manifest Feed '%s', got '%s'", correctFeedHex, manifest.Entries[0].Feed.Hex())
}
- // get bzz manifest transparent feed update resolve
+ // take the chance to have bzz: crash on resolving a feed update that does not contain
+ // a swarm hash:
testBzzUrl := fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp)
resp, err = http.Get(testBzzUrl)
if err != nil {
@@ -253,7 +285,7 @@ func TestBzzFeed(t *testing.T) {
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
- t.Fatal("Expected error status since feed update does not contain multihash. Received 200 OK")
+ t.Fatal("Expected error status since feed update does not contain a Swarm hash. Received 200 OK")
}
_, err = ioutil.ReadAll(resp.Body)
if err != nil {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/ldbstore.json b/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/ldbstore.json
deleted file mode 100644
index 2d64380b..00000000
--- a/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/ldbstore.json
+++ /dev/null
@@ -1,2278 +0,0 @@
-{
- "annotations": {
- "list": [
- {
- "$$hashKey": "object:325",
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "gnetId": null,
- "graphTooltip": 1,
- "id": 5,
- "iteration": 1527598894689,
- "links": [],
- "panels": [
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 40,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 1
- },
- "id": 42,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.cachehit.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get cachehit",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "id": 43,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.cachemiss.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get cachemiss",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 7
- },
- "id": 44,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Total LocalStore.GetOrCreateRequest",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 7
- },
- "id": 47,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.errfetching.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore GetOrCreateRequest ErrFetching",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 13
- },
- "id": 45,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.hit.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore.GetOrCreateRequest hit",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 13
- },
- "id": 49,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.miss.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore GetOrCreateRequest miss",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 19
- },
- "id": 48,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.error.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get error",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 19
- },
- "id": 46,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.errfetching.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get ErrFetching",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "LocalStore",
- "type": "row"
- },
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 1
- },
- "id": 27,
- "panels": [],
- "title": "LDBStore",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 2
- },
- "id": 29,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.get.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore get",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 2
- },
- "id": 30,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.put.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore put",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 8
- },
- "id": 31,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.synciterator.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore SyncIterator",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 8
- },
- "id": 32,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.synciterator.seek.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore SyncIterator Seek/Next",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 14
- },
- "id": 50,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.collectgarbage.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore Collect Garbage",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 14
- },
- "id": 51,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.collectgarbage.delete.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore Collect Garbage - Actual Deletes",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 20
- },
- "id": 34,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 39
- },
- "id": 36,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbdatabase.get.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBDatabase get",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 39
- },
- "id": 37,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbdatabase.write.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBDatabase write",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 45
- },
- "id": 38,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbdatabase.newiterator.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBDatabase NewIterator",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "LDBDatabase",
- "type": "row"
- }
- ],
- "refresh": "10s",
- "schemaVersion": 16,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "auto": false,
- "auto_count": 30,
- "auto_min": "10s",
- "current": {
- "text": "10s",
- "value": "10s"
- },
- "hide": 0,
- "label": "resolution",
- "name": "myinterval",
- "options": [
- {
- "selected": false,
- "text": "5s",
- "value": "5s"
- },
- {
- "selected": true,
- "text": "10s",
- "value": "10s"
- },
- {
- "selected": false,
- "text": "30s",
- "value": "30s"
- },
- {
- "selected": false,
- "text": "100s",
- "value": "100s"
- }
- ],
- "query": "5s,10s,30s,100s",
- "refresh": 2,
- "type": "interval"
- },
- {
- "allValue": null,
- "current": {
- "text": "swarm_30399 + swarm_30400 + swarm_30401",
- "value": [
- "swarm_30399",
- "swarm_30400",
- "swarm_30401"
- ]
- },
- "datasource": "metrics",
- "hide": 0,
- "includeAll": true,
- "label": null,
- "multi": true,
- "name": "host",
- "options": [],
- "query": "SHOW TAG VALUES WITH KEY = \"host\"",
- "refresh": 1,
- "regex": "",
- "sort": 1,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "swarm.http.request.GET.time.span",
- "type": "query",
- "useTags": false
- }
- ]
- },
- "time": {
- "from": "now-15m",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "",
- "title": "LDBStore and LDBDatabase",
- "uid": "zS6beG7iz",
- "version": 28
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/swarm.json b/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/swarm.json
deleted file mode 100644
index 3ee244d1..00000000
--- a/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/swarm.json
+++ /dev/null
@@ -1,3198 +0,0 @@
-{
- "annotations": {
- "list": [
- {
- "$$hashKey": "object:147",
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "gnetId": null,
- "graphTooltip": 1,
- "id": 2,
- "iteration": 1527598859072,
- "links": [],
- "panels": [
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 34,
- "panels": [],
- "title": "P2P",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 1
- },
- "id": 36,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.send.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P Send() - messages sent",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "id": 37,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "p95($tag_host)",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.send_t.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P Send() timer - 95%ile",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 10
- },
- "id": 38,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "1 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority.1.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- },
- {
- "alias": "2 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority.2.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- },
- {
- "alias": "3 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority.3.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "C",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P SendPriority() - messages sent",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 10
- },
- "id": 39,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "1 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority_t.1.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": []
- },
- {
- "alias": "2 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority_t.2.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P SendPriority() timer - 95%ile",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 19
- },
- "id": 40,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.registry.peers.gauge",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "last"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Registry Peers",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 28
- },
- "id": 32,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 2
- },
- "id": 14,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.stack.uptime.gauge",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Uptime",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "Uptime",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 29
- },
- "id": 28,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 7
- },
- "id": 2,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "GET",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "null"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.GET.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- },
- {
- "alias": "POST",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "null"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.POST.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Total HTTP Requests",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 7
- },
- "id": 26,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.GET.time.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "HTTP GET requests 95% timer",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 13
- },
- "id": 15,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.GET.time.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p50"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "HTTP GET requests 50% timer",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 13
- },
- "id": 8,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "POST",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.POST.time.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "HTTP POST requests 95% timer",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "HTTP",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 30
- },
- "id": 30,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 0,
- "y": 8
- },
- "id": 16,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.lazychunkreader.read.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LazyChunkReader read() calls",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 12,
- "y": 8
- },
- "id": 18,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.lazychunkreader.read.err.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LazyChunkReader read errors",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 0,
- "y": 13
- },
- "id": 17,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.lazychunkreader.read.bytes.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LazyChunkReader bytes read",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "decbytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "LazyChunkReader",
- "type": "row"
- },
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 31
- },
- "id": 25,
- "panels": [],
- "title": "All measurements",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 32
- },
- "id": 3,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.api.get.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "API Get (BZZ)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 32
- },
- "id": 13,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.network.stream.request_from_peers.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Request from peers",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 38
- },
- "id": 11,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.network.stream.received_chunks.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Received chunks",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 38
- },
- "id": 12,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.storage.cache.requests.size.gauge",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "max"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Requests cache entries",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 44
- },
- "id": 9,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.network.stream.handle_retrieve_request_msg.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Handle retrieve request msg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 44
- },
- "id": 20,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.syncer.setnextbatch.iterator.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "syncer setnextbatch iterator calls",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 50
- },
- "id": 21,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handlewantedhashesmsg.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer HandleWantedHashesMsg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 50
- },
- "id": 22,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handlesubscribemsg.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer HandleSubscribeMsg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 56
- },
- "id": 23,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handlewantedhashesmsg.actualget.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer HandleWantedHashesMsg actual get",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 56
- },
- "id": 19,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handleofferedhashes.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer OfferedHashesMsg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "refresh": "30s",
- "schemaVersion": 16,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "auto": false,
- "auto_count": 30,
- "auto_min": "10s",
- "current": {
- "text": "10s",
- "value": "10s"
- },
- "hide": 0,
- "label": "resolution",
- "name": "myinterval",
- "options": [
- {
- "selected": false,
- "text": "5s",
- "value": "5s"
- },
- {
- "selected": true,
- "text": "10s",
- "value": "10s"
- },
- {
- "selected": false,
- "text": "30s",
- "value": "30s"
- },
- {
- "selected": false,
- "text": "100s",
- "value": "100s"
- }
- ],
- "query": "5s,10s,30s,100s",
- "refresh": 2,
- "type": "interval"
- },
- {
- "allValue": null,
- "current": {
- "text": "swarm_30399 + swarm_30400 + swarm_30401 + swarm_30402",
- "value": [
- "swarm_30399",
- "swarm_30400",
- "swarm_30401",
- "swarm_30402"
- ]
- },
- "datasource": "metrics",
- "hide": 0,
- "includeAll": true,
- "label": null,
- "multi": true,
- "name": "host",
- "options": [],
- "query": "SHOW TAG VALUES WITH KEY = \"host\"",
- "refresh": 1,
- "regex": "",
- "sort": 1,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "swarm.http.request.GET.time.span",
- "type": "query",
- "useTags": false
- }
- ]
- },
- "time": {
- "from": "now-15m",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "",
- "title": "Swarm",
- "uid": "vmEtxxgmz",
- "version": 138
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/metrics/flags.go b/vendor/github.com/ethereum/go-ethereum/swarm/metrics/flags.go
index 79490fd3..7c12120a 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/metrics/flags.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/metrics/flags.go
@@ -27,26 +27,26 @@ import (
)
var (
- metricsEnableInfluxDBExportFlag = cli.BoolFlag{
+ MetricsEnableInfluxDBExportFlag = cli.BoolFlag{
Name: "metrics.influxdb.export",
Usage: "Enable metrics export/push to an external InfluxDB database",
}
- metricsInfluxDBEndpointFlag = cli.StringFlag{
+ MetricsInfluxDBEndpointFlag = cli.StringFlag{
Name: "metrics.influxdb.endpoint",
Usage: "Metrics InfluxDB endpoint",
Value: "http://127.0.0.1:8086",
}
- metricsInfluxDBDatabaseFlag = cli.StringFlag{
+ MetricsInfluxDBDatabaseFlag = cli.StringFlag{
Name: "metrics.influxdb.database",
Usage: "Metrics InfluxDB database",
Value: "metrics",
}
- metricsInfluxDBUsernameFlag = cli.StringFlag{
+ MetricsInfluxDBUsernameFlag = cli.StringFlag{
Name: "metrics.influxdb.username",
Usage: "Metrics InfluxDB username",
Value: "",
}
- metricsInfluxDBPasswordFlag = cli.StringFlag{
+ MetricsInfluxDBPasswordFlag = cli.StringFlag{
Name: "metrics.influxdb.password",
Usage: "Metrics InfluxDB password",
Value: "",
@@ -55,7 +55,7 @@ var (
// It is used so that we can group all nodes and average a measurement across all of them, but also so
// that we can select a specific node and inspect its measurements.
// https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key
- metricsInfluxDBHostTagFlag = cli.StringFlag{
+ MetricsInfluxDBHostTagFlag = cli.StringFlag{
Name: "metrics.influxdb.host.tag",
Usage: "Metrics InfluxDB `host` tag attached to all measurements",
Value: "localhost",
@@ -65,20 +65,24 @@ var (
// Flags holds all command-line flags required for metrics collection.
var Flags = []cli.Flag{
utils.MetricsEnabledFlag,
- metricsEnableInfluxDBExportFlag,
- metricsInfluxDBEndpointFlag, metricsInfluxDBDatabaseFlag, metricsInfluxDBUsernameFlag, metricsInfluxDBPasswordFlag, metricsInfluxDBHostTagFlag,
+ MetricsEnableInfluxDBExportFlag,
+ MetricsInfluxDBEndpointFlag,
+ MetricsInfluxDBDatabaseFlag,
+ MetricsInfluxDBUsernameFlag,
+ MetricsInfluxDBPasswordFlag,
+ MetricsInfluxDBHostTagFlag,
}
func Setup(ctx *cli.Context) {
if gethmetrics.Enabled {
log.Info("Enabling swarm metrics collection")
var (
- enableExport = ctx.GlobalBool(metricsEnableInfluxDBExportFlag.Name)
- endpoint = ctx.GlobalString(metricsInfluxDBEndpointFlag.Name)
- database = ctx.GlobalString(metricsInfluxDBDatabaseFlag.Name)
- username = ctx.GlobalString(metricsInfluxDBUsernameFlag.Name)
- password = ctx.GlobalString(metricsInfluxDBPasswordFlag.Name)
- hosttag = ctx.GlobalString(metricsInfluxDBHostTagFlag.Name)
+ enableExport = ctx.GlobalBool(MetricsEnableInfluxDBExportFlag.Name)
+ endpoint = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name)
+ database = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
+ username = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
+ password = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
+ hosttag = ctx.GlobalString(MetricsInfluxDBHostTagFlag.Name)
)
// Start system runtime metrics collection
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/multihash/multihash.go b/vendor/github.com/ethereum/go-ethereum/swarm/multihash/multihash.go
deleted file mode 100644
index 3306e3a6..00000000
--- a/vendor/github.com/ethereum/go-ethereum/swarm/multihash/multihash.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package multihash
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
-)
-
-const (
- defaultMultihashLength = 32
- defaultMultihashTypeCode = 0x1b
-)
-
-var (
- multihashTypeCode uint8
- MultihashLength = defaultMultihashLength
-)
-
-func init() {
- multihashTypeCode = defaultMultihashTypeCode
- MultihashLength = defaultMultihashLength
-}
-
-// check if valid swarm multihash
-func isSwarmMultihashType(code uint8) bool {
- return code == multihashTypeCode
-}
-
-// GetMultihashLength returns the digest length of the provided multihash
-// It will fail if the multihash is not a valid swarm mulithash
-func GetMultihashLength(data []byte) (int, int, error) {
- cursor := 0
- typ, c := binary.Uvarint(data)
- if c <= 0 {
- return 0, 0, errors.New("unreadable hashtype field")
- }
- if !isSwarmMultihashType(uint8(typ)) {
- return 0, 0, fmt.Errorf("hash code %x is not a swarm hashtype", typ)
- }
- cursor += c
- hashlength, c := binary.Uvarint(data[cursor:])
- if c <= 0 {
- return 0, 0, errors.New("unreadable length field")
- }
- cursor += c
-
- // we cheekily assume hashlength < maxint
- inthashlength := int(hashlength)
- if len(data[c:]) < inthashlength {
- return 0, 0, errors.New("length mismatch")
- }
- return inthashlength, cursor, nil
-}
-
-// FromMulithash returns the digest portion of the multihash
-// It will fail if the multihash is not a valid swarm multihash
-func FromMultihash(data []byte) ([]byte, error) {
- hashLength, _, err := GetMultihashLength(data)
- if err != nil {
- return nil, err
- }
- return data[len(data)-hashLength:], nil
-}
-
-// ToMulithash wraps the provided digest data with a swarm mulithash header
-func ToMultihash(hashData []byte) []byte {
- buf := bytes.NewBuffer(nil)
- b := make([]byte, 8)
- c := binary.PutUvarint(b, uint64(multihashTypeCode))
- buf.Write(b[:c])
- c = binary.PutUvarint(b, uint64(len(hashData)))
- buf.Write(b[:c])
- buf.Write(hashData)
- return buf.Bytes()
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/multihash/multihash_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/multihash/multihash_test.go
deleted file mode 100644
index 85df741d..00000000
--- a/vendor/github.com/ethereum/go-ethereum/swarm/multihash/multihash_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package multihash
-
-import (
- "bytes"
- "math/rand"
- "testing"
-)
-
-// parse multihash, and check that invalid multihashes fail
-func TestCheckMultihash(t *testing.T) {
- hashbytes := make([]byte, 32)
- c, err := rand.Read(hashbytes)
- if err != nil {
- t.Fatal(err)
- } else if c < 32 {
- t.Fatal("short read")
- }
-
- expected := ToMultihash(hashbytes)
-
- l, hl, _ := GetMultihashLength(expected)
- if l != 32 {
- t.Fatalf("expected length %d, got %d", 32, l)
- } else if hl != 2 {
- t.Fatalf("expected header length %d, got %d", 2, hl)
- }
- if _, _, err := GetMultihashLength(expected[1:]); err == nil {
- t.Fatal("expected failure on corrupt header")
- }
- if _, _, err := GetMultihashLength(expected[:len(expected)-2]); err == nil {
- t.Fatal("expected failure on short content")
- }
- dh, _ := FromMultihash(expected)
- if !bytes.Equal(dh, hashbytes) {
- t.Fatalf("expected content hash %x, got %x", hashbytes, dh)
- }
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go
index 1aa1ae42..ebef5459 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go
@@ -165,8 +165,8 @@ func (h *Hive) Run(p *BzzPeer) error {
// otherwise just send depth to new peer
dp.NotifyDepth(depth)
}
+ NotifyPeer(p.BzzAddr, h.Kademlia)
}
- NotifyPeer(p.BzzAddr, h.Kademlia)
defer h.Off(dp)
return dp.Run(dp.HandleMsg)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go
index cd94741b..a8ecaa4b 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go
@@ -81,14 +81,15 @@ func NewKadParams() *KadParams {
// Kademlia is a table of live peers and a db of known peers (node records)
type Kademlia struct {
lock sync.RWMutex
- *KadParams // Kademlia configuration parameters
- base []byte // immutable baseaddress of the table
- addrs *pot.Pot // pots container for known peer addresses
- conns *pot.Pot // pots container for live peer connections
- depth uint8 // stores the last current depth of saturation
- nDepth int // stores the last neighbourhood depth
- nDepthC chan int // returned by DepthC function to signal neighbourhood depth change
- addrCountC chan int // returned by AddrCountC function to signal peer count change
+ *KadParams // Kademlia configuration parameters
+ base []byte // immutable baseaddress of the table
+ addrs *pot.Pot // pots container for known peer addresses
+ conns *pot.Pot // pots container for live peer connections
+ depth uint8 // stores the last current depth of saturation
+ nDepth int // stores the last neighbourhood depth
+ nDepthC chan int // returned by DepthC function to signal neighbourhood depth change
+ addrCountC chan int // returned by AddrCountC function to signal peer count change
+ Pof func(pot.Val, pot.Val, int) (int, bool) // function for calculating kademlia routing distance between two addresses
}
// NewKademlia creates a Kademlia table for base address addr
@@ -103,6 +104,7 @@ func NewKademlia(addr []byte, params *KadParams) *Kademlia {
KadParams: params,
addrs: pot.NewPot(nil, 0),
conns: pot.NewPot(nil, 0),
+ Pof: pof,
}
}
@@ -175,7 +177,7 @@ func (k *Kademlia) SuggestPeer() (a *BzzAddr, o int, want bool) {
k.lock.Lock()
defer k.lock.Unlock()
minsize := k.MinBinSize
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
// if there is a callable neighbour within the current proxBin, connect
// this makes sure nearest neighbour set is fully connected
var ppo int
@@ -289,6 +291,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
// neighbourhood depth on each change.
// Not receiving from the returned channel will block On function
// when the neighbourhood depth is changed.
+// TODO: Why is this exported, and if it should be; why can't we have more subscribers than one?
func (k *Kademlia) NeighbourhoodDepthC() <-chan int {
k.lock.Lock()
defer k.lock.Unlock()
@@ -305,7 +308,7 @@ func (k *Kademlia) sendNeighbourhoodDepthChange() {
// It provides signaling of neighbourhood depth change.
// This part of the code is sending new neighbourhood depth to nDepthC if that condition is met.
if k.nDepthC != nil {
- nDepth := k.neighbourhoodDepth()
+ nDepth := depthForPot(k.conns, k.MinProxBinSize, k.base)
if nDepth != k.nDepth {
k.nDepth = nDepth
k.nDepthC <- nDepth
@@ -361,7 +364,7 @@ func (k *Kademlia) EachBin(base []byte, pof pot.Pof, o int, eachBinFunc func(con
var startPo int
var endPo int
- kadDepth := k.neighbourhoodDepth()
+ kadDepth := depthForPot(k.conns, k.MinProxBinSize, k.base)
k.conns.EachBin(base, pof, o, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
if startPo > 0 && endPo != k.MaxProxDisplay {
@@ -395,7 +398,7 @@ func (k *Kademlia) eachConn(base []byte, o int, f func(*Peer, int, bool) bool) {
if len(base) == 0 {
base = k.base
}
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
k.conns.EachNeighbour(base, pof, func(val pot.Val, po int) bool {
if po > o {
return true
@@ -417,7 +420,7 @@ func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool
if len(base) == 0 {
base = k.base
}
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
k.addrs.EachNeighbour(base, pof, func(val pot.Val, po int) bool {
if po > o {
return true
@@ -426,21 +429,72 @@ func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool
})
}
-// neighbourhoodDepth returns the proximity order that defines the distance of
+func (k *Kademlia) NeighbourhoodDepth() (depth int) {
+ k.lock.RLock()
+ defer k.lock.RUnlock()
+ return depthForPot(k.conns, k.MinProxBinSize, k.base)
+}
+
+// depthForPot returns the proximity order that defines the distance of
// the nearest neighbour set with cardinality >= MinProxBinSize
// if there is altogether less than MinProxBinSize peers it returns 0
// caller must hold the lock
-func (k *Kademlia) neighbourhoodDepth() (depth int) {
- if k.conns.Size() < k.MinProxBinSize {
+func depthForPot(p *pot.Pot, minProxBinSize int, pivotAddr []byte) (depth int) {
+ if p.Size() <= minProxBinSize {
return 0
}
+
+ // total number of peers in iteration
var size int
+
+ // true if iteration has all prox peers
+ var b bool
+
+ // last po recorded in iteration
+ var lastPo int
+
f := func(v pot.Val, i int) bool {
+ // po == 256 means that addr is the pivot address(self)
+ if i == 256 {
+ return true
+ }
size++
- depth = i
- return size < k.MinProxBinSize
+
+ // this means we have all nn-peers.
+ // depth is by default set to the bin of the farthest nn-peer
+ if size == minProxBinSize {
+ b = true
+ depth = i
+ return true
+ }
+
+ // if there are empty bins between farthest nn and current node,
+ // the depth should recalculated to be
+ // the farthest of those empty bins
+ //
+ // 0 abac ccde
+ // 1 2a2a
+ // 2 589f <--- nearest non-nn
+ // ============ DEPTH 3 ===========
+ // 3 <--- don't count as empty bins
+ // 4 <--- don't count as empty bins
+ // 5 cbcb cdcd <---- furthest nn
+ // 6 a1a2 b3c4
+ if b && i < depth {
+ depth = i + 1
+ lastPo = i
+ return false
+ }
+ lastPo = i
+ return true
+ }
+ p.EachNeighbour(pivotAddr, pof, f)
+
+ // cover edge case where more than one farthest nn
+ // AND we only have nn-peers
+ if lastPo == depth {
+ depth = 0
}
- k.conns.EachNeighbour(k.base, pof, f)
return depth
}
@@ -500,7 +554,7 @@ func (k *Kademlia) string() string {
liverows := make([]string, k.MaxProxDisplay)
peersrows := make([]string, k.MaxProxDisplay)
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
rest := k.conns.Size()
k.conns.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
var rowlen int
@@ -570,6 +624,7 @@ type PeerPot struct {
// as hexadecimal representations of the address.
// used for testing only
func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot {
+
// create a table of all nodes for health check
np := pot.NewPot(nil, 0)
for _, addr := range addrs {
@@ -578,34 +633,47 @@ func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot {
ppmap := make(map[string]*PeerPot)
for i, a := range addrs {
- pl := 256
- prev := 256
+
+ // actual kademlia depth
+ depth := depthForPot(np, kadMinProxSize, a)
+
+ // upon entering a new iteration
+ // this will hold the value the po should be
+ // if it's one higher than the po in the last iteration
+ prevPo := 256
+
+ // all empty bins which are outside neighbourhood depth
var emptyBins []int
+
+ // all nn-peers
var nns [][]byte
- np.EachNeighbour(addrs[i], pof, func(val pot.Val, po int) bool {
- a := val.([]byte)
+
+ np.EachNeighbour(a, pof, func(val pot.Val, po int) bool {
+ addr := val.([]byte)
+ // po == 256 means that addr is the pivot address(self)
if po == 256 {
return true
}
- if pl == 256 || pl == po {
- nns = append(nns, a)
+
+ // iterate through the neighbours, going from the closest to the farthest
+ // we calculate the nearest neighbours that should be in the set
+ // depth in this case equates to:
+ // 1. Within all bins that are higher or equal than depth there are
+ // at least minProxBinSize peers connected
+ // 2. depth-1 bin is not empty
+ if po >= depth {
+ nns = append(nns, addr)
+ prevPo = depth - 1
+ return true
}
- if pl == 256 && len(nns) >= kadMinProxSize {
- pl = po
- prev = po
+ for j := prevPo; j > po; j-- {
+ emptyBins = append(emptyBins, j)
}
- if prev < pl {
- for j := prev; j > po; j-- {
- emptyBins = append(emptyBins, j)
- }
- }
- prev = po - 1
+ prevPo = po - 1
return true
})
- for j := prev; j >= 0; j-- {
- emptyBins = append(emptyBins, j)
- }
- log.Trace(fmt.Sprintf("%x NNS: %s", addrs[i][:4], LogAddrs(nns)))
+
+ log.Trace(fmt.Sprintf("%x NNS: %s, emptyBins: %s", addrs[i][:4], LogAddrs(nns), logEmptyBins(emptyBins)))
ppmap[common.Bytes2Hex(a)] = &PeerPot{nns, emptyBins}
}
return ppmap
@@ -620,7 +688,7 @@ func (k *Kademlia) saturation(n int) int {
prev++
return prev == po && size >= n
})
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
if depth < prev {
return depth
}
@@ -633,8 +701,11 @@ func (k *Kademlia) full(emptyBins []int) (full bool) {
prev := 0
e := len(emptyBins)
ok := true
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
k.conns.EachBin(k.base, pof, 0, func(po, _ int, _ func(func(val pot.Val, i int) bool) bool) bool {
+ if po >= depth {
+ return false
+ }
if prev == depth+1 {
return true
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia_test.go
index d2e051f4..184a2d94 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia_test.go
@@ -25,6 +25,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/protocols"
"github.com/ethereum/go-ethereum/swarm/pot"
)
@@ -73,6 +76,76 @@ func Register(k *Kademlia, regs ...string) {
}
}
+// tests the validity of neighborhood depth calculations
+//
+// in particular, it tests that if there are one or more consecutive
+// empty bins above the farthest "nearest neighbor-peer" then
+// the depth should be set at the farthest of those empty bins
+//
+// TODO: Make test adapt to change in MinProxBinSize
+func TestNeighbourhoodDepth(t *testing.T) {
+ baseAddressBytes := RandomAddr().OAddr
+ kad := NewKademlia(baseAddressBytes, NewKadParams())
+
+ baseAddress := pot.NewAddressFromBytes(baseAddressBytes)
+
+ closerAddress := pot.RandomAddressAt(baseAddress, 7)
+ closerPeer := newTestDiscoveryPeer(closerAddress, kad)
+ kad.On(closerPeer)
+ depth := kad.NeighbourhoodDepth()
+ if depth != 0 {
+ t.Fatalf("expected depth 0, was %d", depth)
+ }
+
+ sameAddress := pot.RandomAddressAt(baseAddress, 7)
+ samePeer := newTestDiscoveryPeer(sameAddress, kad)
+ kad.On(samePeer)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 0 {
+ t.Fatalf("expected depth 0, was %d", depth)
+ }
+
+ midAddress := pot.RandomAddressAt(baseAddress, 4)
+ midPeer := newTestDiscoveryPeer(midAddress, kad)
+ kad.On(midPeer)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 5 {
+ t.Fatalf("expected depth 5, was %d", depth)
+ }
+
+ kad.Off(midPeer)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 0 {
+ t.Fatalf("expected depth 0, was %d", depth)
+ }
+
+ fartherAddress := pot.RandomAddressAt(baseAddress, 1)
+ fartherPeer := newTestDiscoveryPeer(fartherAddress, kad)
+ kad.On(fartherPeer)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 2 {
+ t.Fatalf("expected depth 2, was %d", depth)
+ }
+
+ midSameAddress := pot.RandomAddressAt(baseAddress, 4)
+ midSamePeer := newTestDiscoveryPeer(midSameAddress, kad)
+ kad.Off(closerPeer)
+ kad.On(midPeer)
+ kad.On(midSamePeer)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 2 {
+ t.Fatalf("expected depth 2, was %d", depth)
+ }
+
+ kad.Off(fartherPeer)
+ log.Trace(kad.string())
+ time.Sleep(time.Millisecond)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 0 {
+ t.Fatalf("expected depth 0, was %d", depth)
+ }
+}
+
func testSuggestPeer(k *Kademlia, expAddr string, expPo int, expWant bool) error {
addr, o, want := k.SuggestPeer()
if binStr(addr) != expAddr {
@@ -376,7 +449,7 @@ func TestKademliaHiveString(t *testing.T) {
Register(k, "10000000", "10000001")
k.MaxProxDisplay = 8
h := k.String()
- expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), MinProxBinSize: 2, MinBinSize: 1, MaxBinSize: 4\n000 0 | 2 8100 (0) 8000 (0)\n============ DEPTH: 1 ==========================================\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
+ expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), MinProxBinSize: 2, MinBinSize: 1, MaxBinSize: 4\n============ DEPTH: 0 ==========================================\n000 0 | 2 8100 (0) 8000 (0)\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
if expH[104:] != h[104:] {
t.Fatalf("incorrect hive output. expected %v, got %v", expH, h)
}
@@ -644,3 +717,17 @@ func TestKademliaCase5(t *testing.T) {
"78fafa0809929a1279ece089a51d12457c2d8416dff859aeb2ccc24bb50df5ec", "1dd39b1257e745f147cbbc3cadd609ccd6207c41056dbc4254bba5d2527d3ee5", "5f61dd66d4d94aec8fcc3ce0e7885c7edf30c43143fa730e2841c5d28e3cd081", "8aa8b0472cb351d967e575ad05c4b9f393e76c4b01ef4b3a54aac5283b78abc9", "4502f385152a915b438a6726ce3ea9342e7a6db91a23c2f6bee83a885ed7eb82", "718677a504249db47525e959ef1784bed167e1c46f1e0275b9c7b588e28a3758", "7c54c6ed1f8376323896ed3a4e048866410de189e9599dd89bf312ca4adb96b5", "18e03bd3378126c09e799a497150da5c24c895aedc84b6f0dbae41fc4bac081a", "23db76ac9e6e58d9f5395ca78252513a7b4118b4155f8462d3d5eec62486cadc", "40ae0e8f065e96c7adb7fa39505136401f01780481e678d718b7f6dbb2c906ec", "c1539998b8bae19d339d6bbb691f4e9daeb0e86847545229e80fe0dffe716e92", "ed139d73a2699e205574c08722ca9f030ad2d866c662f1112a276b91421c3cb9", "5bdb19584b7a36d09ca689422ef7e6bb681b8f2558a6b2177a8f7c812f631022", "636c9de7fe234ffc15d67a504c69702c719f626c17461d3f2918e924cd9d69e2", "de4455413ff9335c440d52458c6544191bd58a16d85f700c1de53b62773064ea", "de1963310849527acabc7885b6e345a56406a8f23e35e436b6d9725e69a79a83", "a80a50a467f561210a114cba6c7fb1489ed43a14d61a9edd70e2eb15c31f074d", "7804f12b8d8e6e4b375b242058242068a3809385e05df0e64973cde805cf729c", "60f9aa320c02c6f2e6370aa740cf7cea38083fa95fca8c99552cda52935c1520", "d8da963602390f6c002c00ce62a84b514edfce9ebde035b277a957264bb54d21", "8463d93256e026fe436abad44697152b9a56ac8e06a0583d318e9571b83d073c", "9a3f78fcefb9a05e40a23de55f6153d7a8b9d973ede43a380bf46bb3b3847de1", "e3bb576f4b3760b9ca6bff59326f4ebfc4a669d263fb7d67ab9797adea54ed13", "4d5cdbd6dcca5bdf819a0fe8d175dc55cc96f088d37462acd5ea14bc6296bdbe", "5a0ed28de7b5258c727cb85447071c74c00a5fbba9e6bc0393bc51944d04ab2a", "61e4ddb479c283c638f4edec24353b6cc7a3a13b930824aad016b0996ca93c47", "7e3610868acf714836cafaaa7b8c009a9ac6e3a6d443e5586cf661530a204ee2", "d74b244d4345d2c86e30a097105e4fb133d53c578320285132a952cdaa64416e", "cfeed57d0f935bfab89e3f630a7c97e0b1605f0724d85a008bbfb92cb47863a8", "580837af95055670e20d494978f60c7f1458dc4b9e389fc7aa4982b2aca3bce3", "df55c0c49e6c8a83d82dfa1c307d3bf6a20e18721c80d8ec4f1f68dc0a137ced", "5f149c51ce581ba32a285439a806c063ced01ccd4211cd024e6a615b8f216f95", "1eb76b00aeb127b10dd1b7cd4c3edeb4d812b5a658f0feb13e85c4d2b7c6fe06", "7a56ba7c3fb7cbfb5561a46a75d95d7722096b45771ec16e6fa7bbfab0b35dfe", "4bae85ad88c28470f0015246d530adc0cd1778bdd5145c3c6b538ee50c4e04bd", "afd1892e2a7145c99ec0ebe9ded0d3fec21089b277a68d47f45961ec5e39e7e0", "953138885d7b36b0ef79e46030f8e61fd7037fbe5ce9e0a94d728e8c8d7eab86", "de761613ef305e4f628cb6bf97d7b7dc69a9d513dc233630792de97bcda777a6", "3f3087280063d09504c084bbf7fdf984347a72b50d097fd5b086ffabb5b3fb4c", "7d18a94bb1ebfdef4d3e454d2db8cb772f30ca57920dd1e402184a9e598581a0", "a7d6fbdc9126d9f10d10617f49fb9f5474ffe1b229f76b7dd27cebba30eccb5d", "fad0246303618353d1387ec10c09ee991eb6180697ed3470ed9a6b377695203d", "1cf66e09ea51ee5c23df26615a9e7420be2ac8063f28f60a3bc86020e94fe6f3", "8269cdaa153da7c358b0b940791af74d7c651cd4d3f5ed13acfe6d0f2c539e7f", "90d52eaaa60e74bf1c79106113f2599471a902d7b1c39ac1f55b20604f453c09", "9788fd0c09190a3f3d0541f68073a2f44c2fcc45bb97558a7c319f36c25a75b3", "10b68fc44157ecfdae238ee6c1ce0333f906ad04d1a4cb1505c8e35c3c87fbb0", "e5284117fdf3757920475c786e0004cb00ba0932163659a89b36651a01e57394", "403ad51d911e113dcd5f9ff58c94f6d278886a2a4da64c3ceca2083282c92de3",
)
}
+
+func newTestDiscoveryPeer(addr pot.Address, kad *Kademlia) *Peer {
+ rw := &p2p.MsgPipeRW{}
+ p := p2p.NewPeer(enode.ID{}, "foo", []p2p.Cap{})
+ pp := protocols.NewPeer(p, rw, &protocols.Spec{})
+ bp := &BzzPeer{
+ Peer: pp,
+ BzzAddr: &BzzAddr{
+ OAddr: addr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", addr[:])),
+ },
+ }
+ return NewPeer(bp, kad)
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/networkid_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/networkid_test.go
index d1d359de..191d67e5 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/networkid_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/networkid_test.go
@@ -95,8 +95,7 @@ func TestNetworkID(t *testing.T) {
kademlias[node].EachAddr(nil, 0, func(addr *BzzAddr, _ int, _ bool) bool {
found := false
for _, nd := range netIDGroup {
- p := nd.Bytes()
- if bytes.Equal(p, addr.Address()) {
+ if bytes.Equal(kademlias[nd].BaseAddr(), addr.Address()) {
found = true
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go
index 66ae94a8..4b9b28cd 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go
@@ -44,7 +44,7 @@ const (
// BzzSpec is the spec of the generic swarm handshake
var BzzSpec = &protocols.Spec{
Name: "bzz",
- Version: 7,
+ Version: 8,
MaxMsgSize: 10 * 1024 * 1024,
Messages: []interface{}{
HandshakeMsg{},
@@ -54,7 +54,7 @@ var BzzSpec = &protocols.Spec{
// DiscoverySpec is the spec for the bzz discovery subprotocols
var DiscoverySpec = &protocols.Spec{
Name: "hive",
- Version: 6,
+ Version: 8,
MaxMsgSize: 10 * 1024 * 1024,
Messages: []interface{}{
peersMsg{},
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol_test.go
index f0d26662..53ceda74 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol_test.go
@@ -31,7 +31,7 @@ import (
)
const (
- TestProtocolVersion = 7
+ TestProtocolVersion = 8
TestProtocolNetworkID = 3
)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events.go
index 594d3622..d73c3af4 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events.go
@@ -20,16 +20,18 @@ import (
"context"
"sync"
- "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/simulations"
)
// PeerEvent is the type of the channel returned by Simulation.PeerEvents.
type PeerEvent struct {
// NodeID is the ID of node that the event is caught on.
NodeID enode.ID
+ // PeerID is the ID of the peer node that the event is caught on.
+ PeerID enode.ID
// Event is the event that is caught.
- Event *p2p.PeerEvent
+ Event *simulations.Event
// Error is the error that may have happened during event watching.
Error error
}
@@ -37,9 +39,13 @@ type PeerEvent struct {
// PeerEventsFilter defines a filter on PeerEvents to exclude messages with
// defined properties. Use PeerEventsFilter methods to set required options.
type PeerEventsFilter struct {
- t *p2p.PeerEventType
- protocol *string
- msgCode *uint64
+ eventType simulations.EventType
+
+ connUp *bool
+
+ msgReceive *bool
+ protocol *string
+ msgCode *uint64
}
// NewPeerEventsFilter returns a new PeerEventsFilter instance.
@@ -47,20 +53,48 @@ func NewPeerEventsFilter() *PeerEventsFilter {
return &PeerEventsFilter{}
}
-// Type sets the filter to only one peer event type.
-func (f *PeerEventsFilter) Type(t p2p.PeerEventType) *PeerEventsFilter {
- f.t = &t
+// Connect sets the filter to events when two nodes connect.
+func (f *PeerEventsFilter) Connect() *PeerEventsFilter {
+ f.eventType = simulations.EventTypeConn
+ b := true
+ f.connUp = &b
+ return f
+}
+
+// Drop sets the filter to events when two nodes disconnect.
+func (f *PeerEventsFilter) Drop() *PeerEventsFilter {
+ f.eventType = simulations.EventTypeConn
+ b := false
+ f.connUp = &b
+ return f
+}
+
+// ReceivedMessages sets the filter to only messages that are received.
+func (f *PeerEventsFilter) ReceivedMessages() *PeerEventsFilter {
+ f.eventType = simulations.EventTypeMsg
+ b := true
+ f.msgReceive = &b
+ return f
+}
+
+// SentMessages sets the filter to only messages that are sent.
+func (f *PeerEventsFilter) SentMessages() *PeerEventsFilter {
+ f.eventType = simulations.EventTypeMsg
+ b := false
+ f.msgReceive = &b
return f
}
// Protocol sets the filter to only one message protocol.
func (f *PeerEventsFilter) Protocol(p string) *PeerEventsFilter {
+ f.eventType = simulations.EventTypeMsg
f.protocol = &p
return f
}
// MsgCode sets the filter to only one msg code.
func (f *PeerEventsFilter) MsgCode(c uint64) *PeerEventsFilter {
+ f.eventType = simulations.EventTypeMsg
f.msgCode = &c
return f
}
@@ -80,19 +114,8 @@ func (s *Simulation) PeerEvents(ctx context.Context, ids []enode.ID, filters ...
go func(id enode.ID) {
defer s.shutdownWG.Done()
- client, err := s.Net.GetNode(id).Client()
- if err != nil {
- subsWG.Done()
- eventC <- PeerEvent{NodeID: id, Error: err}
- return
- }
- events := make(chan *p2p.PeerEvent)
- sub, err := client.Subscribe(ctx, "admin", events, "peerEvents")
- if err != nil {
- subsWG.Done()
- eventC <- PeerEvent{NodeID: id, Error: err}
- return
- }
+ events := make(chan *simulations.Event)
+ sub := s.Net.Events().Subscribe(events)
defer sub.Unsubscribe()
subsWG.Done()
@@ -110,28 +133,55 @@ func (s *Simulation) PeerEvents(ctx context.Context, ids []enode.ID, filters ...
case <-s.Done():
return
case e := <-events:
+ // ignore control events
+ if e.Control {
+ continue
+ }
match := len(filters) == 0 // if there are no filters match all events
for _, f := range filters {
- if f.t != nil && *f.t != e.Type {
- continue
+ if f.eventType == simulations.EventTypeConn && e.Conn != nil {
+ if *f.connUp != e.Conn.Up {
+ continue
+ }
+ // all connection filter parameters matched, break the loop
+ match = true
+ break
}
- if f.protocol != nil && *f.protocol != e.Protocol {
- continue
+ if f.eventType == simulations.EventTypeMsg && e.Msg != nil {
+ if f.msgReceive != nil && *f.msgReceive != e.Msg.Received {
+ continue
+ }
+ if f.protocol != nil && *f.protocol != e.Msg.Protocol {
+ continue
+ }
+ if f.msgCode != nil && *f.msgCode != e.Msg.Code {
+ continue
+ }
+ // all message filter parameters matched, break the loop
+ match = true
+ break
}
- if f.msgCode != nil && e.MsgCode != nil && *f.msgCode != *e.MsgCode {
- continue
+ }
+ var peerID enode.ID
+ switch e.Type {
+ case simulations.EventTypeConn:
+ peerID = e.Conn.One
+ if peerID == id {
+ peerID = e.Conn.Other
+ }
+ case simulations.EventTypeMsg:
+ peerID = e.Msg.One
+ if peerID == id {
+ peerID = e.Msg.Other
}
- // all filter parameters matched, break the loop
- match = true
- break
}
if match {
select {
- case eventC <- PeerEvent{NodeID: id, Event: e}:
+ case eventC <- PeerEvent{NodeID: id, PeerID: peerID, Event: e}:
case <-ctx.Done():
if err := ctx.Err(); err != nil {
select {
- case eventC <- PeerEvent{NodeID: id, Error: err}:
+ case eventC <- PeerEvent{NodeID: id, PeerID: peerID, Error: err}:
case <-s.Done():
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/example_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/example_test.go
index 84b0634b..7b620461 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/example_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/example_test.go
@@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/simulation"
@@ -34,6 +33,10 @@ import (
// BucketKeyKademlia key. This allows to use WaitTillHealthy to block until
// all nodes have the their Kadmlias healthy.
func ExampleSimulation_WaitTillHealthy() {
+
+ log.Error("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
+ return
+
sim := simulation.New(map[string]simulation.ServiceFunc{
"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
addr := network.NewAddr(ctx.Config.Node())
@@ -87,7 +90,7 @@ func ExampleSimulation_PeerEvents() {
log.Error("peer event", "err", e.Error)
continue
}
- log.Info("peer event", "node", e.NodeID, "peer", e.Event.Peer, "msgcode", e.Event.MsgCode)
+ log.Info("peer event", "node", e.NodeID, "peer", e.PeerID, "type", e.Event.Type)
}
}()
}
@@ -100,7 +103,7 @@ func ExampleSimulation_PeerEvents_disconnections() {
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+ simulation.NewPeerEventsFilter().Drop(),
)
go func() {
@@ -109,7 +112,7 @@ func ExampleSimulation_PeerEvents_disconnections() {
log.Error("peer drop", "err", d.Error)
continue
}
- log.Warn("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+ log.Warn("peer drop", "node", d.NodeID, "peer", d.PeerID)
}
}()
}
@@ -124,8 +127,8 @@ func ExampleSimulation_PeerEvents_multipleFilters() {
context.Background(),
sim.NodeIDs(),
// Watch when bzz messages 1 and 4 are received.
- simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("bzz").MsgCode(1),
- simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("bzz").MsgCode(4),
+ simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("bzz").MsgCode(1),
+ simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("bzz").MsgCode(4),
)
go func() {
@@ -134,7 +137,7 @@ func ExampleSimulation_PeerEvents_multipleFilters() {
log.Error("bzz message", "err", m.Error)
continue
}
- log.Info("bzz message", "node", m.NodeID, "peer", m.Event.Peer)
+ log.Info("bzz message", "node", m.NodeID, "peer", m.PeerID)
}
}()
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go
index f895181d..7982810c 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go
@@ -33,6 +33,7 @@ var BucketKeyKademlia BucketKey = "kademlia"
// WaitTillHealthy is blocking until the health of all kademlias is true.
// If error is not nil, a map of kademlia that was found not healthy is returned.
+// TODO: Check correctness since change in kademlia depth calculation logic
func (s *Simulation) WaitTillHealthy(ctx context.Context, kadMinProxSize int) (ill map[enode.ID]*network.Kademlia, err error) {
// Prepare PeerPot map for checking Kademlia health
var ppmap map[string]*network.PeerPot
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia_test.go
index 285644a0..f02b0e54 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia_test.go
@@ -28,11 +28,11 @@ import (
)
func TestWaitTillHealthy(t *testing.T) {
+
sim := New(map[string]ServiceFunc{
"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
addr := network.NewAddr(ctx.Config.Node())
hp := network.NewHiveParams()
- hp.Discovery = false
config := &network.BzzConfig{
OverlayAddr: addr.Over(),
UnderlayAddr: addr.Under(),
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node_test.go
index 086ab606..01346ef1 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node_test.go
@@ -160,6 +160,41 @@ func TestAddNodeWithService(t *testing.T) {
}
}
+func TestAddNodeMultipleServices(t *testing.T) {
+ sim := New(map[string]ServiceFunc{
+ "noop1": noopServiceFunc,
+ "noop2": noopService2Func,
+ })
+ defer sim.Close()
+
+ id, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ n := sim.Net.GetNode(id).Node.(*adapters.SimNode)
+ if n.Service("noop1") == nil {
+ t.Error("service noop1 not found on node")
+ }
+ if n.Service("noop2") == nil {
+ t.Error("service noop2 not found on node")
+ }
+}
+
+func TestAddNodeDuplicateServiceError(t *testing.T) {
+ sim := New(map[string]ServiceFunc{
+ "noop1": noopServiceFunc,
+ "noop2": noopServiceFunc,
+ })
+ defer sim.Close()
+
+ wantErr := "duplicate service: *simulation.noopService"
+ _, err := sim.AddNode()
+ if err.Error() != wantErr {
+ t.Errorf("got error %q, want %q", err, wantErr)
+ }
+}
+
func TestAddNodes(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go
index f6d3ce22..e5435b9f 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go
@@ -68,6 +68,10 @@ type ServiceFunc func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Se
// New creates a new Simulation instance with new
// simulations.Network initialized with provided services.
+// Services map must have unique keys as service names and
+// every ServiceFunc must return a node.Service of the unique type.
+// This restriction is required by node.Node.Start() function
+// which is used to start node.Service returned by ServiceFunc.
func New(services map[string]ServiceFunc) (s *Simulation) {
s = &Simulation{
buckets: make(map[enode.ID]*sync.Map),
@@ -76,6 +80,9 @@ func New(services map[string]ServiceFunc) (s *Simulation) {
adapterServices := make(map[string]adapters.ServiceFunc, len(services))
for name, serviceFunc := range services {
+ // Scope this variables correctly
+ // as they will be in the adapterServices[name] function accessed later.
+ name, serviceFunc := name, serviceFunc
s.serviceNames = append(s.serviceNames, name)
adapterServices[name] = func(ctx *adapters.ServiceContext) (node.Service, error) {
b := new(sync.Map)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation_test.go
index eed09bf5..ca8599d7 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation_test.go
@@ -205,3 +205,16 @@ func (t *noopService) Start(server *p2p.Server) error {
func (t *noopService) Stop() error {
return nil
}
+
+// a helper function for most basic noop service
+// of a different type then noopService to test
+// multiple services on one node.
+func noopService2Func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+ return new(noopService2), nil, nil
+}
+
+// noopService2 is the service that does not do anything
+// but implements node.Service interface.
+type noopService2 struct {
+ noopService
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go
index caf7ff1f..284ae639 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go
@@ -64,12 +64,12 @@ func init() {
type Simulation struct {
mtx sync.Mutex
- stores map[enode.ID]*state.InmemoryStore
+ stores map[enode.ID]state.Store
}
func NewSimulation() *Simulation {
return &Simulation{
- stores: make(map[enode.ID]*state.InmemoryStore),
+ stores: make(map[enode.ID]state.Store),
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/common_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/common_test.go
index 721b873b..e0a7f7e1 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/common_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/common_test.go
@@ -38,7 +38,6 @@ import (
"github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
- mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
"github.com/ethereum/go-ethereum/swarm/testutil"
colorable "github.com/mattn/go-colorable"
)
@@ -69,21 +68,6 @@ func init() {
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
-func createGlobalStore() (string, *mockdb.GlobalStore, error) {
- var globalStore *mockdb.GlobalStore
- globalStoreDir, err := ioutil.TempDir("", "global.store")
- if err != nil {
- log.Error("Error initiating global store temp directory!", "err", err)
- return "", nil, err
- }
- globalStore, err = mockdb.NewGlobalStore(globalStoreDir)
- if err != nil {
- log.Error("Error initiating global store!", "err", err)
- return "", nil, err
- }
- return globalStoreDir, globalStore, nil
-}
-
func newStreamerTester(t *testing.T, registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) {
// setup
addr := network.RandomAddr() // tested peers peer address
@@ -114,7 +98,7 @@ func newStreamerTester(t *testing.T, registryOptions *RegistryOptions) (*p2ptest
delivery := NewDelivery(to, netStore)
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
- streamer := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), registryOptions)
+ streamer := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), registryOptions, nil)
teardown := func() {
streamer.Close()
removeDataDir()
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go
index 0109fbde..c73298d9 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go
@@ -39,6 +39,7 @@ const (
var (
processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil)
handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil)
+ retrieveChunkFail = metrics.NewRegisteredCounter("network.stream.retrieve_chunks_fail.count", nil)
requestFromPeersCount = metrics.NewRegisteredCounter("network.stream.request_from_peers.count", nil)
requestFromPeersEachCount = metrics.NewRegisteredCounter("network.stream.request_from_peers_each.count", nil)
@@ -169,7 +170,8 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
go func() {
chunk, err := d.chunkStore.Get(ctx, req.Addr)
if err != nil {
- log.Warn("ChunkStore.Get can not retrieve chunk", "err", err)
+ retrieveChunkFail.Inc(1)
+ log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "hopcount", req.HopCount, "err", err)
return
}
if req.SkipCheck {
@@ -255,7 +257,7 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (
}
sp = d.getPeer(id)
if sp == nil {
- log.Warn("Delivery.RequestFromPeers: peer not found", "id", id)
+ //log.Warn("Delivery.RequestFromPeers: peer not found", "id", id)
return true
}
spID = &id
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery_test.go
index c9a53011..f69f8049 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery_test.go
@@ -290,7 +290,7 @@ func TestRequestFromPeers(t *testing.T) {
Peer: protocolsPeer,
}, to)
to.On(peer)
- r := NewRegistry(addr.ID(), delivery, nil, nil, nil)
+ r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
// an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
sp := &Peer{
@@ -331,7 +331,7 @@ func TestRequestFromPeersWithLightNode(t *testing.T) {
Peer: protocolsPeer,
}, to)
to.On(peer)
- r := NewRegistry(addr.ID(), delivery, nil, nil, nil)
+ r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
// an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
sp := &Peer{
Peer: protocolsPeer,
@@ -453,6 +453,8 @@ func TestDeliveryFromNodes(t *testing.T) {
}
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
node := ctx.Config.Node()
@@ -480,7 +482,7 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
SkipCheck: skipCheck,
Syncing: SyncingDisabled,
Retrieval: RetrievalEnabled,
- })
+ }, nil)
bucket.Store(bucketKeyRegistry, r)
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
@@ -565,13 +567,13 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+ simulation.NewPeerEventsFilter().Drop(),
)
go func() {
for d := range disconnections {
if d.Error != nil {
- log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+ log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
t.Fatal(d.Error)
}
}
@@ -655,7 +657,7 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skip
Syncing: SyncingDisabled,
Retrieval: RetrievalDisabled,
SyncUpdateDelay: 0,
- })
+ }, nil)
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
bucket.Store(bucketKeyFileStore, fileStore)
@@ -697,13 +699,13 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skip
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+ simulation.NewPeerEventsFilter().Drop(),
)
go func() {
for d := range disconnections {
if d.Error != nil {
- log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+ log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
b.Fatal(d.Error)
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals_test.go
index 037984f2..668cf586 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals_test.go
@@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/swarm/network"
@@ -53,6 +52,8 @@ func TestIntervalsLiveAndHistory(t *testing.T) {
}
func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
nodes := 2
chunkCount := dataChunkCount
externalStreamName := "externalStream"
@@ -85,7 +86,7 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
Retrieval: RetrievalDisabled,
Syncing: SyncingRegisterOnly,
SkipCheck: skipCheck,
- })
+ }, nil)
bucket.Store(bucketKeyRegistry, r)
r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) {
@@ -154,7 +155,7 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+ simulation.NewPeerEventsFilter().Drop(),
)
err = registry.Subscribe(storer, NewStream(externalStreamName, "", live), history, Top)
@@ -165,7 +166,7 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
go func() {
for d := range disconnections {
if d.Error != nil {
- log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+ log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
t.Fatal(d.Error)
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_retrieval_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_retrieval_test.go
index ad151934..932e28b3 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_retrieval_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_retrieval_test.go
@@ -130,7 +130,7 @@ func retrievalStreamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s no
Retrieval: RetrievalEnabled,
Syncing: SyncingAutoSubscribe,
SyncUpdateDelay: 3 * time.Second,
- })
+ }, nil)
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
bucket.Store(bucketKeyFileStore, fileStore)
@@ -246,6 +246,7 @@ simulation's `action` function.
The snapshot should have 'streamer' in its service list.
*/
func runRetrievalTest(chunkCount int, nodeCount int) error {
+
sim := simulation.New(retrievalSimServiceMap)
defer sim.Close()
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_sync_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_sync_test.go
index 4bd7f38f..4a632c8c 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_sync_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_sync_test.go
@@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
@@ -36,7 +35,8 @@ import (
"github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
- mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
+ "github.com/ethereum/go-ethereum/swarm/storage/mock"
+ mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
@@ -167,7 +167,7 @@ func streamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Servic
Retrieval: RetrievalDisabled,
Syncing: SyncingAutoSubscribe,
SyncUpdateDelay: 3 * time.Second,
- })
+ }, nil)
bucket.Store(bucketKeyRegistry, r)
@@ -182,6 +182,8 @@ func streamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Servic
}
func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
sim := simulation.New(simServiceMap)
defer sim.Close()
@@ -210,12 +212,12 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+ simulation.NewPeerEventsFilter().Drop(),
)
go func() {
for d := range disconnections {
- log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+ log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
t.Fatal("unexpected disconnect")
cancelSimRun()
}
@@ -269,20 +271,9 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
// File retrieval check is repeated until all uploaded files are retrieved from all nodes
// or until the timeout is reached.
- var gDir string
- var globalStore *mockdb.GlobalStore
+ var globalStore mock.GlobalStorer
if *useMockStore {
- gDir, globalStore, err = createGlobalStore()
- if err != nil {
- return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
- }
- defer func() {
- os.RemoveAll(gDir)
- err := globalStore.Close()
- if err != nil {
- log.Error("Error closing global store! %v", "err", err)
- }
- }()
+ globalStore = mockmem.NewGlobalStore()
}
REPEAT:
for {
@@ -340,6 +331,8 @@ assuming that the snapshot file identifies a healthy
kademlia network. The snapshot should have 'streamer' in its service list.
*/
func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int) error {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
n := ctx.Config.Node()
@@ -361,7 +354,7 @@ func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int)
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
Retrieval: RetrievalDisabled,
Syncing: SyncingRegisterOnly,
- })
+ }, nil)
bucket.Store(bucketKeyRegistry, r)
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
@@ -402,12 +395,12 @@ func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int)
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+ simulation.NewPeerEventsFilter().Drop(),
)
go func() {
for d := range disconnections {
- log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+ log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
t.Fatal("unexpected disconnect")
cancelSimRun()
}
@@ -428,7 +421,7 @@ func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int)
var subscriptionCount int
- filter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(4)
+ filter := simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("stream").MsgCode(4)
eventC := sim.PeerEvents(ctx, nodeIDs, filter)
for j, node := range nodeIDs {
@@ -477,14 +470,9 @@ func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int)
return err
}
- var gDir string
- var globalStore *mockdb.GlobalStore
+ var globalStore mock.GlobalStorer
if *useMockStore {
- gDir, globalStore, err = createGlobalStore()
- if err != nil {
- return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
- }
- defer os.RemoveAll(gDir)
+ globalStore = mockmem.NewGlobalStore()
}
// File retrieval check is repeated until all uploaded files are retrieved from all nodes
// or until the timeout is reached.
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go
index 695ff0c5..32e10782 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"math"
+ "reflect"
"sync"
"time"
@@ -87,6 +88,9 @@ type Registry struct {
intervalsStore state.Store
autoRetrieval bool //automatically subscribe to retrieve request stream
maxPeerServers int
+ spec *protocols.Spec //this protocol's spec
+ balance protocols.Balance //implements protocols.Balance, for accounting
+ prices protocols.Prices //implements protocols.Prices, provides prices to accounting
}
// RegistryOptions holds optional values for NewRegistry constructor.
@@ -99,7 +103,7 @@ type RegistryOptions struct {
}
// NewRegistry is Streamer constructor
-func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions) *Registry {
+func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry {
if options == nil {
options = &RegistryOptions{}
}
@@ -119,7 +123,10 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
intervalsStore: intervalsStore,
autoRetrieval: retrieval,
maxPeerServers: options.MaxPeerServers,
+ balance: balance,
}
+ streamer.setupSpec()
+
streamer.api = NewAPI(streamer)
delivery.getPeer = streamer.getPeer
@@ -228,6 +235,17 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
return streamer
}
+//we need to construct a spec instance per node instance
+func (r *Registry) setupSpec() {
+ //first create the "bare" spec
+ r.createSpec()
+ //if balance is nil, this node has been started without swap support (swapEnabled flag is false)
+ if r.balance != nil && !reflect.ValueOf(r.balance).IsNil() {
+ //swap is enabled, so setup the hook
+ r.spec.Hook = protocols.NewAccounting(r.balance, r.prices)
+ }
+}
+
// RegisterClient registers an incoming streamer constructor
func (r *Registry) RegisterClientFunc(stream string, f func(*Peer, string, bool) (Client, error)) {
r.clientMu.Lock()
@@ -492,7 +510,7 @@ func (r *Registry) updateSyncing() {
}
func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
- peer := protocols.NewPeer(p, rw, Spec)
+ peer := protocols.NewPeer(p, rw, r.spec)
bp := network.NewBzzPeer(peer)
np := network.NewPeer(bp, r.delivery.kad)
r.delivery.kad.On(np)
@@ -716,35 +734,43 @@ func (c *clientParams) clientCreated() {
close(c.clientCreatedC)
}
-// Spec is the spec of the streamer protocol
-var Spec = &protocols.Spec{
- Name: "stream",
- Version: 8,
- MaxMsgSize: 10 * 1024 * 1024,
- Messages: []interface{}{
- UnsubscribeMsg{},
- OfferedHashesMsg{},
- WantedHashesMsg{},
- TakeoverProofMsg{},
- SubscribeMsg{},
- RetrieveRequestMsg{},
- ChunkDeliveryMsgRetrieval{},
- SubscribeErrorMsg{},
- RequestSubscriptionMsg{},
- QuitMsg{},
- ChunkDeliveryMsgSyncing{},
- },
+//GetSpec returns the streamer spec to callers
+//This used to be a global variable but for simulations with
+//multiple nodes its fields (notably the Hook) would be overwritten
+func (r *Registry) GetSpec() *protocols.Spec {
+ return r.spec
+}
+
+func (r *Registry) createSpec() {
+ // Spec is the spec of the streamer protocol
+ var spec = &protocols.Spec{
+ Name: "stream",
+ Version: 8,
+ MaxMsgSize: 10 * 1024 * 1024,
+ Messages: []interface{}{
+ UnsubscribeMsg{},
+ OfferedHashesMsg{},
+ WantedHashesMsg{},
+ TakeoverProofMsg{},
+ SubscribeMsg{},
+ RetrieveRequestMsg{},
+ ChunkDeliveryMsgRetrieval{},
+ SubscribeErrorMsg{},
+ RequestSubscriptionMsg{},
+ QuitMsg{},
+ ChunkDeliveryMsgSyncing{},
+ },
+ }
+ r.spec = spec
}
func (r *Registry) Protocols() []p2p.Protocol {
return []p2p.Protocol{
{
- Name: Spec.Name,
- Version: Spec.Version,
- Length: Spec.Length(),
+ Name: r.spec.Name,
+ Version: r.spec.Version,
+ Length: r.spec.Length(),
Run: r.runProtocol,
- // NodeInfo: ,
- // PeerInfo: ,
},
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer_test.go
index a543cae0..3e3cee18 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer_test.go
@@ -28,7 +28,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/swarm/log"
@@ -36,7 +35,8 @@ import (
"github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
- mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
+ "github.com/ethereum/go-ethereum/swarm/storage/mock"
+ mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
@@ -49,7 +49,7 @@ func TestSyncerSimulation(t *testing.T) {
testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1)
}
-func createMockStore(globalStore *mockdb.GlobalStore, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
+func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
address := common.BytesToAddress(id.Bytes())
mockStore := globalStore.NewNodeStore(address)
params := storage.NewDefaultLocalStoreParams()
@@ -68,11 +68,12 @@ func createMockStore(globalStore *mockdb.GlobalStore, id enode.ID, addr *network
}
func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
var store storage.ChunkStore
- var globalStore *mockdb.GlobalStore
- var gDir, datadir string
+ var datadir string
node := ctx.Config.Node()
addr := network.NewAddr(node)
@@ -80,11 +81,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
addr.OAddr[0] = byte(0)
if *useMockStore {
- gDir, globalStore, err = createGlobalStore()
- if err != nil {
- return nil, nil, fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
- }
- store, datadir, err = createMockStore(globalStore, node.ID(), addr)
+ store, datadir, err = createMockStore(mockmem.NewGlobalStore(), node.ID(), addr)
} else {
store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
}
@@ -95,13 +92,6 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
cleanup = func() {
store.Close()
os.RemoveAll(datadir)
- if *useMockStore {
- err := globalStore.Close()
- if err != nil {
- log.Error("Error closing global store! %v", "err", err)
- }
- os.RemoveAll(gDir)
- }
}
localStore := store.(*storage.LocalStore)
netStore, err := storage.NewNetStore(localStore, nil)
@@ -119,7 +109,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
Retrieval: RetrievalDisabled,
Syncing: SyncingAutoSubscribe,
SkipCheck: skipCheck,
- })
+ }, nil)
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
bucket.Store(bucketKeyFileStore, fileStore)
@@ -151,13 +141,13 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+ simulation.NewPeerEventsFilter().Drop(),
)
go func() {
for d := range disconnections {
if d.Error != nil {
- log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+ log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
t.Fatal(d.Error)
}
}
@@ -244,3 +234,170 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
t.Fatal(result.Error)
}
}
+
+//TestSameVersionID just checks that if the version is not changed,
+//then streamer peers see each other
+func TestSameVersionID(t *testing.T) {
+ //test version ID
+ v := uint(1)
+ sim := simulation.New(map[string]simulation.ServiceFunc{
+ "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+ var store storage.ChunkStore
+ var datadir string
+
+ node := ctx.Config.Node()
+ addr := network.NewAddr(node)
+
+ store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyStore, store)
+ cleanup = func() {
+ store.Close()
+ os.RemoveAll(datadir)
+ }
+ localStore := store.(*storage.LocalStore)
+ netStore, err := storage.NewNetStore(localStore, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyDB, netStore)
+ kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+ delivery := NewDelivery(kad, netStore)
+ netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
+
+ bucket.Store(bucketKeyDelivery, delivery)
+
+ r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
+ Retrieval: RetrievalDisabled,
+ Syncing: SyncingAutoSubscribe,
+ }, nil)
+ //assign to each node the same version ID
+ r.spec.Version = v
+
+ bucket.Store(bucketKeyRegistry, r)
+
+ return r, cleanup, nil
+
+ },
+ })
+ defer sim.Close()
+
+ //connect just two nodes
+ log.Info("Adding nodes to simulation")
+ _, err := sim.AddNodesAndConnectChain(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ log.Info("Starting simulation")
+ ctx := context.Background()
+ //make sure they have time to connect
+ time.Sleep(200 * time.Millisecond)
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ //get the pivot node's filestore
+ nodes := sim.UpNodeIDs()
+
+ item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
+ if !ok {
+ return fmt.Errorf("No filestore")
+ }
+ registry := item.(*Registry)
+
+ //the peers should connect, thus getting the peer should not return nil
+ if registry.getPeer(nodes[1]) == nil {
+ t.Fatal("Expected the peer to not be nil, but it is")
+ }
+ return nil
+ })
+ if result.Error != nil {
+ t.Fatal(result.Error)
+ }
+ log.Info("Simulation ended")
+}
+
+//TestDifferentVersionID proves that if the streamer protocol version doesn't match,
+//then the peers are not connected at streamer level
+func TestDifferentVersionID(t *testing.T) {
+ //create a variable to hold the version ID
+ v := uint(0)
+ sim := simulation.New(map[string]simulation.ServiceFunc{
+ "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+ var store storage.ChunkStore
+ var datadir string
+
+ node := ctx.Config.Node()
+ addr := network.NewAddr(node)
+
+ store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyStore, store)
+ cleanup = func() {
+ store.Close()
+ os.RemoveAll(datadir)
+ }
+ localStore := store.(*storage.LocalStore)
+ netStore, err := storage.NewNetStore(localStore, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyDB, netStore)
+ kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+ delivery := NewDelivery(kad, netStore)
+ netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
+
+ bucket.Store(bucketKeyDelivery, delivery)
+
+ r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
+ Retrieval: RetrievalDisabled,
+ Syncing: SyncingAutoSubscribe,
+ }, nil)
+
+ //increase the version ID for each node
+ v++
+ r.spec.Version = v
+
+ bucket.Store(bucketKeyRegistry, r)
+
+ return r, cleanup, nil
+
+ },
+ })
+ defer sim.Close()
+
+ //connect the nodes
+ log.Info("Adding nodes to simulation")
+ _, err := sim.AddNodesAndConnectChain(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ log.Info("Starting simulation")
+ ctx := context.Background()
+ //make sure they have time to connect
+ time.Sleep(200 * time.Millisecond)
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ //get the pivot node's filestore
+ nodes := sim.UpNodeIDs()
+
+ item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
+ if !ok {
+ return fmt.Errorf("No filestore")
+ }
+ registry := item.(*Registry)
+
+ //getting the other peer should fail due to the different version numbers
+ if registry.getPeer(nodes[1]) != nil {
+ t.Fatal("Expected the peer to be nil, but it is not")
+ }
+ return nil
+ })
+ if result.Error != nil {
+ t.Fatal(result.Error)
+ }
+ log.Info("Simulation ended")
+
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/visualized_snapshot_sync_sim_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/visualized_snapshot_sync_sim_test.go
index 437c17e5..f6d61802 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/visualized_snapshot_sync_sim_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/visualized_snapshot_sync_sim_test.go
@@ -84,6 +84,8 @@ func watchSim(sim *simulation.Simulation) (context.Context, context.CancelFunc)
//This test requests bogus hashes into the network
func TestNonExistingHashesWithServer(t *testing.T) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
nodeCount, _, sim := setupSim(retrievalSimServiceMap)
defer sim.Close()
@@ -143,6 +145,7 @@ func sendSimTerminatedEvent(sim *simulation.Simulation) {
//can visualize messages like SendOfferedMsg, WantedHashesMsg, DeliveryMsg
func TestSnapshotSyncWithServer(t *testing.T) {
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
nodeCount, chunkCount, sim := setupSim(simServiceMap)
defer sim.Close()
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network_test.go
index d84f2814..41993dfc 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network_test.go
@@ -259,6 +259,8 @@ type testSwarmNetworkOptions struct {
// - May wait for Kademlia on every node to be healthy.
// - Checking if a file is retrievable from all nodes.
func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwarmNetworkStep) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
if o == nil {
o = new(testSwarmNetworkOptions)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go
index eba7bb72..587382d7 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go
@@ -51,7 +51,7 @@ func NewAPI(ps *Pss) *API {
//
// All incoming messages to the node matching this topic will be encapsulated in the APIMsg
// struct and sent to the subscriber
-func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription, error) {
+func (pssapi *API) Receive(ctx context.Context, topic Topic, raw bool, prox bool) (*rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return nil, fmt.Errorf("Subscribe not supported")
@@ -59,7 +59,7 @@ func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription,
psssub := notifier.CreateSubscription()
- handler := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ hndlr := NewHandler(func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
apimsg := &APIMsg{
Msg: hexutil.Bytes(msg),
Asymmetric: asymmetric,
@@ -69,9 +69,15 @@ func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription,
log.Warn(fmt.Sprintf("notification on pss sub topic rpc (sub %v) msg %v failed!", psssub.ID, msg))
}
return nil
+ })
+ if raw {
+ hndlr.caps.raw = true
+ }
+ if prox {
+ hndlr.caps.prox = true
}
- deregf := pssapi.Register(&topic, handler)
+ deregf := pssapi.Register(&topic, hndlr)
go func() {
defer deregf()
select {
@@ -158,6 +164,10 @@ func (pssapi *API) SendSym(symkeyhex string, topic Topic, msg hexutil.Bytes) err
return pssapi.Pss.SendSym(symkeyhex, topic, msg[:])
}
+func (pssapi *API) SendRaw(addr hexutil.Bytes, topic Topic, msg hexutil.Bytes) error {
+ return pssapi.Pss.SendRaw(PssAddress(addr), topic, msg[:])
+}
+
func (pssapi *API) GetPeerTopics(pubkeyhex string) ([]Topic, error) {
topics, _, err := pssapi.Pss.GetPublickeyPeers(pubkeyhex)
return topics, err
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client.go
index d541081d..5ee387aa 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client.go
@@ -236,7 +236,7 @@ func (c *Client) RunProtocol(ctx context.Context, proto *p2p.Protocol) error {
topichex := topicobj.String()
msgC := make(chan pss.APIMsg)
c.peerPool[topicobj] = make(map[string]*pssRPCRW)
- sub, err := c.rpc.Subscribe(ctx, "pss", msgC, "receive", topichex)
+ sub, err := c.rpc.Subscribe(ctx, "pss", msgC, "receive", topichex, false, false)
if err != nil {
return fmt.Errorf("pss event subscription failed: %v", err)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go
index e3ead77d..5486abaf 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go
@@ -486,7 +486,7 @@ func (api *HandshakeAPI) Handshake(pubkeyid string, topic Topic, sync bool, flus
// Activate handshake functionality on a topic
func (api *HandshakeAPI) AddHandshake(topic Topic) error {
- api.ctrl.deregisterFuncs[topic] = api.ctrl.pss.Register(&topic, api.ctrl.handler)
+ api.ctrl.deregisterFuncs[topic] = api.ctrl.pss.Register(&topic, NewHandler(api.ctrl.handler))
return nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go
index 3731fb9d..d3c89058 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go
@@ -113,7 +113,7 @@ func NewController(ps *pss.Pss) *Controller {
notifiers: make(map[string]*notifier),
subscriptions: make(map[string]*subscription),
}
- ctrl.pss.Register(&controlTopic, ctrl.Handler)
+ ctrl.pss.Register(&controlTopic, pss.NewHandler(ctrl.Handler))
return ctrl
}
@@ -336,7 +336,7 @@ func (c *Controller) handleNotifyWithKeyMsg(msg *Msg) error {
// \TODO keep track of and add actual address
updaterAddr := pss.PssAddress([]byte{})
c.pss.SetSymmetricKey(symkey, topic, &updaterAddr, true)
- c.pss.Register(&topic, c.Handler)
+ c.pss.Register(&topic, pss.NewHandler(c.Handler))
return c.subscriptions[msg.namestring].handler(msg.namestring, msg.Payload[:len(msg.Payload)-symKeyLength])
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify_test.go
index d4d383a6..6100195b 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify_test.go
@@ -121,7 +121,7 @@ func TestStart(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
defer cancel()
rmsgC := make(chan *pss.APIMsg)
- rightSub, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", controlTopic)
+ rightSub, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", controlTopic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -174,7 +174,7 @@ func TestStart(t *testing.T) {
t.Fatalf("expected payload length %d, have %d", len(updateMsg)+symKeyLength, len(dMsg.Payload))
}
- rightSubUpdate, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", rsrcTopic)
+ rightSubUpdate, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", rsrcTopic, false, false)
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/protocol_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/protocol_test.go
index 4ef3e90a..520c48a2 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/protocol_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/protocol_test.go
@@ -92,7 +92,7 @@ func testProtocol(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -100,7 +100,7 @@ func testProtocol(t *testing.T) {
rmsgC := make(chan APIMsg)
rctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -130,6 +130,7 @@ func testProtocol(t *testing.T) {
log.Debug("lnode ok")
case cerr := <-lctx.Done():
t.Fatalf("test message timed out: %v", cerr)
+ return
}
select {
case <-rmsgC:
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go
index e1e24e1f..d0986d28 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go
@@ -23,11 +23,13 @@ import (
"crypto/rand"
"errors"
"fmt"
+ "hash"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -136,10 +138,10 @@ type Pss struct {
symKeyDecryptCacheCapacity int // max amount of symkeys to keep.
// message handling
- handlers map[Topic]map[*Handler]bool // topic and version based pss payload handlers. See pss.Handle()
- handlersMu sync.RWMutex
- allowRaw bool
- hashPool sync.Pool
+ handlers map[Topic]map[*handler]bool // topic and version based pss payload handlers. See pss.Handle()
+ handlersMu sync.RWMutex
+ hashPool sync.Pool
+ topicHandlerCaps map[Topic]*handlerCaps // caches capabilities of each topic's handlers (see handlerCap* consts in types.go)
// process
quitC chan struct{}
@@ -180,11 +182,12 @@ func NewPss(k *network.Kademlia, params *PssParams) (*Pss, error) {
symKeyDecryptCache: make([]*string, params.SymKeyCacheCapacity),
symKeyDecryptCacheCapacity: params.SymKeyCacheCapacity,
- handlers: make(map[Topic]map[*Handler]bool),
- allowRaw: params.AllowRaw,
+ handlers: make(map[Topic]map[*handler]bool),
+ topicHandlerCaps: make(map[Topic]*handlerCaps),
+
hashPool: sync.Pool{
New: func() interface{} {
- return storage.MakeHashFunc(storage.DefaultHash)()
+ return sha3.NewKeccak256()
},
},
}
@@ -313,30 +316,54 @@ func (p *Pss) PublicKey() *ecdsa.PublicKey {
//
// Returns a deregister function which needs to be called to
// deregister the handler,
-func (p *Pss) Register(topic *Topic, handler Handler) func() {
+func (p *Pss) Register(topic *Topic, hndlr *handler) func() {
p.handlersMu.Lock()
defer p.handlersMu.Unlock()
handlers := p.handlers[*topic]
if handlers == nil {
- handlers = make(map[*Handler]bool)
+ handlers = make(map[*handler]bool)
p.handlers[*topic] = handlers
+ log.Debug("registered handler", "caps", hndlr.caps)
}
- handlers[&handler] = true
- return func() { p.deregister(topic, &handler) }
+ if hndlr.caps == nil {
+ hndlr.caps = &handlerCaps{}
+ }
+ handlers[hndlr] = true
+ if _, ok := p.topicHandlerCaps[*topic]; !ok {
+ p.topicHandlerCaps[*topic] = &handlerCaps{}
+ }
+ if hndlr.caps.raw {
+ p.topicHandlerCaps[*topic].raw = true
+ }
+ if hndlr.caps.prox {
+ p.topicHandlerCaps[*topic].prox = true
+ }
+ return func() { p.deregister(topic, hndlr) }
}
-func (p *Pss) deregister(topic *Topic, h *Handler) {
+func (p *Pss) deregister(topic *Topic, hndlr *handler) {
p.handlersMu.Lock()
defer p.handlersMu.Unlock()
handlers := p.handlers[*topic]
- if len(handlers) == 1 {
+ if len(handlers) > 1 {
delete(p.handlers, *topic)
+ // topic caps might have changed now that a handler is gone
+ caps := &handlerCaps{}
+ for h := range handlers {
+ if h.caps.raw {
+ caps.raw = true
+ }
+ if h.caps.prox {
+ caps.prox = true
+ }
+ }
+ p.topicHandlerCaps[*topic] = caps
return
}
- delete(handlers, h)
+ delete(handlers, hndlr)
}
// get all registered handlers for respective topics
-func (p *Pss) getHandlers(topic Topic) map[*Handler]bool {
+func (p *Pss) getHandlers(topic Topic) map[*handler]bool {
p.handlersMu.RLock()
defer p.handlersMu.RUnlock()
return p.handlers[topic]
@@ -348,12 +375,11 @@ func (p *Pss) getHandlers(topic Topic) map[*Handler]bool {
// Only passes error to pss protocol handler if payload is not valid pssmsg
func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
metrics.GetOrRegisterCounter("pss.handlepssmsg", nil).Inc(1)
-
pssmsg, ok := msg.(*PssMsg)
-
if !ok {
return fmt.Errorf("invalid message type. Expected *PssMsg, got %T ", msg)
}
+ log.Trace("handler", "self", label(p.Kademlia.BaseAddr()), "topic", label(pssmsg.Payload.Topic[:]))
if int64(pssmsg.Expire) < time.Now().Unix() {
metrics.GetOrRegisterCounter("pss.expire", nil).Inc(1)
log.Warn("pss filtered expired message", "from", common.ToHex(p.Kademlia.BaseAddr()), "to", common.ToHex(pssmsg.To))
@@ -365,13 +391,34 @@ func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
}
p.addFwdCache(pssmsg)
- if !p.isSelfPossibleRecipient(pssmsg) {
- log.Trace("pss was for someone else :'( ... forwarding", "pss", common.ToHex(p.BaseAddr()))
+ psstopic := Topic(pssmsg.Payload.Topic)
+
+ // raw is simplest handler contingency to check, so check that first
+ var isRaw bool
+ if pssmsg.isRaw() {
+ if !p.topicHandlerCaps[psstopic].raw {
+ log.Debug("No handler for raw message", "topic", psstopic)
+ return nil
+ }
+ isRaw = true
+ }
+
+ // check if we can be recipient:
+ // - no prox handler on message and partial address matches
+ // - prox handler on message and we are in prox regardless of partial address match
+ // store this result so we don't calculate again on every handler
+ var isProx bool
+ if _, ok := p.topicHandlerCaps[psstopic]; ok {
+ isProx = p.topicHandlerCaps[psstopic].prox
+ }
+ isRecipient := p.isSelfPossibleRecipient(pssmsg, isProx)
+ if !isRecipient {
+ log.Trace("pss was for someone else :'( ... forwarding", "pss", common.ToHex(p.BaseAddr()), "prox", isProx)
return p.enqueue(pssmsg)
}
- log.Trace("pss for us, yay! ... let's process!", "pss", common.ToHex(p.BaseAddr()))
- if err := p.process(pssmsg); err != nil {
+ log.Trace("pss for us, yay! ... let's process!", "pss", common.ToHex(p.BaseAddr()), "prox", isProx, "raw", isRaw, "topic", label(pssmsg.Payload.Topic[:]))
+ if err := p.process(pssmsg, isRaw, isProx); err != nil {
qerr := p.enqueue(pssmsg)
if qerr != nil {
return fmt.Errorf("process fail: processerr %v, queueerr: %v", err, qerr)
@@ -384,7 +431,7 @@ func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
// Entry point to processing a message for which the current node can be the intended recipient.
// Attempts symmetric and asymmetric decryption with stored keys.
// Dispatches message to all handlers matching the message topic
-func (p *Pss) process(pssmsg *PssMsg) error {
+func (p *Pss) process(pssmsg *PssMsg, raw bool, prox bool) error {
metrics.GetOrRegisterCounter("pss.process", nil).Inc(1)
var err error
@@ -397,10 +444,8 @@ func (p *Pss) process(pssmsg *PssMsg) error {
envelope := pssmsg.Payload
psstopic := Topic(envelope.Topic)
- if pssmsg.isRaw() {
- if !p.allowRaw {
- return errors.New("raw message support disabled")
- }
+
+ if raw {
payload = pssmsg.Payload.Data
} else {
if pssmsg.isSym() {
@@ -422,19 +467,27 @@ func (p *Pss) process(pssmsg *PssMsg) error {
return err
}
}
- p.executeHandlers(psstopic, payload, from, asymmetric, keyid)
+ p.executeHandlers(psstopic, payload, from, raw, prox, asymmetric, keyid)
return nil
}
-func (p *Pss) executeHandlers(topic Topic, payload []byte, from *PssAddress, asymmetric bool, keyid string) {
+func (p *Pss) executeHandlers(topic Topic, payload []byte, from *PssAddress, raw bool, prox bool, asymmetric bool, keyid string) {
handlers := p.getHandlers(topic)
peer := p2p.NewPeer(enode.ID{}, fmt.Sprintf("%x", from), []p2p.Cap{})
- for f := range handlers {
- err := (*f)(payload, peer, asymmetric, keyid)
+ for h := range handlers {
+ if !h.caps.raw && raw {
+ log.Warn("norawhandler")
+ continue
+ }
+ if !h.caps.prox && prox {
+ log.Warn("noproxhandler")
+ continue
+ }
+ err := (h.f)(payload, peer, asymmetric, keyid)
if err != nil {
- log.Warn("Pss handler %p failed: %v", f, err)
+ log.Warn("Pss handler failed", "err", err)
}
}
}
@@ -445,9 +498,23 @@ func (p *Pss) isSelfRecipient(msg *PssMsg) bool {
}
// test match of leftmost bytes in given message to node's Kademlia address
-func (p *Pss) isSelfPossibleRecipient(msg *PssMsg) bool {
+func (p *Pss) isSelfPossibleRecipient(msg *PssMsg, prox bool) bool {
local := p.Kademlia.BaseAddr()
- return bytes.Equal(msg.To, local[:len(msg.To)])
+
+ // if a partial address matches we are possible recipient regardless of prox
+ // if not and prox is not set, we are surely not
+ if bytes.Equal(msg.To, local[:len(msg.To)]) {
+
+ return true
+ } else if !prox {
+ return false
+ }
+
+ depth := p.Kademlia.NeighbourhoodDepth()
+ po, _ := p.Kademlia.Pof(p.Kademlia.BaseAddr(), msg.To, 0)
+ log.Trace("selfpossible", "po", po, "depth", depth)
+
+ return depth <= po
}
/////////////////////////////////////////////////////////////////////
@@ -684,9 +751,6 @@ func (p *Pss) enqueue(msg *PssMsg) error {
//
// Will fail if raw messages are disallowed
func (p *Pss) SendRaw(address PssAddress, topic Topic, msg []byte) error {
- if !p.allowRaw {
- return errors.New("Raw messages not enabled")
- }
pssMsgParams := &msgParams{
raw: true,
}
@@ -699,7 +763,17 @@ func (p *Pss) SendRaw(address PssAddress, topic Topic, msg []byte) error {
pssMsg.Expire = uint32(time.Now().Add(p.msgTTL).Unix())
pssMsg.Payload = payload
p.addFwdCache(pssMsg)
- return p.enqueue(pssMsg)
+ err := p.enqueue(pssMsg)
+ if err != nil {
+ return err
+ }
+
+ // if we have a proxhandler on this topic
+ // also deliver message to ourselves
+ if p.isSelfPossibleRecipient(pssMsg, true) && p.topicHandlerCaps[topic].prox {
+ return p.process(pssMsg, true, true)
+ }
+ return nil
}
// Send a message using symmetric encryption
@@ -800,7 +874,16 @@ func (p *Pss) send(to []byte, topic Topic, msg []byte, asymmetric bool, key []by
pssMsg.To = to
pssMsg.Expire = uint32(time.Now().Add(p.msgTTL).Unix())
pssMsg.Payload = envelope
- return p.enqueue(pssMsg)
+ err = p.enqueue(pssMsg)
+ if err != nil {
+ return err
+ }
+ if _, ok := p.topicHandlerCaps[topic]; ok {
+ if p.isSelfPossibleRecipient(pssMsg, true) && p.topicHandlerCaps[topic].prox {
+ return p.process(pssMsg, true, true)
+ }
+ }
+ return nil
}
// Forwards a pss message to the peer(s) closest to the to recipient address in the PssMsg struct
@@ -895,6 +978,10 @@ func (p *Pss) cleanFwdCache() {
}
}
+func label(b []byte) string {
+ return fmt.Sprintf("%04x", b[:2])
+}
+
// add a message to the cache
func (p *Pss) addFwdCache(msg *PssMsg) error {
metrics.GetOrRegisterCounter("pss.addfwdcache", nil).Inc(1)
@@ -934,10 +1021,14 @@ func (p *Pss) checkFwdCache(msg *PssMsg) bool {
// Digest of message
func (p *Pss) digest(msg *PssMsg) pssDigest {
- hasher := p.hashPool.Get().(storage.SwarmHash)
+ return p.digestBytes(msg.serialize())
+}
+
+func (p *Pss) digestBytes(msg []byte) pssDigest {
+ hasher := p.hashPool.Get().(hash.Hash)
defer p.hashPool.Put(hasher)
hasher.Reset()
- hasher.Write(msg.serialize())
+ hasher.Write(msg)
digest := pssDigest{}
key := hasher.Sum(nil)
copy(digest[:], key[:digestLength])
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss_test.go
index 66a90be6..72f62acd 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss_test.go
@@ -48,20 +48,23 @@ import (
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/network"
+ "github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
)
var (
- initOnce = sync.Once{}
- debugdebugflag = flag.Bool("vv", false, "veryverbose")
- debugflag = flag.Bool("v", false, "verbose")
- longrunning = flag.Bool("longrunning", false, "do run long-running tests")
- w *whisper.Whisper
- wapi *whisper.PublicWhisperAPI
- psslogmain log.Logger
- pssprotocols map[string]*protoCtrl
- useHandshake bool
+ initOnce = sync.Once{}
+ loglevel = flag.Int("loglevel", 2, "logging verbosity")
+ longrunning = flag.Bool("longrunning", false, "do run long-running tests")
+ w *whisper.Whisper
+ wapi *whisper.PublicWhisperAPI
+ psslogmain log.Logger
+ pssprotocols map[string]*protoCtrl
+ useHandshake bool
+ noopHandlerFunc = func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ return nil
+ }
)
func init() {
@@ -75,16 +78,9 @@ func init() {
func initTest() {
initOnce.Do(
func() {
- loglevel := log.LvlInfo
- if *debugflag {
- loglevel = log.LvlDebug
- } else if *debugdebugflag {
- loglevel = log.LvlTrace
- }
-
psslogmain = log.New("psslog", "*")
hs := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
- hf := log.LvlFilterHandler(loglevel, hs)
+ hf := log.LvlFilterHandler(log.Lvl(*loglevel), hs)
h := log.CallerFileHandler(hf)
log.Root().SetHandler(h)
@@ -280,15 +276,14 @@ func TestAddressMatch(t *testing.T) {
}
pssmsg := &PssMsg{
- To: remoteaddr,
- Payload: &whisper.Envelope{},
+ To: remoteaddr,
}
// differ from first byte
if ps.isSelfRecipient(pssmsg) {
t.Fatalf("isSelfRecipient true but %x != %x", remoteaddr, localaddr)
}
- if ps.isSelfPossibleRecipient(pssmsg) {
+ if ps.isSelfPossibleRecipient(pssmsg, false) {
t.Fatalf("isSelfPossibleRecipient true but %x != %x", remoteaddr[:8], localaddr[:8])
}
@@ -297,7 +292,7 @@ func TestAddressMatch(t *testing.T) {
if ps.isSelfRecipient(pssmsg) {
t.Fatalf("isSelfRecipient true but %x != %x", remoteaddr, localaddr)
}
- if !ps.isSelfPossibleRecipient(pssmsg) {
+ if !ps.isSelfPossibleRecipient(pssmsg, false) {
t.Fatalf("isSelfPossibleRecipient false but %x == %x", remoteaddr[:8], localaddr[:8])
}
@@ -306,13 +301,342 @@ func TestAddressMatch(t *testing.T) {
if !ps.isSelfRecipient(pssmsg) {
t.Fatalf("isSelfRecipient false but %x == %x", remoteaddr, localaddr)
}
- if !ps.isSelfPossibleRecipient(pssmsg) {
+ if !ps.isSelfPossibleRecipient(pssmsg, false) {
t.Fatalf("isSelfPossibleRecipient false but %x == %x", remoteaddr[:8], localaddr[:8])
}
+
+}
+
+// test that message is handled by sender if a prox handler exists and sender is in prox of message
+func TestProxShortCircuit(t *testing.T) {
+
+ // sender node address
+ localAddr := network.RandomAddr().Over()
+ localPotAddr := pot.NewAddressFromBytes(localAddr)
+
+ // set up kademlia
+ kadParams := network.NewKadParams()
+ kad := network.NewKademlia(localAddr, kadParams)
+ peerCount := kad.MinBinSize + 1
+
+ // set up pss
+ privKey, err := crypto.GenerateKey()
+ pssp := NewPssParams().WithPrivateKey(privKey)
+ ps, err := NewPss(kad, pssp)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+
+ // create kademlia peers, so we have peers both inside and outside minproxlimit
+ var peers []*network.Peer
+ proxMessageAddress := pot.RandomAddressAt(localPotAddr, peerCount).Bytes()
+ distantMessageAddress := pot.RandomAddressAt(localPotAddr, 0).Bytes()
+
+ for i := 0; i < peerCount; i++ {
+ rw := &p2p.MsgPipeRW{}
+ ptpPeer := p2p.NewPeer(enode.ID{}, "wanna be with me? [ ] yes [ ] no", []p2p.Cap{})
+ protoPeer := protocols.NewPeer(ptpPeer, rw, &protocols.Spec{})
+ peerAddr := pot.RandomAddressAt(localPotAddr, i)
+ bzzPeer := &network.BzzPeer{
+ Peer: protoPeer,
+ BzzAddr: &network.BzzAddr{
+ OAddr: peerAddr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", peerAddr[:])),
+ },
+ }
+ peer := network.NewPeer(bzzPeer, kad)
+ kad.On(peer)
+ peers = append(peers, peer)
+ }
+
+ // register it marking prox capability
+ delivered := make(chan struct{})
+ rawHandlerFunc := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ log.Trace("in allowraw handler")
+ delivered <- struct{}{}
+ return nil
+ }
+ topic := BytesToTopic([]byte{0x2a})
+ hndlrProxDereg := ps.Register(&topic, &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ prox: true,
+ },
+ })
+ defer hndlrProxDereg()
+
+ // send message too far away for sender to be in prox
+ // reception of this message should time out
+ errC := make(chan error)
+ go func() {
+ err := ps.SendRaw(distantMessageAddress, topic, []byte("foo"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+
+ ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ t.Fatal("raw distant message delivered")
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ }
+
+ // send message that should be within sender prox
+ // this message should be delivered
+ go func() {
+ err := ps.SendRaw(proxMessageAddress, topic, []byte("bar"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ t.Fatal("raw timeout")
+ }
+
+ // try the same prox message with sym and asym send
+ proxAddrPss := PssAddress(proxMessageAddress)
+ symKeyId, err := ps.GenerateSymmetricKey(topic, &proxAddrPss, true)
+ go func() {
+ err := ps.SendSym(symKeyId, topic, []byte("baz"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ t.Fatal("sym timeout")
+ }
+
+ err = ps.SetPeerPublicKey(&privKey.PublicKey, topic, &proxAddrPss)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pubKeyId := hexutil.Encode(crypto.FromECDSAPub(&privKey.PublicKey))
+ go func() {
+ err := ps.SendAsym(pubKeyId, topic, []byte("xyzzy"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ t.Fatal("asym timeout")
+ }
}
-//
-func TestHandlerConditions(t *testing.T) {
+// verify that node can be set as recipient regardless of explicit message address match if minimum one handler of a topic is explicitly set to allow it
+// note that in these tests we use the raw capability on handlers for convenience
+func TestAddressMatchProx(t *testing.T) {
+
+ // recipient node address
+ localAddr := network.RandomAddr().Over()
+ localPotAddr := pot.NewAddressFromBytes(localAddr)
+
+ // set up kademlia
+ kadparams := network.NewKadParams()
+ kad := network.NewKademlia(localAddr, kadparams)
+ nnPeerCount := kad.MinBinSize
+ peerCount := nnPeerCount + 2
+
+ // set up pss
+ privKey, err := crypto.GenerateKey()
+ pssp := NewPssParams().WithPrivateKey(privKey)
+ ps, err := NewPss(kad, pssp)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+
+ // create kademlia peers, so we have peers both inside and outside minproxlimit
+ var peers []*network.Peer
+ for i := 0; i < peerCount; i++ {
+ rw := &p2p.MsgPipeRW{}
+ ptpPeer := p2p.NewPeer(enode.ID{}, "362436 call me anytime", []p2p.Cap{})
+ protoPeer := protocols.NewPeer(ptpPeer, rw, &protocols.Spec{})
+ peerAddr := pot.RandomAddressAt(localPotAddr, i)
+ bzzPeer := &network.BzzPeer{
+ Peer: protoPeer,
+ BzzAddr: &network.BzzAddr{
+ OAddr: peerAddr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", peerAddr[:])),
+ },
+ }
+ peer := network.NewPeer(bzzPeer, kad)
+ kad.On(peer)
+ peers = append(peers, peer)
+ }
+
+ // TODO: create a test in the network package to make a table with n peers where n-m are proxpeers
+ // meanwhile test regression for kademlia since we are compiling the test parameters from different packages
+ var proxes int
+ var conns int
+ kad.EachConn(nil, peerCount, func(p *network.Peer, po int, prox bool) bool {
+ conns++
+ if prox {
+ proxes++
+ }
+ log.Trace("kadconn", "po", po, "peer", p, "prox", prox)
+ return true
+ })
+ if proxes != nnPeerCount {
+ t.Fatalf("expected %d proxpeers, have %d", nnPeerCount, proxes)
+ } else if conns != peerCount {
+ t.Fatalf("expected %d peers total, have %d", peerCount, proxes)
+ }
+
+ // remote address distances from localAddr to try and the expected outcomes if we use prox handler
+ remoteDistances := []int{
+ 255,
+ nnPeerCount + 1,
+ nnPeerCount,
+ nnPeerCount - 1,
+ 0,
+ }
+ expects := []bool{
+ true,
+ true,
+ true,
+ false,
+ false,
+ }
+
+ // first the unit test on the method that calculates possible receipient using prox
+ for i, distance := range remoteDistances {
+ pssMsg := newPssMsg(&msgParams{})
+ pssMsg.To = make([]byte, len(localAddr))
+ copy(pssMsg.To, localAddr)
+ var byteIdx = distance / 8
+ pssMsg.To[byteIdx] ^= 1 << uint(7-(distance%8))
+ log.Trace(fmt.Sprintf("addrmatch %v", bytes.Equal(pssMsg.To, localAddr)))
+ if ps.isSelfPossibleRecipient(pssMsg, true) != expects[i] {
+ t.Fatalf("expected distance %d to be %v", distance, expects[i])
+ }
+ }
+
+ // we move up to higher level and test the actual message handler
+ // for each distance check if we are possible recipient when prox variant is used is set
+
+ // this handler will increment a counter for every message that gets passed to the handler
+ var receives int
+ rawHandlerFunc := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ log.Trace("in allowraw handler")
+ receives++
+ return nil
+ }
+
+ // register it marking prox capability
+ topic := BytesToTopic([]byte{0x2a})
+ hndlrProxDereg := ps.Register(&topic, &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ prox: true,
+ },
+ })
+
+ // test the distances
+ var prevReceive int
+ for i, distance := range remoteDistances {
+ remotePotAddr := pot.RandomAddressAt(localPotAddr, distance)
+ remoteAddr := remotePotAddr.Bytes()
+
+ var data [32]byte
+ rand.Read(data[:])
+ pssMsg := newPssMsg(&msgParams{raw: true})
+ pssMsg.To = remoteAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ Data: data[:],
+ }
+
+ log.Trace("withprox addrs", "local", localAddr, "remote", remoteAddr)
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if (!expects[i] && prevReceive != receives) || (expects[i] && prevReceive == receives) {
+ t.Fatalf("expected distance %d recipient %v when prox is set for handler", distance, expects[i])
+ }
+ prevReceive = receives
+ }
+
+ // now add a non prox-capable handler and test
+ ps.Register(&topic, &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ },
+ })
+ receives = 0
+ prevReceive = 0
+ for i, distance := range remoteDistances {
+ remotePotAddr := pot.RandomAddressAt(localPotAddr, distance)
+ remoteAddr := remotePotAddr.Bytes()
+
+ var data [32]byte
+ rand.Read(data[:])
+ pssMsg := newPssMsg(&msgParams{raw: true})
+ pssMsg.To = remoteAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ Data: data[:],
+ }
+
+ log.Trace("withprox addrs", "local", localAddr, "remote", remoteAddr)
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if (!expects[i] && prevReceive != receives) || (expects[i] && prevReceive == receives) {
+ t.Fatalf("expected distance %d recipient %v when prox is set for handler", distance, expects[i])
+ }
+ prevReceive = receives
+ }
+
+ // now deregister the prox capable handler, now none of the messages will be handled
+ hndlrProxDereg()
+ receives = 0
+
+ for _, distance := range remoteDistances {
+ remotePotAddr := pot.RandomAddressAt(localPotAddr, distance)
+ remoteAddr := remotePotAddr.Bytes()
+
+ pssMsg := newPssMsg(&msgParams{raw: true})
+ pssMsg.To = remoteAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ Data: []byte(remotePotAddr.String()),
+ }
+
+ log.Trace("noprox addrs", "local", localAddr, "remote", remoteAddr)
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives != 0 {
+ t.Fatalf("expected distance %d to not be recipient when prox is not set for handler", distance)
+ }
+
+ }
+}
+
+// verify that message queueing happens when it should, and that expired and corrupt messages are dropped
+func TestMessageProcessing(t *testing.T) {
t.Skip("Disabled due to probable faulty logic for outbox expectations")
// setup
@@ -326,13 +650,12 @@ func TestHandlerConditions(t *testing.T) {
ps := newTestPss(privkey, network.NewKademlia(addr, network.NewKadParams()), NewPssParams())
// message should pass
- msg := &PssMsg{
- To: addr,
- Expire: uint32(time.Now().Add(time.Second * 60).Unix()),
- Payload: &whisper.Envelope{
- Topic: [4]byte{},
- Data: []byte{0x66, 0x6f, 0x6f},
- },
+ msg := newPssMsg(&msgParams{})
+ msg.To = addr
+ msg.Expire = uint32(time.Now().Add(time.Second * 60).Unix())
+ msg.Payload = &whisper.Envelope{
+ Topic: [4]byte{},
+ Data: []byte{0x66, 0x6f, 0x6f},
}
if err := ps.handlePssMsg(context.TODO(), msg); err != nil {
t.Fatal(err.Error())
@@ -498,6 +821,7 @@ func TestKeys(t *testing.T) {
}
}
+// check that we can retrieve previously added public key entires per topic and peer
func TestGetPublickeyEntries(t *testing.T) {
privkey, err := crypto.GenerateKey()
@@ -557,7 +881,7 @@ OUTER:
}
// forwarding should skip peers that do not have matching pss capabilities
-func TestMismatch(t *testing.T) {
+func TestPeerCapabilityMismatch(t *testing.T) {
// create privkey for forwarder node
privkey, err := crypto.GenerateKey()
@@ -615,6 +939,76 @@ func TestMismatch(t *testing.T) {
}
+// verifies that message handlers for raw messages only are invoked when minimum one handler for the topic exists in which raw messages are explicitly allowed
+func TestRawAllow(t *testing.T) {
+
+ // set up pss like so many times before
+ privKey, err := crypto.GenerateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+ baseAddr := network.RandomAddr()
+ kad := network.NewKademlia((baseAddr).Over(), network.NewKadParams())
+ ps := newTestPss(privKey, kad, nil)
+ topic := BytesToTopic([]byte{0x2a})
+
+ // create handler innards that increments every time a message hits it
+ var receives int
+ rawHandlerFunc := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ log.Trace("in allowraw handler")
+ receives++
+ return nil
+ }
+
+ // wrap this handler function with a handler without raw capability and register it
+ hndlrNoRaw := &handler{
+ f: rawHandlerFunc,
+ }
+ ps.Register(&topic, hndlrNoRaw)
+
+ // test it with a raw message, should be poo-poo
+ pssMsg := newPssMsg(&msgParams{
+ raw: true,
+ })
+ pssMsg.To = baseAddr.OAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ }
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives > 0 {
+ t.Fatalf("Expected handler not to be executed with raw cap off")
+ }
+
+ // now wrap the same handler function with raw capabilities and register it
+ hndlrRaw := &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ },
+ }
+ deregRawHandler := ps.Register(&topic, hndlrRaw)
+
+ // should work now
+ pssMsg.Payload.Data = []byte("Raw Deal")
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives == 0 {
+ t.Fatalf("Expected handler to be executed with raw cap on")
+ }
+
+ // now deregister the raw capable handler
+ prevReceives := receives
+ deregRawHandler()
+
+ // check that raw messages fail again
+ pssMsg.Payload.Data = []byte("Raw Trump")
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives != prevReceives {
+ t.Fatalf("Expected handler not to be executed when raw handler is retracted")
+ }
+}
+
+// verifies that nodes can send and receive raw (verbatim) messages
func TestSendRaw(t *testing.T) {
t.Run("32", testSendRaw)
t.Run("8", testSendRaw)
@@ -658,19 +1052,19 @@ func testSendRaw(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, lcancel := context.WithTimeout(context.Background(), time.Second*10)
defer lcancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, true, false)
log.Trace("lsub", "id", lsub)
defer lsub.Unsubscribe()
rmsgC := make(chan APIMsg)
rctx, rcancel := context.WithTimeout(context.Background(), time.Second*10)
defer rcancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, true, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
// send and verify delivery
lmsg := []byte("plugh")
- err = clients[1].Call(nil, "pss_sendRaw", loaddrhex, topic, lmsg)
+ err = clients[1].Call(nil, "pss_sendRaw", loaddrhex, topic, hexutil.Encode(lmsg))
if err != nil {
t.Fatal(err)
}
@@ -683,7 +1077,7 @@ func testSendRaw(t *testing.T) {
t.Fatalf("test message (left) timed out: %v", cerr)
}
rmsg := []byte("xyzzy")
- err = clients[0].Call(nil, "pss_sendRaw", roaddrhex, topic, rmsg)
+ err = clients[0].Call(nil, "pss_sendRaw", roaddrhex, topic, hexutil.Encode(rmsg))
if err != nil {
t.Fatal(err)
}
@@ -757,13 +1151,13 @@ func testSendSym(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, lcancel := context.WithTimeout(context.Background(), time.Second*10)
defer lcancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, false, false)
log.Trace("lsub", "id", lsub)
defer lsub.Unsubscribe()
rmsgC := make(chan APIMsg)
rctx, rcancel := context.WithTimeout(context.Background(), time.Second*10)
defer rcancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
@@ -872,13 +1266,13 @@ func testSendAsym(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, lcancel := context.WithTimeout(context.Background(), time.Second*10)
defer lcancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, false, false)
log.Trace("lsub", "id", lsub)
defer lsub.Unsubscribe()
rmsgC := make(chan APIMsg)
rctx, rcancel := context.WithTimeout(context.Background(), time.Second*10)
defer rcancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
@@ -1037,7 +1431,7 @@ func testNetwork(t *testing.T) {
msgC := make(chan APIMsg)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
- sub, err := rpcclient.Subscribe(ctx, "pss", msgC, "receive", topic)
+ sub, err := rpcclient.Subscribe(ctx, "pss", msgC, "receive", topic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -1209,7 +1603,7 @@ func TestDeduplication(t *testing.T) {
rmsgC := make(chan APIMsg)
rctx, cancel := context.WithTimeout(context.Background(), time.Second*1)
defer cancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
@@ -1392,8 +1786,8 @@ func benchmarkSymkeyBruteforceChangeaddr(b *testing.B) {
if err != nil {
b.Fatalf("could not generate whisper envelope: %v", err)
}
- ps.Register(&topic, func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
- return nil
+ ps.Register(&topic, &handler{
+ f: noopHandlerFunc,
})
pssmsgs = append(pssmsgs, &PssMsg{
To: to,
@@ -1402,7 +1796,7 @@ func benchmarkSymkeyBruteforceChangeaddr(b *testing.B) {
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
- if err := ps.process(pssmsgs[len(pssmsgs)-(i%len(pssmsgs))-1]); err != nil {
+ if err := ps.process(pssmsgs[len(pssmsgs)-(i%len(pssmsgs))-1], false, false); err != nil {
b.Fatalf("pss processing failed: %v", err)
}
}
@@ -1476,15 +1870,15 @@ func benchmarkSymkeyBruteforceSameaddr(b *testing.B) {
if err != nil {
b.Fatalf("could not generate whisper envelope: %v", err)
}
- ps.Register(&topic, func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
- return nil
+ ps.Register(&topic, &handler{
+ f: noopHandlerFunc,
})
pssmsg := &PssMsg{
To: addr[len(addr)-1][:],
Payload: env,
}
for i := 0; i < b.N; i++ {
- if err := ps.process(pssmsg); err != nil {
+ if err := ps.process(pssmsg, false, false); err != nil {
b.Fatalf("pss processing failed: %v", err)
}
}
@@ -1581,7 +1975,12 @@ func newServices(allowRaw bool) adapters.Services {
if useHandshake {
SetHandshakeController(ps, NewHandshakeParams())
}
- ps.Register(&PingTopic, pp.Handle)
+ ps.Register(&PingTopic, &handler{
+ f: pp.Handle,
+ caps: &handlerCaps{
+ raw: true,
+ },
+ })
ps.addAPI(rpc.API{
Namespace: "psstest",
Version: "0.3",
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/types.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/types.go
index 56c2c51d..ba963067 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/types.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/types.go
@@ -159,9 +159,39 @@ func (msg *PssMsg) String() string {
}
// Signature for a message handler function for a PssMsg
-//
// Implementations of this type are passed to Pss.Register together with a topic,
-type Handler func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error
+type HandlerFunc func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error
+
+type handlerCaps struct {
+ raw bool
+ prox bool
+}
+
+// Handler defines code to be executed upon reception of content.
+type handler struct {
+ f HandlerFunc
+ caps *handlerCaps
+}
+
+// NewHandler returns a new message handler
+func NewHandler(f HandlerFunc) *handler {
+ return &handler{
+ f: f,
+ caps: &handlerCaps{},
+ }
+}
+
+// WithRaw is a chainable method that allows raw messages to be handled.
+func (h *handler) WithRaw() *handler {
+ h.caps.raw = true
+ return h
+}
+
+// WithProxBin is a chainable method that allows sending messages with full addresses to neighbourhoods using the kademlia depth as reference
+func (h *handler) WithProxBin() *handler {
+ h.caps.prox = true
+ return h
+}
// the stateStore handles saving and loading PSS peers and their corresponding keys
// it is currently unimplemented
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/db.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/db.go
new file mode 100644
index 00000000..e128b8cb
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/db.go
@@ -0,0 +1,130 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+// Package shed provides a simple abstraction components to compose
+// more complex operations on storage data organized in fields and indexes.
+//
+// Only type which holds logical information about swarm storage chunks data
+// and metadata is IndexItem. This part is not generalized mostly for
+// performance reasons.
+package shed
+
+import (
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+// The limit for LevelDB OpenFilesCacheCapacity.
+const openFileLimit = 128
+
+// DB provides abstractions over LevelDB in order to
+// implement complex structures using fields and ordered indexes.
+// It provides a schema functionality to store fields and indexes
+// information about naming and types.
+type DB struct {
+ ldb *leveldb.DB
+}
+
+// NewDB constructs a new DB and validates the schema
+// if it exists in database on the given path.
+func NewDB(path string) (db *DB, err error) {
+ ldb, err := leveldb.OpenFile(path, &opt.Options{
+ OpenFilesCacheCapacity: openFileLimit,
+ })
+ if err != nil {
+ return nil, err
+ }
+ db = &DB{
+ ldb: ldb,
+ }
+
+ if _, err = db.getSchema(); err != nil {
+ if err == leveldb.ErrNotFound {
+ // save schema with initialized default fields
+ if err = db.putSchema(schema{
+ Fields: make(map[string]fieldSpec),
+ Indexes: make(map[byte]indexSpec),
+ }); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+ }
+ return db, nil
+}
+
+// Put wraps LevelDB Put method to increment metrics counter.
+func (db *DB) Put(key []byte, value []byte) (err error) {
+ err = db.ldb.Put(key, value, nil)
+ if err != nil {
+ metrics.GetOrRegisterCounter("DB.putFail", nil).Inc(1)
+ return err
+ }
+ metrics.GetOrRegisterCounter("DB.put", nil).Inc(1)
+ return nil
+}
+
+// Get wraps LevelDB Get method to increment metrics counter.
+func (db *DB) Get(key []byte) (value []byte, err error) {
+ value, err = db.ldb.Get(key, nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ metrics.GetOrRegisterCounter("DB.getNotFound", nil).Inc(1)
+ } else {
+ metrics.GetOrRegisterCounter("DB.getFail", nil).Inc(1)
+ }
+ return nil, err
+ }
+ metrics.GetOrRegisterCounter("DB.get", nil).Inc(1)
+ return value, nil
+}
+
+// Delete wraps LevelDB Delete method to increment metrics counter.
+func (db *DB) Delete(key []byte) (err error) {
+ err = db.ldb.Delete(key, nil)
+ if err != nil {
+ metrics.GetOrRegisterCounter("DB.deleteFail", nil).Inc(1)
+ return err
+ }
+ metrics.GetOrRegisterCounter("DB.delete", nil).Inc(1)
+ return nil
+}
+
+// NewIterator wraps LevelDB NewIterator method to increment metrics counter.
+func (db *DB) NewIterator() iterator.Iterator {
+ metrics.GetOrRegisterCounter("DB.newiterator", nil).Inc(1)
+
+ return db.ldb.NewIterator(nil, nil)
+}
+
+// WriteBatch wraps LevelDB Write method to increment metrics counter.
+func (db *DB) WriteBatch(batch *leveldb.Batch) (err error) {
+ err = db.ldb.Write(batch, nil)
+ if err != nil {
+ metrics.GetOrRegisterCounter("DB.writebatchFail", nil).Inc(1)
+ return err
+ }
+ metrics.GetOrRegisterCounter("DB.writebatch", nil).Inc(1)
+ return nil
+}
+
+// Close closes LevelDB database.
+func (db *DB) Close() (err error) {
+ return db.ldb.Close()
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/db_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/db_test.go
new file mode 100644
index 00000000..45325bee
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/db_test.go
@@ -0,0 +1,110 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+// TestNewDB constructs a new DB
+// and validates if the schema is initialized properly.
+func TestNewDB(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ s, err := db.getSchema()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if s.Fields == nil {
+ t.Error("schema fields are empty")
+ }
+ if len(s.Fields) != 0 {
+ t.Errorf("got schema fields length %v, want %v", len(s.Fields), 0)
+ }
+ if s.Indexes == nil {
+ t.Error("schema indexes are empty")
+ }
+ if len(s.Indexes) != 0 {
+ t.Errorf("got schema indexes length %v, want %v", len(s.Indexes), 0)
+ }
+}
+
+// TestDB_persistence creates one DB, saves a field and closes that DB.
+// Then, it constructs another DB and trues to retrieve the saved value.
+func TestDB_persistence(t *testing.T) {
+ dir, err := ioutil.TempDir("", "shed-test-persistence")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ db, err := NewDB(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ stringField, err := db.NewStringField("preserve-me")
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "persistent value"
+ err = stringField.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = db.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ db2, err := NewDB(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ stringField2, err := db2.NewStringField("preserve-me")
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := stringField2.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+}
+
+// newTestDB is a helper function that constructs a
+// temporary database and returns a cleanup function that must
+// be called to remove the data.
+func newTestDB(t *testing.T) (db *DB, cleanupFunc func()) {
+ t.Helper()
+
+ dir, err := ioutil.TempDir("", "shed-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cleanupFunc = func() { os.RemoveAll(dir) }
+ db, err = NewDB(dir)
+ if err != nil {
+ cleanupFunc()
+ t.Fatal(err)
+ }
+ return db, cleanupFunc
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/example_store_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/example_store_test.go
new file mode 100644
index 00000000..2ed0be14
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/example_store_test.go
@@ -0,0 +1,332 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed_test
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "time"
+
+ "github.com/ethereum/go-ethereum/swarm/shed"
+ "github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// Store holds fields and indexes (including their encoding functions)
+// and defines operations on them by composing data from them.
+// It implements storage.ChunkStore interface.
+// It is just an example without any support for parallel operations
+// or real world implementation.
+type Store struct {
+ db *shed.DB
+
+ // fields and indexes
+ schemaName shed.StringField
+ sizeCounter shed.Uint64Field
+ accessCounter shed.Uint64Field
+ retrievalIndex shed.Index
+ accessIndex shed.Index
+ gcIndex shed.Index
+}
+
+// New returns new Store. All fields and indexes are initialized
+// and possible conflicts with schema from existing database is checked
+// automatically.
+func New(path string) (s *Store, err error) {
+ db, err := shed.NewDB(path)
+ if err != nil {
+ return nil, err
+ }
+ s = &Store{
+ db: db,
+ }
+ // Identify current storage schema by arbitrary name.
+ s.schemaName, err = db.NewStringField("schema-name")
+ if err != nil {
+ return nil, err
+ }
+ // Global ever incrementing index of chunk accesses.
+ s.accessCounter, err = db.NewUint64Field("access-counter")
+ if err != nil {
+ return nil, err
+ }
+ // Index storing actual chunk address, data and store timestamp.
+ s.retrievalIndex, err = db.NewIndex("Address->StoreTimestamp|Data", shed.IndexFuncs{
+ EncodeKey: func(fields shed.IndexItem) (key []byte, err error) {
+ return fields.Address, nil
+ },
+ DecodeKey: func(key []byte) (e shed.IndexItem, err error) {
+ e.Address = key
+ return e, nil
+ },
+ EncodeValue: func(fields shed.IndexItem) (value []byte, err error) {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+ value = append(b, fields.Data...)
+ return value, nil
+ },
+ DecodeValue: func(value []byte) (e shed.IndexItem, err error) {
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
+ e.Data = value[8:]
+ return e, nil
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ // Index storing access timestamp for a particular address.
+ // It is needed in order to update gc index keys for iteration order.
+ s.accessIndex, err = db.NewIndex("Address->AccessTimestamp", shed.IndexFuncs{
+ EncodeKey: func(fields shed.IndexItem) (key []byte, err error) {
+ return fields.Address, nil
+ },
+ DecodeKey: func(key []byte) (e shed.IndexItem, err error) {
+ e.Address = key
+ return e, nil
+ },
+ EncodeValue: func(fields shed.IndexItem) (value []byte, err error) {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(fields.AccessTimestamp))
+ return b, nil
+ },
+ DecodeValue: func(value []byte) (e shed.IndexItem, err error) {
+ e.AccessTimestamp = int64(binary.BigEndian.Uint64(value))
+ return e, nil
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ // Index with keys ordered by access timestamp for garbage collection prioritization.
+ s.gcIndex, err = db.NewIndex("AccessTimestamp|StoredTimestamp|Address->nil", shed.IndexFuncs{
+ EncodeKey: func(fields shed.IndexItem) (key []byte, err error) {
+ b := make([]byte, 16, 16+len(fields.Address))
+ binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp))
+ binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
+ key = append(b, fields.Address...)
+ return key, nil
+ },
+ DecodeKey: func(key []byte) (e shed.IndexItem, err error) {
+ e.AccessTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[8:16]))
+ e.Address = key[16:]
+ return e, nil
+ },
+ EncodeValue: func(fields shed.IndexItem) (value []byte, err error) {
+ return nil, nil
+ },
+ DecodeValue: func(value []byte) (e shed.IndexItem, err error) {
+ return e, nil
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+// Put stores the chunk and sets it store timestamp.
+func (s *Store) Put(_ context.Context, ch storage.Chunk) (err error) {
+ return s.retrievalIndex.Put(shed.IndexItem{
+ Address: ch.Address(),
+ Data: ch.Data(),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ })
+}
+
+// Get retrieves a chunk with the provided address.
+// It updates access and gc indexes by removing the previous
+// items from them and adding new items as keys of index entries
+// are changed.
+func (s *Store) Get(_ context.Context, addr storage.Address) (c storage.Chunk, err error) {
+ batch := new(leveldb.Batch)
+
+ // Get the chunk data and storage timestamp.
+ item, err := s.retrievalIndex.Get(shed.IndexItem{
+ Address: addr,
+ })
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ return nil, storage.ErrChunkNotFound
+ }
+ return nil, err
+ }
+
+ // Get the chunk access timestamp.
+ accessItem, err := s.accessIndex.Get(shed.IndexItem{
+ Address: addr,
+ })
+ switch err {
+ case nil:
+ // Remove gc index entry if access timestamp is found.
+ err = s.gcIndex.DeleteInBatch(batch, shed.IndexItem{
+ Address: item.Address,
+ StoreTimestamp: accessItem.AccessTimestamp,
+ AccessTimestamp: item.StoreTimestamp,
+ })
+ if err != nil {
+ return nil, err
+ }
+ case leveldb.ErrNotFound:
+ // Access timestamp is not found. Do not do anything.
+ // This is the firs get request.
+ default:
+ return nil, err
+ }
+
+ // Specify new access timestamp
+ accessTimestamp := time.Now().UTC().UnixNano()
+
+ // Put new access timestamp in access index.
+ err = s.accessIndex.PutInBatch(batch, shed.IndexItem{
+ Address: addr,
+ AccessTimestamp: accessTimestamp,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Put new access timestamp in gc index.
+ err = s.gcIndex.PutInBatch(batch, shed.IndexItem{
+ Address: item.Address,
+ AccessTimestamp: accessTimestamp,
+ StoreTimestamp: item.StoreTimestamp,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Increment access counter.
+ // Currently this information is not used anywhere.
+ _, err = s.accessCounter.IncInBatch(batch)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write the batch.
+ err = s.db.WriteBatch(batch)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the chunk.
+ return storage.NewChunk(item.Address, item.Data), nil
+}
+
+// CollectGarbage is an example of index iteration.
+// It provides no reliable garbage collection functionality.
+func (s *Store) CollectGarbage() (err error) {
+ const maxTrashSize = 100
+ maxRounds := 10 // arbitrary number, needs to be calculated
+
+ // Run a few gc rounds.
+ for roundCount := 0; roundCount < maxRounds; roundCount++ {
+ var garbageCount int
+ // New batch for a new cg round.
+ trash := new(leveldb.Batch)
+ // Iterate through all index items and break when needed.
+ err = s.gcIndex.IterateAll(func(item shed.IndexItem) (stop bool, err error) {
+ // Remove the chunk.
+ err = s.retrievalIndex.DeleteInBatch(trash, item)
+ if err != nil {
+ return false, err
+ }
+ // Remove the element in gc index.
+ err = s.gcIndex.DeleteInBatch(trash, item)
+ if err != nil {
+ return false, err
+ }
+ // Remove the relation in access index.
+ err = s.accessIndex.DeleteInBatch(trash, item)
+ if err != nil {
+ return false, err
+ }
+ garbageCount++
+ if garbageCount >= maxTrashSize {
+ return true, nil
+ }
+ return false, nil
+ })
+ if err != nil {
+ return err
+ }
+ if garbageCount == 0 {
+ return nil
+ }
+ err = s.db.WriteBatch(trash)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSchema is an example of retrieveing the most simple
+// string from a database field.
+func (s *Store) GetSchema() (name string, err error) {
+ name, err = s.schemaName.Get()
+ if err == leveldb.ErrNotFound {
+ return "", nil
+ }
+ return name, err
+}
+
+// GetSchema is an example of storing the most simple
+// string in a database field.
+func (s *Store) PutSchema(name string) (err error) {
+ return s.schemaName.Put(name)
+}
+
+// Close closes the underlying database.
+func (s *Store) Close() error {
+ return s.db.Close()
+}
+
+// Example_store constructs a simple storage implementation using shed package.
+func Example_store() {
+ dir, err := ioutil.TempDir("", "ephemeral")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ s, err := New(dir)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer s.Close()
+
+ ch := storage.GenerateRandomChunk(1024)
+ err = s.Put(context.Background(), ch)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ got, err := s.Get(context.Background(), ch.Address())
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(bytes.Equal(got.Data(), ch.Data()))
+
+ //Output: true
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_string.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_string.go
new file mode 100644
index 00000000..a7e8f0c7
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_string.go
@@ -0,0 +1,66 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed
+
+import (
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// StringField is the most simple field implementation
+// that stores an arbitrary string under a specific LevelDB key.
+type StringField struct {
+ db *DB
+ key []byte
+}
+
+// NewStringField retruns a new Instance of StringField.
+// It validates its name and type against the database schema.
+func (db *DB) NewStringField(name string) (f StringField, err error) {
+ key, err := db.schemaFieldKey(name, "string")
+ if err != nil {
+ return f, err
+ }
+ return StringField{
+ db: db,
+ key: key,
+ }, nil
+}
+
+// Get returns a string value from database.
+// If the value is not found, an empty string is returned
+// an no error.
+func (f StringField) Get() (val string, err error) {
+ b, err := f.db.Get(f.key)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ return "", nil
+ }
+ return "", err
+ }
+ return string(b), nil
+}
+
+// Put stores a string in the database.
+func (f StringField) Put(val string) (err error) {
+ return f.db.Put(f.key, []byte(val))
+}
+
+// PutInBatch stores a string in a batch that can be
+// saved later in database.
+func (f StringField) PutInBatch(batch *leveldb.Batch, val string) {
+ batch.Put(f.key, []byte(val))
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_string_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_string_test.go
new file mode 100644
index 00000000..4215075b
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_string_test.go
@@ -0,0 +1,110 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed
+
+import (
+ "testing"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// TestStringField validates put and get operations
+// of the StringField.
+func TestStringField(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ simpleString, err := db.NewStringField("simple-string")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("get empty", func(t *testing.T) {
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := ""
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+
+ t.Run("put", func(t *testing.T) {
+ want := "simple string value"
+ err = simpleString.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := "overwritten string value"
+ err = simpleString.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := "simple string batch value"
+ simpleString.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := "overwritten string batch value"
+ simpleString.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+ })
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_struct.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_struct.go
new file mode 100644
index 00000000..90daee7f
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_struct.go
@@ -0,0 +1,71 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed
+
+import (
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// StructField is a helper to store complex structure by
+// encoding it in RLP format.
+type StructField struct {
+ db *DB
+ key []byte
+}
+
+// NewStructField returns a new StructField.
+// It validates its name and type against the database schema.
+func (db *DB) NewStructField(name string) (f StructField, err error) {
+ key, err := db.schemaFieldKey(name, "struct-rlp")
+ if err != nil {
+ return f, err
+ }
+ return StructField{
+ db: db,
+ key: key,
+ }, nil
+}
+
+// Get unmarshals data from the database to a provided val.
+// If the data is not found leveldb.ErrNotFound is returned.
+func (f StructField) Get(val interface{}) (err error) {
+ b, err := f.db.Get(f.key)
+ if err != nil {
+ return err
+ }
+ return rlp.DecodeBytes(b, val)
+}
+
+// Put marshals provided val and saves it to the database.
+func (f StructField) Put(val interface{}) (err error) {
+ b, err := rlp.EncodeToBytes(val)
+ if err != nil {
+ return err
+ }
+ return f.db.Put(f.key, b)
+}
+
+// PutInBatch marshals provided val and puts it into the batch.
+func (f StructField) PutInBatch(batch *leveldb.Batch, val interface{}) (err error) {
+ b, err := rlp.EncodeToBytes(val)
+ if err != nil {
+ return err
+ }
+ batch.Put(f.key, b)
+ return nil
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_struct_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_struct_test.go
new file mode 100644
index 00000000..cc0be018
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_struct_test.go
@@ -0,0 +1,127 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed
+
+import (
+ "testing"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// TestStructField validates put and get operations
+// of the StructField.
+func TestStructField(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ complexField, err := db.NewStructField("complex-field")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type complexStructure struct {
+ A string
+ }
+
+ t.Run("get empty", func(t *testing.T) {
+ var s complexStructure
+ err := complexField.Get(&s)
+ if err != leveldb.ErrNotFound {
+ t.Fatalf("got error %v, want %v", err, leveldb.ErrNotFound)
+ }
+ want := ""
+ if s.A != want {
+ t.Errorf("got string %q, want %q", s.A, want)
+ }
+ })
+
+ t.Run("put", func(t *testing.T) {
+ want := complexStructure{
+ A: "simple string value",
+ }
+ err = complexField.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err = complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got.A, want.A)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := complexStructure{
+ A: "overwritten string value",
+ }
+ err = complexField.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err = complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got.A, want.A)
+ }
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := complexStructure{
+ A: "simple string batch value",
+ }
+ complexField.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err := complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := complexStructure{
+ A: "overwritten string batch value",
+ }
+ complexField.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err := complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+ })
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64.go
new file mode 100644
index 00000000..80e0069a
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64.go
@@ -0,0 +1,108 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed
+
+import (
+ "encoding/binary"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// Uint64Field provides a way to have a simple counter in the database.
+// It transparently encodes uint64 type value to bytes.
+type Uint64Field struct {
+ db *DB
+ key []byte
+}
+
+// NewUint64Field returns a new Uint64Field.
+// It validates its name and type against the database schema.
+func (db *DB) NewUint64Field(name string) (f Uint64Field, err error) {
+ key, err := db.schemaFieldKey(name, "uint64")
+ if err != nil {
+ return f, err
+ }
+ return Uint64Field{
+ db: db,
+ key: key,
+ }, nil
+}
+
+// Get retrieves a uint64 value from the database.
+// If the value is not found in the database a 0 value
+// is returned and no error.
+func (f Uint64Field) Get() (val uint64, err error) {
+ b, err := f.db.Get(f.key)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ return 0, nil
+ }
+ return 0, err
+ }
+ return binary.BigEndian.Uint64(b), nil
+}
+
+// Put encodes uin64 value and stores it in the database.
+func (f Uint64Field) Put(val uint64) (err error) {
+ return f.db.Put(f.key, encodeUint64(val))
+}
+
+// PutInBatch stores a uint64 value in a batch
+// that can be saved later in the database.
+func (f Uint64Field) PutInBatch(batch *leveldb.Batch, val uint64) {
+ batch.Put(f.key, encodeUint64(val))
+}
+
+// Inc increments a uint64 value in the database.
+// This operation is not goroutine save.
+func (f Uint64Field) Inc() (val uint64, err error) {
+ val, err = f.Get()
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ val = 0
+ } else {
+ return 0, err
+ }
+ }
+ val++
+ return val, f.Put(val)
+}
+
+// IncInBatch increments a uint64 value in the batch
+// by retreiving a value from the database, not the same batch.
+// This operation is not goroutine save.
+func (f Uint64Field) IncInBatch(batch *leveldb.Batch) (val uint64, err error) {
+ val, err = f.Get()
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ val = 0
+ } else {
+ return 0, err
+ }
+ }
+ val++
+ f.PutInBatch(batch, val)
+ return val, nil
+}
+
+// encode transforms uint64 to 8 byte long
+// slice in big endian encoding.
+func encodeUint64(val uint64) (b []byte) {
+ b = make([]byte, 8)
+ binary.BigEndian.PutUint64(b, val)
+ return b
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64_test.go
new file mode 100644
index 00000000..69ade71b
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64_test.go
@@ -0,0 +1,194 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed
+
+import (
+ "testing"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// TestUint64Field validates put and get operations
+// of the Uint64Field.
+func TestUint64Field(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("get empty", func(t *testing.T) {
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var want uint64
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ })
+
+ t.Run("put", func(t *testing.T) {
+ var want uint64 = 42
+ err = counter.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ var want uint64 = 84
+ err = counter.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ var want uint64 = 42
+ counter.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ var want uint64 = 84
+ counter.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ })
+ })
+}
+
+// TestUint64Field_Inc validates Inc operation
+// of the Uint64Field.
+func TestUint64Field_Inc(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var want uint64 = 1
+ got, err := counter.Inc()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ want = 2
+ got, err = counter.Inc()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+}
+
+// TestUint64Field_IncInBatch validates IncInBatch operation
+// of the Uint64Field.
+func TestUint64Field_IncInBatch(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ batch := new(leveldb.Batch)
+ var want uint64 = 1
+ got, err := counter.IncInBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ batch2 := new(leveldb.Batch)
+ want = 2
+ got, err = counter.IncInBatch(batch2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ err = db.WriteBatch(batch2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/index.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/index.go
new file mode 100644
index 00000000..ba803e3c
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/index.go
@@ -0,0 +1,264 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed
+
+import (
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// IndexItem holds fields relevant to Swarm Chunk data and metadata.
+// All information required for swarm storage and operations
+// on that storage must be defined here.
+// This structure is logically connected to swarm storage,
+// the only part of this package that is not generalized,
+// mostly for performance reasons.
+//
+// IndexItem is a type that is used for retrieving, storing and encoding
+// chunk data and metadata. It is passed as an argument to Index encoding
+// functions, get function and put function.
+// But it is also returned with additional data from get function call
+// and as the argument in iterator function definition.
+type IndexItem struct {
+ Address []byte
+ Data []byte
+ AccessTimestamp int64
+ StoreTimestamp int64
+ // UseMockStore is a pointer to identify
+ // an unset state of the field in Join function.
+ UseMockStore *bool
+}
+
+// Merge is a helper method to construct a new
+// IndexItem by filling up fields with default values
+// of a particular IndexItem with values from another one.
+func (i IndexItem) Merge(i2 IndexItem) (new IndexItem) {
+ if i.Address == nil {
+ i.Address = i2.Address
+ }
+ if i.Data == nil {
+ i.Data = i2.Data
+ }
+ if i.AccessTimestamp == 0 {
+ i.AccessTimestamp = i2.AccessTimestamp
+ }
+ if i.StoreTimestamp == 0 {
+ i.StoreTimestamp = i2.StoreTimestamp
+ }
+ if i.UseMockStore == nil {
+ i.UseMockStore = i2.UseMockStore
+ }
+ return i
+}
+
+// Index represents a set of LevelDB key value pairs that have common
+// prefix. It holds functions for encoding and decoding keys and values
+// to provide transparent actions on saved data which inclide:
+// - getting a particular IndexItem
+// - saving a particular IndexItem
+// - iterating over a sorted LevelDB keys
+// It implements IndexIteratorInterface interface.
+type Index struct {
+ db *DB
+ prefix []byte
+ encodeKeyFunc func(fields IndexItem) (key []byte, err error)
+ decodeKeyFunc func(key []byte) (e IndexItem, err error)
+ encodeValueFunc func(fields IndexItem) (value []byte, err error)
+ decodeValueFunc func(value []byte) (e IndexItem, err error)
+}
+
+// IndexFuncs structure defines functions for encoding and decoding
+// LevelDB keys and values for a specific index.
+type IndexFuncs struct {
+ EncodeKey func(fields IndexItem) (key []byte, err error)
+ DecodeKey func(key []byte) (e IndexItem, err error)
+ EncodeValue func(fields IndexItem) (value []byte, err error)
+ DecodeValue func(value []byte) (e IndexItem, err error)
+}
+
+// NewIndex returns a new Index instance with defined name and
+// encoding functions. The name must be unique and will be validated
+// on database schema for a key prefix byte.
+func (db *DB) NewIndex(name string, funcs IndexFuncs) (f Index, err error) {
+ id, err := db.schemaIndexPrefix(name)
+ if err != nil {
+ return f, err
+ }
+ prefix := []byte{id}
+ return Index{
+ db: db,
+ prefix: prefix,
+ // This function adjusts Index LevelDB key
+ // by appending the provided index id byte.
+ // This is needed to avoid collisions between keys of different
+ // indexes as all index ids are unique.
+ encodeKeyFunc: func(e IndexItem) (key []byte, err error) {
+ key, err = funcs.EncodeKey(e)
+ if err != nil {
+ return nil, err
+ }
+ return append(append(make([]byte, 0, len(key)+1), prefix...), key...), nil
+ },
+ // This function reverses the encodeKeyFunc constructed key
+ // to transparently work with index keys without their index ids.
+ // It assumes that index keys are prefixed with only one byte.
+ decodeKeyFunc: func(key []byte) (e IndexItem, err error) {
+ return funcs.DecodeKey(key[1:])
+ },
+ encodeValueFunc: funcs.EncodeValue,
+ decodeValueFunc: funcs.DecodeValue,
+ }, nil
+}
+
+// Get accepts key fields represented as IndexItem to retrieve a
+// value from the index and return maximum available information
+// from the index represented as another IndexItem.
+func (f Index) Get(keyFields IndexItem) (out IndexItem, err error) {
+ key, err := f.encodeKeyFunc(keyFields)
+ if err != nil {
+ return out, err
+ }
+ value, err := f.db.Get(key)
+ if err != nil {
+ return out, err
+ }
+ out, err = f.decodeValueFunc(value)
+ if err != nil {
+ return out, err
+ }
+ return out.Merge(keyFields), nil
+}
+
+// Put accepts IndexItem to encode information from it
+// and save it to the database.
+func (f Index) Put(i IndexItem) (err error) {
+ key, err := f.encodeKeyFunc(i)
+ if err != nil {
+ return err
+ }
+ value, err := f.encodeValueFunc(i)
+ if err != nil {
+ return err
+ }
+ return f.db.Put(key, value)
+}
+
+// PutInBatch is the same as Put method, but it just
+// saves the key/value pair to the batch instead
+// directly to the database.
+func (f Index) PutInBatch(batch *leveldb.Batch, i IndexItem) (err error) {
+ key, err := f.encodeKeyFunc(i)
+ if err != nil {
+ return err
+ }
+ value, err := f.encodeValueFunc(i)
+ if err != nil {
+ return err
+ }
+ batch.Put(key, value)
+ return nil
+}
+
+// Delete accepts IndexItem to remove a key/value pair
+// from the database based on its fields.
+func (f Index) Delete(keyFields IndexItem) (err error) {
+ key, err := f.encodeKeyFunc(keyFields)
+ if err != nil {
+ return err
+ }
+ return f.db.Delete(key)
+}
+
+// DeleteInBatch is the same as Delete just the operation
+// is performed on the batch instead on the database.
+func (f Index) DeleteInBatch(batch *leveldb.Batch, keyFields IndexItem) (err error) {
+ key, err := f.encodeKeyFunc(keyFields)
+ if err != nil {
+ return err
+ }
+ batch.Delete(key)
+ return nil
+}
+
+// IndexIterFunc is a callback on every IndexItem that is decoded
+// by iterating on an Index keys.
+// By returning a true for stop variable, iteration will
+// stop, and by returning the error, that error will be
+// propagated to the called iterator method on Index.
+type IndexIterFunc func(item IndexItem) (stop bool, err error)
+
+// IterateAll iterates over all keys of the Index.
+func (f Index) IterateAll(fn IndexIterFunc) (err error) {
+ it := f.db.NewIterator()
+ defer it.Release()
+
+ for ok := it.Seek(f.prefix); ok; ok = it.Next() {
+ key := it.Key()
+ if key[0] != f.prefix[0] {
+ break
+ }
+ keyIndexItem, err := f.decodeKeyFunc(key)
+ if err != nil {
+ return err
+ }
+ valueIndexItem, err := f.decodeValueFunc(it.Value())
+ if err != nil {
+ return err
+ }
+ stop, err := fn(keyIndexItem.Merge(valueIndexItem))
+ if err != nil {
+ return err
+ }
+ if stop {
+ break
+ }
+ }
+ return it.Error()
+}
+
+// IterateFrom iterates over Index keys starting from the key
+// encoded from the provided IndexItem.
+func (f Index) IterateFrom(start IndexItem, fn IndexIterFunc) (err error) {
+ startKey, err := f.encodeKeyFunc(start)
+ if err != nil {
+ return err
+ }
+ it := f.db.NewIterator()
+ defer it.Release()
+
+ for ok := it.Seek(startKey); ok; ok = it.Next() {
+ key := it.Key()
+ if key[0] != f.prefix[0] {
+ break
+ }
+ keyIndexItem, err := f.decodeKeyFunc(key)
+ if err != nil {
+ return err
+ }
+ valueIndexItem, err := f.decodeValueFunc(it.Value())
+ if err != nil {
+ return err
+ }
+ stop, err := fn(keyIndexItem.Merge(valueIndexItem))
+ if err != nil {
+ return err
+ }
+ if stop {
+ break
+ }
+ }
+ return it.Error()
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/index_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/index_test.go
new file mode 100644
index 00000000..ba82216d
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/index_test.go
@@ -0,0 +1,426 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// Index functions for the index that is used in tests in this file.
+var retrievalIndexFuncs = IndexFuncs{
+ EncodeKey: func(fields IndexItem) (key []byte, err error) {
+ return fields.Address, nil
+ },
+ DecodeKey: func(key []byte) (e IndexItem, err error) {
+ e.Address = key
+ return e, nil
+ },
+ EncodeValue: func(fields IndexItem) (value []byte, err error) {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+ value = append(b, fields.Data...)
+ return value, nil
+ },
+ DecodeValue: func(value []byte) (e IndexItem, err error) {
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
+ e.Data = value[8:]
+ return e, nil
+ },
+}
+
+// TestIndex validates put, get and delete functions of the Index implementation.
+func TestIndex(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("put", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("put-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err := index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("put-hash"),
+ Data: []byte("New DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err = index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("put-in-batch-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ batch := new(leveldb.Batch)
+ index.PutInBatch(batch, want)
+ err := db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("put-in-batch-hash"),
+ Data: []byte("New DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ batch := new(leveldb.Batch)
+ index.PutInBatch(batch, want)
+ db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+ })
+ })
+
+ t.Run("put in batch twice", func(t *testing.T) {
+ // ensure that the last item of items with the same db keys
+ // is actually saved
+ batch := new(leveldb.Batch)
+ address := []byte("put-in-batch-twice-hash")
+
+ // put the first item
+ index.PutInBatch(batch, IndexItem{
+ Address: address,
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ })
+
+ want := IndexItem{
+ Address: address,
+ Data: []byte("New DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+ // then put the item that will produce the same key
+ // but different value in the database
+ index.PutInBatch(batch, want)
+ db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+ })
+
+ t.Run("delete", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("delete-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err := index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+
+ err = index.Delete(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wantErr := leveldb.ErrNotFound
+ got, err = index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != wantErr {
+ t.Fatalf("got error %v, want %v", err, wantErr)
+ }
+ })
+
+ t.Run("delete in batch", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("delete-in-batch-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err := index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+
+ batch := new(leveldb.Batch)
+ index.DeleteInBatch(batch, IndexItem{
+ Address: want.Address,
+ })
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wantErr := leveldb.ErrNotFound
+ got, err = index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != wantErr {
+ t.Fatalf("got error %v, want %v", err, wantErr)
+ }
+ })
+}
+
+// TestIndex_iterate validates index iterator functions for correctness.
+func TestIndex_iterate(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ items := []IndexItem{
+ {
+ Address: []byte("iterate-hash-01"),
+ Data: []byte("data80"),
+ },
+ {
+ Address: []byte("iterate-hash-03"),
+ Data: []byte("data22"),
+ },
+ {
+ Address: []byte("iterate-hash-05"),
+ Data: []byte("data41"),
+ },
+ {
+ Address: []byte("iterate-hash-02"),
+ Data: []byte("data84"),
+ },
+ {
+ Address: []byte("iterate-hash-06"),
+ Data: []byte("data1"),
+ },
+ }
+ batch := new(leveldb.Batch)
+ for _, i := range items {
+ index.PutInBatch(batch, i)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ item04 := IndexItem{
+ Address: []byte("iterate-hash-04"),
+ Data: []byte("data0"),
+ }
+ err = index.Put(item04)
+ if err != nil {
+ t.Fatal(err)
+ }
+ items = append(items, item04)
+
+ sort.SliceStable(items, func(i, j int) bool {
+ return bytes.Compare(items[i].Address, items[j].Address) < 0
+ })
+
+ t.Run("all", func(t *testing.T) {
+ var i int
+ err := index.IterateAll(func(item IndexItem) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkIndexItem(t, item, want)
+ i++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+
+ t.Run("from", func(t *testing.T) {
+ startIndex := 2
+ i := startIndex
+ err := index.IterateFrom(items[startIndex], func(item IndexItem) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkIndexItem(t, item, want)
+ i++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+
+ t.Run("stop", func(t *testing.T) {
+ var i int
+ stopIndex := 3
+ var count int
+ err := index.IterateAll(func(item IndexItem) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkIndexItem(t, item, want)
+ count++
+ if i == stopIndex {
+ return true, nil
+ }
+ i++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ wantItemsCount := stopIndex + 1
+ if count != wantItemsCount {
+ t.Errorf("got %v items, expected %v", count, wantItemsCount)
+ }
+ })
+
+ t.Run("no overflow", func(t *testing.T) {
+ secondIndex, err := db.NewIndex("second-index", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ secondIndexItem := IndexItem{
+ Address: []byte("iterate-hash-10"),
+ Data: []byte("data-second"),
+ }
+ err = secondIndex.Put(secondIndexItem)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var i int
+ err = index.IterateAll(func(item IndexItem) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkIndexItem(t, item, want)
+ i++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ i = 0
+ err = secondIndex.IterateAll(func(item IndexItem) (stop bool, err error) {
+ if i > 1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ checkIndexItem(t, item, secondIndexItem)
+ i++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+// checkIndexItem is a test helper function that compares if two Index items are the same.
+func checkIndexItem(t *testing.T, got, want IndexItem) {
+ t.Helper()
+
+ if !bytes.Equal(got.Address, want.Address) {
+ t.Errorf("got hash %q, expected %q", string(got.Address), string(want.Address))
+ }
+ if !bytes.Equal(got.Data, want.Data) {
+ t.Errorf("got data %q, expected %q", string(got.Data), string(want.Data))
+ }
+ if got.StoreTimestamp != want.StoreTimestamp {
+ t.Errorf("got store timestamp %v, expected %v", got.StoreTimestamp, want.StoreTimestamp)
+ }
+ if got.AccessTimestamp != want.AccessTimestamp {
+ t.Errorf("got access timestamp %v, expected %v", got.AccessTimestamp, want.AccessTimestamp)
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/schema.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/schema.go
new file mode 100644
index 00000000..cfb7c6d6
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/schema.go
@@ -0,0 +1,134 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+)
+
+var (
+ // LevelDB key value for storing the schema.
+ keySchema = []byte{0}
+ // LevelDB key prefix for all field type.
+ // LevelDB keys will be constructed by appending name values to this prefix.
+ keyPrefixFields byte = 1
+ // LevelDB key prefix from which indexing keys start.
+ // Every index has its own key prefix and this value defines the first one.
+ keyPrefixIndexStart byte = 2 // Q: or maybe a higher number like 7, to have more space for potential specific perfixes
+)
+
+// schema is used to serialize known database structure information.
+type schema struct {
+ Fields map[string]fieldSpec `json:"fields"` // keys are field names
+ Indexes map[byte]indexSpec `json:"indexes"` // keys are index prefix bytes
+}
+
+// fieldSpec holds information about a particular field.
+// It does not need Name field as it is contained in the
+// schema.Field map key.
+type fieldSpec struct {
+ Type string `json:"type"`
+}
+
+// indxSpec holds information about a particular index.
+// It does not contain index type, as indexes do not have type.
+type indexSpec struct {
+ Name string `json:"name"`
+}
+
+// schemaFieldKey retrives the complete LevelDB key for
+// a particular field form the schema definition.
+func (db *DB) schemaFieldKey(name, fieldType string) (key []byte, err error) {
+ if name == "" {
+ return nil, errors.New("field name can not be blank")
+ }
+ if fieldType == "" {
+ return nil, errors.New("field type can not be blank")
+ }
+ s, err := db.getSchema()
+ if err != nil {
+ return nil, err
+ }
+ var found bool
+ for n, f := range s.Fields {
+ if n == name {
+ if f.Type != fieldType {
+ return nil, fmt.Errorf("field %q of type %q stored as %q in db", name, fieldType, f.Type)
+ }
+ break
+ }
+ }
+ if !found {
+ s.Fields[name] = fieldSpec{
+ Type: fieldType,
+ }
+ err := db.putSchema(s)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return append([]byte{keyPrefixFields}, []byte(name)...), nil
+}
+
+// schemaIndexID retrieves the complete LevelDB prefix for
+// a particular index.
+func (db *DB) schemaIndexPrefix(name string) (id byte, err error) {
+ if name == "" {
+ return 0, errors.New("index name can not be blank")
+ }
+ s, err := db.getSchema()
+ if err != nil {
+ return 0, err
+ }
+ nextID := keyPrefixIndexStart
+ for i, f := range s.Indexes {
+ if i >= nextID {
+ nextID = i + 1
+ }
+ if f.Name == name {
+ return i, nil
+ }
+ }
+ id = nextID
+ s.Indexes[id] = indexSpec{
+ Name: name,
+ }
+ return id, db.putSchema(s)
+}
+
+// getSchema retrieves the complete schema from
+// the database.
+func (db *DB) getSchema() (s schema, err error) {
+ b, err := db.Get(keySchema)
+ if err != nil {
+ return s, err
+ }
+ err = json.Unmarshal(b, &s)
+ return s, err
+}
+
+// putSchema stores the complete schema to
+// the database.
+func (db *DB) putSchema(s schema) (err error) {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ return db.Put(keySchema, b)
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/schema_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/schema_test.go
new file mode 100644
index 00000000..a0c1838c
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/schema_test.go
@@ -0,0 +1,126 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package shed
+
+import (
+ "bytes"
+ "testing"
+)
+
+// TestDB_schemaFieldKey validates correctness of schemaFieldKey.
+func TestDB_schemaFieldKey(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ t.Run("empty name or type", func(t *testing.T) {
+ _, err := db.schemaFieldKey("", "")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+ _, err = db.schemaFieldKey("", "type")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+
+ _, err = db.schemaFieldKey("test", "")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+ })
+
+ t.Run("same field", func(t *testing.T) {
+ key1, err := db.schemaFieldKey("test", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ key2, err := db.schemaFieldKey("test", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(key1, key2) {
+ t.Errorf("schema keys for the same field name are not the same: %q, %q", string(key1), string(key2))
+ }
+ })
+
+ t.Run("different fields", func(t *testing.T) {
+ key1, err := db.schemaFieldKey("test1", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ key2, err := db.schemaFieldKey("test2", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if bytes.Equal(key1, key2) {
+ t.Error("schema keys for the same field name are the same, but must not be")
+ }
+ })
+
+ t.Run("same field name different types", func(t *testing.T) {
+ _, err := db.schemaFieldKey("the-field", "one-type")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.schemaFieldKey("the-field", "another-type")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+ })
+}
+
+// TestDB_schemaIndexPrefix validates correctness of schemaIndexPrefix.
+func TestDB_schemaIndexPrefix(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ t.Run("same name", func(t *testing.T) {
+ id1, err := db.schemaIndexPrefix("test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id2, err := db.schemaIndexPrefix("test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if id1 != id2 {
+ t.Errorf("schema keys for the same field name are not the same: %v, %v", id1, id2)
+ }
+ })
+
+ t.Run("different names", func(t *testing.T) {
+ id1, err := db.schemaIndexPrefix("test1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id2, err := db.schemaIndexPrefix("test2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if id1 == id2 {
+ t.Error("schema ids for the same index name are the same, but must not be")
+ }
+ })
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go
index b0aa92e2..fc5dd8f7 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go
@@ -22,6 +22,7 @@ import (
"errors"
"github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/storage"
)
// ErrNotFound is returned when no results are returned from the database
@@ -30,6 +31,15 @@ var ErrNotFound = errors.New("ErrorNotFound")
// ErrInvalidArgument is returned when the argument type does not match the expected type
var ErrInvalidArgument = errors.New("ErrorInvalidArgument")
+// Store defines methods required to get, set, delete values for different keys
+// and close the underlying resources.
+type Store interface {
+ Get(key string, i interface{}) (err error)
+ Put(key string, i interface{}) (err error)
+ Delete(key string) (err error)
+ Close() error
+}
+
// DBStore uses LevelDB to store values.
type DBStore struct {
db *leveldb.DB
@@ -46,6 +56,17 @@ func NewDBStore(path string) (s *DBStore, err error) {
}, nil
}
+// NewInmemoryStore returns a new instance of DBStore. To be used only in tests and simulations.
+func NewInmemoryStore() *DBStore {
+ db, err := leveldb.Open(storage.NewMemStorage(), nil)
+ if err != nil {
+ panic(err)
+ }
+ return &DBStore{
+ db: db,
+ }
+}
+
// Get retrieves a persisted value for a specific key. If there is no results
// ErrNotFound is returned. The provided parameter should be either a byte slice or
// a struct that implements the encoding.BinaryUnmarshaler interface
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/state/inmemorystore.go b/vendor/github.com/ethereum/go-ethereum/swarm/state/inmemorystore.go
deleted file mode 100644
index 3ba48592..00000000
--- a/vendor/github.com/ethereum/go-ethereum/swarm/state/inmemorystore.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-import (
- "encoding"
- "encoding/json"
- "sync"
-)
-
-// InmemoryStore is the reference implementation of Store interface that is supposed
-// to be used in tests.
-type InmemoryStore struct {
- db map[string][]byte
- mu sync.RWMutex
-}
-
-// NewInmemoryStore returns a new instance of InmemoryStore.
-func NewInmemoryStore() *InmemoryStore {
- return &InmemoryStore{
- db: make(map[string][]byte),
- }
-}
-
-// Get retrieves a value stored for a specific key. If there is no value found,
-// ErrNotFound is returned.
-func (s *InmemoryStore) Get(key string, i interface{}) (err error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
-
- bytes, ok := s.db[key]
- if !ok {
- return ErrNotFound
- }
-
- unmarshaler, ok := i.(encoding.BinaryUnmarshaler)
- if !ok {
- return json.Unmarshal(bytes, i)
- }
-
- return unmarshaler.UnmarshalBinary(bytes)
-}
-
-// Put stores a value for a specific key.
-func (s *InmemoryStore) Put(key string, i interface{}) (err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- var bytes []byte
-
- marshaler, ok := i.(encoding.BinaryMarshaler)
- if !ok {
- if bytes, err = json.Marshal(i); err != nil {
- return err
- }
- } else {
- if bytes, err = marshaler.MarshalBinary(); err != nil {
- return err
- }
- }
-
- s.db[key] = bytes
- return nil
-}
-
-// Delete removes value stored under a specific key.
-func (s *InmemoryStore) Delete(key string) (err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if _, ok := s.db[key]; !ok {
- return ErrNotFound
- }
- delete(s.db, key)
- return nil
-}
-
-// Close does not do anything.
-func (s *InmemoryStore) Close() error {
- return nil
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/state/store.go b/vendor/github.com/ethereum/go-ethereum/swarm/state/store.go
deleted file mode 100644
index fb7fe258..00000000
--- a/vendor/github.com/ethereum/go-ethereum/swarm/state/store.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package state
-
-// Store defines methods required to get, set, delete values for different keys
-// and close the underlying resources.
-type Store interface {
- Get(key string, i interface{}) (err error)
- Put(key string, i interface{}) (err error)
- Delete(key string) (err error)
- Close() error
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go
index 40292e88..cbe65372 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go
@@ -22,6 +22,7 @@ import (
"fmt"
"io"
"sync"
+ "time"
"github.com/ethereum/go-ethereum/metrics"
ch "github.com/ethereum/go-ethereum/swarm/chunk"
@@ -410,10 +411,14 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e
log.Debug("lazychunkreader.size", "addr", r.addr)
if r.chunkData == nil {
+
+ startTime := time.Now()
chunkData, err := r.getter.Get(cctx, Reference(r.addr))
if err != nil {
+ metrics.GetOrRegisterResettingTimer("lcr.getter.get.err", nil).UpdateSince(startTime)
return 0, err
}
+ metrics.GetOrRegisterResettingTimer("lcr.getter.get", nil).UpdateSince(startTime)
r.chunkData = chunkData
s := r.chunkData.Size()
log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
@@ -542,8 +547,10 @@ func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeS
wg.Add(1)
go func(j int64) {
childAddress := chunkData[8+j*r.hashSize : 8+(j+1)*r.hashSize]
+ startTime := time.Now()
chunkData, err := r.getter.Get(r.ctx, Reference(childAddress))
if err != nil {
+ metrics.GetOrRegisterResettingTimer("lcr.getter.get.err", nil).UpdateSince(startTime)
log.Debug("lazychunkreader.join", "key", fmt.Sprintf("%x", childAddress), "err", err)
select {
case errC <- fmt.Errorf("chunk %v-%v not found; key: %s", off, off+treeSize, fmt.Sprintf("%x", childAddress)):
@@ -551,6 +558,7 @@ func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeS
}
return
}
+ metrics.GetOrRegisterResettingTimer("lcr.getter.get", nil).UpdateSince(startTime)
if l := len(chunkData); l < 9 {
select {
case errC <- fmt.Errorf("chunk %v-%v incomplete; key: %s, data length %v", off, off+treeSize, fmt.Sprintf("%x", childAddress), l):
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go
index fbae59fa..bd4f6b91 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go
@@ -284,7 +284,7 @@ func getGCIdxValue(index *dpaDBIndex, po uint8, addr Address) []byte {
return val
}
-func parseGCIdxKey(key []byte) (byte, []byte) {
+func parseIdxKey(key []byte) (byte, []byte) {
return key[0], key[1:]
}
@@ -589,7 +589,7 @@ func (s *LDBStore) CleanGCIndex() error {
it.Seek([]byte{keyGCIdx})
var gcDeletes int
for it.Valid() {
- rowType, _ := parseGCIdxKey(it.Key())
+ rowType, _ := parseIdxKey(it.Key())
if rowType != keyGCIdx {
break
}
@@ -601,47 +601,113 @@ func (s *LDBStore) CleanGCIndex() error {
if err := s.db.Write(&batch); err != nil {
return err
}
-
- it.Seek([]byte{keyIndex})
- var idx dpaDBIndex
- var poPtrs [256]uint64
- for it.Valid() {
- rowType, chunkHash := parseGCIdxKey(it.Key())
- if rowType != keyIndex {
- break
- }
- err := decodeIndex(it.Value(), &idx)
- if err != nil {
- return fmt.Errorf("corrupt index: %v", err)
- }
- po := s.po(chunkHash)
-
- // if we don't find the data key, remove the entry
- dataKey := getDataKey(idx.Idx, po)
- _, err = s.db.Get(dataKey)
- if err != nil {
- log.Warn("deleting inconsistent index (missing data)", "key", chunkHash)
- batch.Delete(it.Key())
- } else {
- gcIdxKey := getGCIdxKey(&idx)
- gcIdxData := getGCIdxValue(&idx, po, chunkHash)
- batch.Put(gcIdxKey, gcIdxData)
- log.Trace("clean ok", "key", chunkHash, "gcKey", gcIdxKey, "gcData", gcIdxData)
- okEntryCount++
- if idx.Idx > poPtrs[po] {
- poPtrs[po] = idx.Idx
- }
- }
- totalEntryCount++
- it.Next()
- }
+ batch.Reset()
it.Release()
+
+ // corrected po index pointer values
+ var poPtrs [256]uint64
+
+ // set to true if chunk count not on 4096 iteration boundary
+ var doneIterating bool
+
+ // last key index in previous iteration
+ lastIdxKey := []byte{keyIndex}
+
+ // counter for debug output
+ var cleanBatchCount int
+
+ // go through all key index entries
+ for !doneIterating {
+ cleanBatchCount++
+ var idxs []dpaDBIndex
+ var chunkHashes [][]byte
+ var pos []uint8
+ it := s.db.NewIterator()
+
+ it.Seek(lastIdxKey)
+
+ // 4096 is just a nice number, don't look for any hidden meaning here...
+ var i int
+ for i = 0; i < 4096; i++ {
+
+ // this really shouldn't happen unless database is empty
+ // but let's keep it to be safe
+ if !it.Valid() {
+ doneIterating = true
+ break
+ }
+
+ // if it's not keyindex anymore we're done iterating
+ rowType, chunkHash := parseIdxKey(it.Key())
+ if rowType != keyIndex {
+ doneIterating = true
+ break
+ }
+
+ // decode the retrieved index
+ var idx dpaDBIndex
+ err := decodeIndex(it.Value(), &idx)
+ if err != nil {
+ return fmt.Errorf("corrupt index: %v", err)
+ }
+ po := s.po(chunkHash)
+ lastIdxKey = it.Key()
+
+ // if we don't find the data key, remove the entry
+ // if we find it, add to the array of new gc indices to create
+ dataKey := getDataKey(idx.Idx, po)
+ _, err = s.db.Get(dataKey)
+ if err != nil {
+ log.Warn("deleting inconsistent index (missing data)", "key", chunkHash)
+ batch.Delete(it.Key())
+ } else {
+ idxs = append(idxs, idx)
+ chunkHashes = append(chunkHashes, chunkHash)
+ pos = append(pos, po)
+ okEntryCount++
+ if idx.Idx > poPtrs[po] {
+ poPtrs[po] = idx.Idx
+ }
+ }
+ totalEntryCount++
+ it.Next()
+ }
+ it.Release()
+
+ // flush the key index corrections
+ err := s.db.Write(&batch)
+ if err != nil {
+ return err
+ }
+ batch.Reset()
+
+ // add correct gc indices
+ for i, okIdx := range idxs {
+ gcIdxKey := getGCIdxKey(&okIdx)
+ gcIdxData := getGCIdxValue(&okIdx, pos[i], chunkHashes[i])
+ batch.Put(gcIdxKey, gcIdxData)
+ log.Trace("clean ok", "key", chunkHashes[i], "gcKey", gcIdxKey, "gcData", gcIdxData)
+ }
+
+ // flush them
+ err = s.db.Write(&batch)
+ if err != nil {
+ return err
+ }
+ batch.Reset()
+
+ log.Debug("clean gc index pass", "batch", cleanBatchCount, "checked", i, "kept", len(idxs))
+ }
+
log.Debug("gc cleanup entries", "ok", okEntryCount, "total", totalEntryCount, "batchlen", batch.Len())
+ // lastly add updated entry count
var entryCount [8]byte
binary.BigEndian.PutUint64(entryCount[:], okEntryCount)
batch.Put(keyEntryCnt, entryCount[:])
+
+ // and add the new po index pointers
var poKey [2]byte
poKey[0] = keyDistanceCnt
for i, poPtr := range poPtrs {
@@ -655,6 +721,7 @@ func (s *LDBStore) CleanGCIndex() error {
}
}
+ // if you made it this far your harddisk has survived. Congratulations
return s.db.Write(&batch)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore_test.go
index 07557980..e8b9ae39 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore_test.go
@@ -344,17 +344,18 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
func TestLDBStoreCollectGarbage(t *testing.T) {
// below max ronud
- cap := defaultMaxGCRound / 2
+ initialCap := defaultMaxGCRound / 100
+ cap := initialCap / 2
t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
// at max round
- cap = defaultMaxGCRound
+ cap = initialCap
t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
// more than max around, not on threshold
- cap = defaultMaxGCRound * 1.1
+ cap = initialCap + 500
t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
@@ -578,7 +579,7 @@ func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
- capacity := defaultMaxGCRound * 2
+ capacity := defaultMaxGCRound / 100 * 2
n := capacity - 1
ldb, cleanup := newLDBStore(t)
@@ -761,6 +762,38 @@ func TestCleanIndex(t *testing.T) {
t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
}
}
+
+ // check that the iterator quits properly
+ chunks, err = mputRandomChunks(ldb, 4100, 4096)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ po = ldb.po(chunks[4099].Address()[:])
+ dataKey = make([]byte, 10)
+ dataKey[0] = keyData
+ dataKey[1] = byte(po)
+ binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
+ if _, err := ldb.db.Get(dataKey); err != nil {
+ t.Fatal(err)
+ }
+ if err := ldb.db.Delete(dataKey); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := ldb.CleanGCIndex(); err != nil {
+ t.Fatal(err)
+ }
+
+ // entrycount should now be one less of added chunks
+ c, err = ldb.db.Get(keyEntryCnt)
+ if err != nil {
+ t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
+ }
+ entryCount = binary.BigEndian.Uint64(c)
+ if entryCount != 4099+2 {
+ t.Fatalf("expected entrycnt to be 2, was %d", c)
+ }
}
func waitGc(ctx context.Context, ldb *LDBStore) {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go
index fa98848d..111821ff 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go
@@ -194,7 +194,8 @@ func (ls *LocalStore) Close() {
ls.DbStore.Close()
}
-// Migrate checks the datastore schema vs the runtime schema, and runs migrations if they don't match
+// Migrate checks the datastore schema vs the runtime schema and runs
+// migrations if they don't match
func (ls *LocalStore) Migrate() error {
actualDbSchema, err := ls.DbStore.GetSchema()
if err != nil {
@@ -202,12 +203,12 @@ func (ls *LocalStore) Migrate() error {
return err
}
- log.Debug("running migrations for", "schema", actualDbSchema, "runtime-schema", CurrentDbSchema)
-
if actualDbSchema == CurrentDbSchema {
return nil
}
+ log.Debug("running migrations for", "schema", actualDbSchema, "runtime-schema", CurrentDbSchema)
+
if actualDbSchema == DbSchemaNone {
ls.migrateFromNoneToPurity()
actualDbSchema = DbSchemaPurity
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/db/db.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/db/db.go
index 43bfa24f..73ae199e 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/db/db.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/db/db.go
@@ -86,6 +86,13 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
return s.db.Write(batch, nil)
}
+// Delete removes the chunk reference to node with address addr.
+func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
+ batch := new(leveldb.Batch)
+ batch.Delete(nodeDBKey(addr, key))
+ return s.db.Write(batch, nil)
+}
+
// HasKey returns whether a node with addr contains the key.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
has, err := s.db.Has(nodeDBKey(addr, key), nil)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mem/mem.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mem/mem.go
index 8878309d..3a0a2beb 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mem/mem.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mem/mem.go
@@ -83,6 +83,22 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
return nil
}
+// Delete removes the chunk data for node with address addr.
+func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var count int
+ if _, ok := s.nodes[string(key)]; ok {
+ delete(s.nodes[string(key)], addr)
+ count = len(s.nodes[string(key)])
+ }
+ if count == 0 {
+ delete(s.data, string(key))
+ }
+ return nil
+}
+
// HasKey returns whether a node with addr contains the key.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
s.mu.Lock()
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go
index 81340f92..1fb71b70 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go
@@ -70,6 +70,12 @@ func (n *NodeStore) Put(key []byte, data []byte) error {
return n.store.Put(n.addr, key, data)
}
+// Delete removes chunk data for a key for a node that has the address
+// provided on NodeStore initialization.
+func (n *NodeStore) Delete(key []byte) error {
+ return n.store.Delete(n.addr, key)
+}
+
// GlobalStorer defines methods for mock db store
// that stores chunk data for all swarm nodes.
// It is used in tests to construct mock NodeStores
@@ -77,6 +83,7 @@ func (n *NodeStore) Put(key []byte, data []byte) error {
type GlobalStorer interface {
Get(addr common.Address, key []byte) (data []byte, err error)
Put(addr common.Address, key []byte, data []byte) error
+ Delete(addr common.Address, key []byte) error
HasKey(addr common.Address, key []byte) bool
// NewNodeStore creates an instance of NodeStore
// to be used by a single swarm node with
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc.go
index 6e735f69..8cd6c83a 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc.go
@@ -73,6 +73,12 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
return err
}
+// Delete calls a Delete method to RPC server.
+func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
+ err := s.client.Call(nil, "mockStore_delete", addr, key)
+ return err
+}
+
// HasKey calls a HasKey method to RPC server.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
var has bool
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc_test.go
index 52b634a4..f62340ed 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc_test.go
@@ -37,5 +37,5 @@ func TestRPCStore(t *testing.T) {
store := NewGlobalStore(rpc.DialInProc(server))
defer store.Close()
- test.MockStore(t, store, 100)
+ test.MockStore(t, store, 30)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go
index 02da3af5..10180985 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go
@@ -72,6 +72,31 @@ func MockStore(t *testing.T, globalStore mock.GlobalStorer, n int) {
}
}
}
+ t.Run("delete", func(t *testing.T) {
+ chunkAddr := storage.Address([]byte("1234567890abcd"))
+ for _, addr := range addrs {
+ err := globalStore.Put(addr, chunkAddr, []byte("data"))
+ if err != nil {
+ t.Fatalf("put data to store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
+ firstNodeAddr := addrs[0]
+ if err := globalStore.Delete(firstNodeAddr, chunkAddr); err != nil {
+ t.Fatalf("delete from store %s key %s: %v", firstNodeAddr.Hex(), chunkAddr.Hex(), err)
+ }
+ for i, addr := range addrs {
+ _, err := globalStore.Get(addr, chunkAddr)
+ if i == 0 {
+ if err != mock.ErrNotFound {
+ t.Errorf("get data from store %s key %s: expected mock.ErrNotFound error, got %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("get data from store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
+ }
+ })
})
t.Run("NodeStore", func(t *testing.T) {
@@ -114,6 +139,34 @@ func MockStore(t *testing.T, globalStore mock.GlobalStorer, n int) {
}
}
}
+ t.Run("delete", func(t *testing.T) {
+ chunkAddr := storage.Address([]byte("1234567890abcd"))
+ var chosenStore *mock.NodeStore
+ for addr, store := range nodes {
+ if chosenStore == nil {
+ chosenStore = store
+ }
+ err := store.Put(chunkAddr, []byte("data"))
+ if err != nil {
+ t.Fatalf("put data to store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
+ if err := chosenStore.Delete(chunkAddr); err != nil {
+ t.Fatalf("delete key %s: %v", chunkAddr.Hex(), err)
+ }
+ for addr, store := range nodes {
+ _, err := store.Get(chunkAddr)
+ if store == chosenStore {
+ if err != mock.ErrNotFound {
+ t.Errorf("get data from store %s key %s: expected mock.ErrNotFound error, got %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("get data from store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
+ }
+ })
})
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/swap/swap.go b/vendor/github.com/ethereum/go-ethereum/swarm/swap/swap.go
new file mode 100644
index 00000000..5d636dc2
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/swap/swap.go
@@ -0,0 +1,98 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package swap
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/protocols"
+ "github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/ethereum/go-ethereum/swarm/state"
+)
+
+// SwAP Swarm Accounting Protocol
+// a peer to peer micropayment system
+// A node maintains an individual balance with every peer
+// Only messages which have a price will be accounted for
+type Swap struct {
+ stateStore state.Store //stateStore is needed in order to keep balances across sessions
+ lock sync.RWMutex //lock the balances
+ balances map[enode.ID]int64 //map of balances for each peer
+}
+
+// New - swap constructor
+func New(stateStore state.Store) (swap *Swap) {
+ swap = &Swap{
+ stateStore: stateStore,
+ balances: make(map[enode.ID]int64),
+ }
+ return
+}
+
+//Swap implements the protocols.Balance interface
+//Add is the (sole) accounting function
+func (s *Swap) Add(amount int64, peer *protocols.Peer) (err error) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ //load existing balances from the state store
+ err = s.loadState(peer)
+ if err != nil && err != state.ErrNotFound {
+ return
+ }
+ //adjust the balance
+ //if amount is negative, it will decrease, otherwise increase
+ s.balances[peer.ID()] += amount
+ //save the new balance to the state store
+ peerBalance := s.balances[peer.ID()]
+ err = s.stateStore.Put(peer.ID().String(), &peerBalance)
+
+ log.Debug(fmt.Sprintf("balance for peer %s: %s", peer.ID().String(), strconv.FormatInt(peerBalance, 10)))
+ return err
+}
+
+//GetPeerBalance returns the balance for a given peer
+func (swap *Swap) GetPeerBalance(peer enode.ID) (int64, error) {
+ swap.lock.RLock()
+ defer swap.lock.RUnlock()
+ if p, ok := swap.balances[peer]; ok {
+ return p, nil
+ }
+ return 0, errors.New("Peer not found")
+}
+
+//load balances from the state store (persisted)
+func (s *Swap) loadState(peer *protocols.Peer) (err error) {
+ var peerBalance int64
+ peerID := peer.ID()
+ //only load if the current instance doesn't already have this peer's
+ //balance in memory
+ if _, ok := s.balances[peerID]; !ok {
+ err = s.stateStore.Get(peerID.String(), &peerBalance)
+ s.balances[peerID] = peerBalance
+ }
+ return
+}
+
+//Clean up Swap
+func (swap *Swap) Close() {
+ swap.stateStore.Close()
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/swap/swap_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/swap/swap_test.go
new file mode 100644
index 00000000..f2e3ba16
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/swap/swap_test.go
@@ -0,0 +1,184 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package swap
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ mrand "math/rand"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/protocols"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/swarm/state"
+ colorable "github.com/mattn/go-colorable"
+)
+
+var (
+ loglevel = flag.Int("loglevel", 2, "verbosity of logs")
+)
+
+func init() {
+ flag.Parse()
+ mrand.Seed(time.Now().UnixNano())
+
+ log.PrintOrigins(true)
+ log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+}
+
+//Test getting a peer's balance
+func TestGetPeerBalance(t *testing.T) {
+ //create a test swap account
+ swap, testDir := createTestSwap(t)
+ defer os.RemoveAll(testDir)
+
+ //test for correct value
+ testPeer := newDummyPeer()
+ swap.balances[testPeer.ID()] = 888
+ b, err := swap.GetPeerBalance(testPeer.ID())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if b != 888 {
+ t.Fatalf("Expected peer's balance to be %d, but is %d", 888, b)
+ }
+
+ //test for inexistent node
+ id := adapters.RandomNodeConfig().ID
+ _, err = swap.GetPeerBalance(id)
+ if err == nil {
+ t.Fatal("Expected call to fail, but it didn't!")
+ }
+ if err.Error() != "Peer not found" {
+ t.Fatalf("Expected test to fail with %s, but is %s", "Peer not found", err.Error())
+ }
+}
+
+//Test that repeated bookings do correct accounting
+func TestRepeatedBookings(t *testing.T) {
+ //create a test swap account
+ swap, testDir := createTestSwap(t)
+ defer os.RemoveAll(testDir)
+
+ testPeer := newDummyPeer()
+ amount := mrand.Intn(100)
+ cnt := 1 + mrand.Intn(10)
+ for i := 0; i < cnt; i++ {
+ swap.Add(int64(amount), testPeer.Peer)
+ }
+ expectedBalance := int64(cnt * amount)
+ realBalance := swap.balances[testPeer.ID()]
+ if expectedBalance != realBalance {
+ t.Fatal(fmt.Sprintf("After %d credits of %d, expected balance to be: %d, but is: %d", cnt, amount, expectedBalance, realBalance))
+ }
+
+ testPeer2 := newDummyPeer()
+ amount = mrand.Intn(100)
+ cnt = 1 + mrand.Intn(10)
+ for i := 0; i < cnt; i++ {
+ swap.Add(0-int64(amount), testPeer2.Peer)
+ }
+ expectedBalance = int64(0 - (cnt * amount))
+ realBalance = swap.balances[testPeer2.ID()]
+ if expectedBalance != realBalance {
+ t.Fatal(fmt.Sprintf("After %d debits of %d, expected balance to be: %d, but is: %d", cnt, amount, expectedBalance, realBalance))
+ }
+
+ //mixed debits and credits
+ amount1 := mrand.Intn(100)
+ amount2 := mrand.Intn(55)
+ amount3 := mrand.Intn(999)
+ swap.Add(int64(amount1), testPeer2.Peer)
+ swap.Add(int64(0-amount2), testPeer2.Peer)
+ swap.Add(int64(0-amount3), testPeer2.Peer)
+
+ expectedBalance = expectedBalance + int64(amount1-amount2-amount3)
+ realBalance = swap.balances[testPeer2.ID()]
+
+ if expectedBalance != realBalance {
+ t.Fatal(fmt.Sprintf("After mixed debits and credits, expected balance to be: %d, but is: %d", expectedBalance, realBalance))
+ }
+}
+
+//try restoring a balance from state store
+//this is simulated by creating a node,
+//assigning it an arbitrary balance,
+//then closing the state store.
+//Then we re-open the state store and check that
+//the balance is still the same
+func TestRestoreBalanceFromStateStore(t *testing.T) {
+ //create a test swap account
+ swap, testDir := createTestSwap(t)
+ defer os.RemoveAll(testDir)
+
+ testPeer := newDummyPeer()
+ swap.balances[testPeer.ID()] = -8888
+
+ tmpBalance := swap.balances[testPeer.ID()]
+ swap.stateStore.Put(testPeer.ID().String(), &tmpBalance)
+
+ swap.stateStore.Close()
+ swap.stateStore = nil
+
+ stateStore, err := state.NewDBStore(testDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var newBalance int64
+ stateStore.Get(testPeer.ID().String(), &newBalance)
+
+ //compare the balances
+ if tmpBalance != newBalance {
+ t.Fatal(fmt.Sprintf("Unexpected balance value after sending cheap message test. Expected balance: %d, balance is: %d",
+ tmpBalance, newBalance))
+ }
+}
+
+//create a test swap account
+//creates a stateStore for persistence and a Swap account
+func createTestSwap(t *testing.T) (*Swap, string) {
+ dir, err := ioutil.TempDir("", "swap_test_store")
+ if err != nil {
+ t.Fatal(err)
+ }
+ stateStore, err2 := state.NewDBStore(dir)
+ if err2 != nil {
+ t.Fatal(err2)
+ }
+ swap := New(stateStore)
+ return swap, dir
+}
+
+type dummyPeer struct {
+ *protocols.Peer
+}
+
+//creates a dummy protocols.Peer with dummy MsgReadWriter
+func newDummyPeer() *dummyPeer {
+ id := adapters.RandomNodeConfig().ID
+ protoPeer := protocols.NewPeer(p2p.NewPeer(id, "testPeer", nil), nil, nil)
+ dummy := &dummyPeer{
+ Peer: protoPeer,
+ }
+ return dummy
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go b/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go
index 1fb5443f..a4ff9405 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -51,6 +51,7 @@ import (
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/ethereum/go-ethereum/swarm/storage/mock"
+ "github.com/ethereum/go-ethereum/swarm/swap"
"github.com/ethereum/go-ethereum/swarm/tracing"
)
@@ -65,19 +66,22 @@ var (
// the swarm stack
type Swarm struct {
- config *api.Config // swarm configuration
- api *api.API // high level api layer (fs/manifest)
- dns api.Resolver // DNS registrar
- fileStore *storage.FileStore // distributed preimage archive, the local API to the storage with document level storage/retrieval support
- streamer *stream.Registry
- bzz *network.Bzz // the logistic manager
- backend chequebook.Backend // simple blockchain Backend
- privateKey *ecdsa.PrivateKey
- corsString string
- swapEnabled bool
- netStore *storage.NetStore
- sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit
- ps *pss.Pss
+ config *api.Config // swarm configuration
+ api *api.API // high level api layer (fs/manifest)
+ dns api.Resolver // DNS registrar
+ fileStore *storage.FileStore // distributed preimage archive, the local API to the storage with document level storage/retrieval support
+ streamer *stream.Registry
+ bzz *network.Bzz // the logistic manager
+ backend chequebook.Backend // simple blockchain Backend
+ privateKey *ecdsa.PrivateKey
+ corsString string
+ swapEnabled bool
+ netStore *storage.NetStore
+ sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit
+ ps *pss.Pss
+ swap *swap.Swap
+ stateStore *state.DBStore
+ accountingMetrics *protocols.AccountingMetrics
tracerClose io.Closer
}
@@ -132,7 +136,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
LightNode: config.LightNodeEnabled,
}
- stateStore, err := state.NewDBStore(filepath.Join(config.Path, "state-store.db"))
+ self.stateStore, err = state.NewDBStore(filepath.Join(config.Path, "state-store.db"))
if err != nil {
return
}
@@ -171,6 +175,15 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
delivery := stream.NewDelivery(to, self.netStore)
self.netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, config.DeliverySkipCheck).New
+ if config.SwapEnabled {
+ balancesStore, err := state.NewDBStore(filepath.Join(config.Path, "balances.db"))
+ if err != nil {
+ return nil, err
+ }
+ self.swap = swap.New(balancesStore)
+ self.accountingMetrics = protocols.SetupAccountingMetrics(10*time.Second, filepath.Join(config.Path, "metrics.db"))
+ }
+
var nodeID enode.ID
if err := nodeID.UnmarshalText([]byte(config.NodeID)); err != nil {
return nil, err
@@ -193,7 +206,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
SyncUpdateDelay: config.SyncUpdateDelay,
MaxPeerServers: config.MaxStreamPeerServers,
}
- self.streamer = stream.NewRegistry(nodeID, delivery, self.netStore, stateStore, registryOptions)
+ self.streamer = stream.NewRegistry(nodeID, delivery, self.netStore, self.stateStore, registryOptions, self.swap)
// Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage
self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams)
@@ -216,7 +229,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
log.Debug("Setup local storage")
- self.bzz = network.NewBzz(bzzconfig, to, stateStore, stream.Spec, self.streamer.Run)
+ self.bzz = network.NewBzz(bzzconfig, to, self.stateStore, self.streamer.GetSpec(), self.streamer.Run)
// Pss = postal service over swarm (devp2p over bzz)
self.ps, err = pss.NewPss(to, config.Pss)
@@ -353,7 +366,9 @@ func (self *Swarm) Start(srv *p2p.Server) error {
newaddr := self.bzz.UpdateLocalAddr([]byte(srv.Self().String()))
log.Info("Updated bzz local addr", "oaddr", fmt.Sprintf("%x", newaddr.OAddr), "uaddr", fmt.Sprintf("%s", newaddr.UAddr))
// set chequebook
- if self.config.SwapEnabled {
+ //TODO: Currently if swap is enabled and no chequebook (or inexistent) contract is provided, the node would crash.
+ //Once we integrate back the contracts, this check MUST be revisited
+ if self.config.SwapEnabled && self.config.SwapAPI != "" {
ctx := context.Background() // The initial setup has no deadline.
err := self.SetChequebook(ctx)
if err != nil {
@@ -434,14 +449,24 @@ func (self *Swarm) Stop() error {
ch.Stop()
ch.Save()
}
-
+ if self.swap != nil {
+ self.swap.Close()
+ }
+ if self.accountingMetrics != nil {
+ self.accountingMetrics.Close()
+ }
if self.netStore != nil {
self.netStore.Close()
}
self.sfs.Stop()
stopCounter.Inc(1)
self.streamer.Stop()
- return self.bzz.Stop()
+
+ err := self.bzz.Stop()
+ if self.stateStore != nil {
+ self.stateStore.Close()
+ }
+ return err
}
// implements the node.Service interface
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go b/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go
index 0f5a453d..c3481090 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 0 // Major version component of the current release
VersionMinor = 3 // Minor version component of the current release
- VersionPatch = 6 // Patch version component of the current release
+ VersionPatch = 8 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go b/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go
index 12cba324..9fa69bf4 100644
--- a/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go
+++ b/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go
@@ -118,7 +118,7 @@ func (t *BlockTest) Run() error {
} else {
engine = ethash.NewShared()
}
- chain, err := core.NewBlockChain(db, nil, config, engine, vm.Config{}, nil)
+ chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieCleanLimit: 0}, config, engine, vm.Config{}, nil)
if err != nil {
return err
}
diff --git a/vendor/github.com/ethereum/go-ethereum/tests/init.go b/vendor/github.com/ethereum/go-ethereum/tests/init.go
index f0a4943c..db0457b6 100644
--- a/vendor/github.com/ethereum/go-ethereum/tests/init.go
+++ b/vendor/github.com/ethereum/go-ethereum/tests/init.go
@@ -86,6 +86,15 @@ var Forks = map[string]*params.ChainConfig{
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(5),
},
+ "ByzantiumToConstantinopleAt5": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(5),
+ },
}
// UnsupportedForkError is returned when a test requests a fork that isn't implemented.
diff --git a/vendor/github.com/ethereum/go-ethereum/tests/state_test.go b/vendor/github.com/ethereum/go-ethereum/tests/state_test.go
index ad77e4f3..96440538 100644
--- a/vendor/github.com/ethereum/go-ethereum/tests/state_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/tests/state_test.go
@@ -18,10 +18,12 @@ package tests
import (
"bytes"
+ "flag"
"fmt"
"reflect"
"testing"
+ "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/core/vm"
)
@@ -65,8 +67,17 @@ func TestState(t *testing.T) {
// Transactions with gasLimit above this value will not get a VM trace on failure.
const traceErrorLimit = 400000
+// The VM config for state tests that accepts --vm.* command line arguments.
+var testVMConfig = func() vm.Config {
+ vmconfig := vm.Config{}
+ flag.StringVar(&vmconfig.EVMInterpreter, utils.EVMInterpreterFlag.Name, utils.EVMInterpreterFlag.Value, utils.EVMInterpreterFlag.Usage)
+ flag.StringVar(&vmconfig.EWASMInterpreter, utils.EWASMInterpreterFlag.Name, utils.EWASMInterpreterFlag.Value, utils.EWASMInterpreterFlag.Usage)
+ flag.Parse()
+ return vmconfig
+}()
+
func withTrace(t *testing.T, gasLimit uint64, test func(vm.Config) error) {
- err := test(vm.Config{})
+ err := test(testVMConfig)
if err == nil {
return
}
diff --git a/vendor/github.com/ethereum/go-ethereum/trie/database.go b/vendor/github.com/ethereum/go-ethereum/trie/database.go
index d0691b63..739a98ad 100644
--- a/vendor/github.com/ethereum/go-ethereum/trie/database.go
+++ b/vendor/github.com/ethereum/go-ethereum/trie/database.go
@@ -22,6 +22,7 @@ import (
"sync"
"time"
+ "github.com/allegro/bigcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
@@ -30,6 +31,11 @@ import (
)
var (
+ memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
+ memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
+ memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
+ memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)
+
memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)
@@ -64,9 +70,10 @@ type DatabaseReader interface {
type Database struct {
diskdb ethdb.Database // Persistent storage for matured trie nodes
- nodes map[common.Hash]*cachedNode // Data and references relationships of a node
- oldest common.Hash // Oldest tracked node, flush-list head
- newest common.Hash // Newest tracked node, flush-list tail
+ cleans *bigcache.BigCache // GC friendly memory cache of clean node RLPs
+ dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes
+ oldest common.Hash // Oldest tracked node, flush-list head
+ newest common.Hash // Newest tracked node, flush-list tail
preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
seckeybuf [secureKeyLength]byte // Ephemeral buffer for calculating preimage keys
@@ -79,7 +86,7 @@ type Database struct {
flushnodes uint64 // Nodes flushed since last commit
flushsize common.StorageSize // Data storage flushed since last commit
- nodesSize common.StorageSize // Storage size of the nodes cache (exc. flushlist)
+ dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. flushlist)
preimagesSize common.StorageSize // Storage size of the preimages cache
lock sync.RWMutex
@@ -134,7 +141,7 @@ type cachedNode struct {
node node // Cached collapsed trie node, or raw rlp data
size uint16 // Byte size of the useful cached data
- parents uint16 // Number of live nodes referencing this one
+ parents uint32 // Number of live nodes referencing this one
children map[common.Hash]uint16 // External children referenced by this node
flushPrev common.Hash // Previous node in the flush-list
@@ -262,11 +269,30 @@ func expandNode(hash hashNode, n node, cachegen uint16) node {
}
// NewDatabase creates a new trie database to store ephemeral trie content before
-// its written out to disk or garbage collected.
+// its written out to disk or garbage collected. No read cache is created, so all
+// data retrievals will hit the underlying disk database.
func NewDatabase(diskdb ethdb.Database) *Database {
+ return NewDatabaseWithCache(diskdb, 0)
+}
+
+// NewDatabaseWithCache creates a new trie database to store ephemeral trie content
+// before its written out to disk or garbage collected. It also acts as a read cache
+// for nodes loaded from disk.
+func NewDatabaseWithCache(diskdb ethdb.Database, cache int) *Database {
+ var cleans *bigcache.BigCache
+ if cache > 0 {
+ cleans, _ = bigcache.NewBigCache(bigcache.Config{
+ Shards: 1024,
+ LifeWindow: time.Hour,
+ MaxEntriesInWindow: cache * 1024,
+ MaxEntrySize: 512,
+ HardMaxCacheSize: cache,
+ })
+ }
return &Database{
diskdb: diskdb,
- nodes: map[common.Hash]*cachedNode{{}: {}},
+ cleans: cleans,
+ dirties: map[common.Hash]*cachedNode{{}: {}},
preimages: make(map[common.Hash][]byte),
}
}
@@ -293,7 +319,7 @@ func (db *Database) InsertBlob(hash common.Hash, blob []byte) {
// size tracking.
func (db *Database) insert(hash common.Hash, blob []byte, node node) {
// If the node's already cached, skip
- if _, ok := db.nodes[hash]; ok {
+ if _, ok := db.dirties[hash]; ok {
return
}
// Create the cached entry for this node
@@ -303,19 +329,19 @@ func (db *Database) insert(hash common.Hash, blob []byte, node node) {
flushPrev: db.newest,
}
for _, child := range entry.childs() {
- if c := db.nodes[child]; c != nil {
+ if c := db.dirties[child]; c != nil {
c.parents++
}
}
- db.nodes[hash] = entry
+ db.dirties[hash] = entry
// Update the flush-list endpoints
if db.oldest == (common.Hash{}) {
db.oldest, db.newest = hash, hash
} else {
- db.nodes[db.newest].flushNext, db.newest = hash, hash
+ db.dirties[db.newest].flushNext, db.newest = hash, hash
}
- db.nodesSize += common.StorageSize(common.HashLength + entry.size)
+ db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}
// insertPreimage writes a new trie node pre-image to the memory database if it's
@@ -333,35 +359,64 @@ func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
func (db *Database) node(hash common.Hash, cachegen uint16) node {
- // Retrieve the node from cache if available
+ // Retrieve the node from the clean cache if available
+ if db.cleans != nil {
+ if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil {
+ memcacheCleanHitMeter.Mark(1)
+ memcacheCleanReadMeter.Mark(int64(len(enc)))
+ return mustDecodeNode(hash[:], enc, cachegen)
+ }
+ }
+ // Retrieve the node from the dirty cache if available
db.lock.RLock()
- node := db.nodes[hash]
+ dirty := db.dirties[hash]
db.lock.RUnlock()
- if node != nil {
- return node.obj(hash, cachegen)
+ if dirty != nil {
+ return dirty.obj(hash, cachegen)
}
// Content unavailable in memory, attempt to retrieve from disk
enc, err := db.diskdb.Get(hash[:])
if err != nil || enc == nil {
return nil
}
+ if db.cleans != nil {
+ db.cleans.Set(string(hash[:]), enc)
+ memcacheCleanMissMeter.Mark(1)
+ memcacheCleanWriteMeter.Mark(int64(len(enc)))
+ }
return mustDecodeNode(hash[:], enc, cachegen)
}
// Node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
func (db *Database) Node(hash common.Hash) ([]byte, error) {
- // Retrieve the node from cache if available
+ // Retrieve the node from the clean cache if available
+ if db.cleans != nil {
+ if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil {
+ memcacheCleanHitMeter.Mark(1)
+ memcacheCleanReadMeter.Mark(int64(len(enc)))
+ return enc, nil
+ }
+ }
+ // Retrieve the node from the dirty cache if available
db.lock.RLock()
- node := db.nodes[hash]
+ dirty := db.dirties[hash]
db.lock.RUnlock()
- if node != nil {
- return node.rlp(), nil
+ if dirty != nil {
+ return dirty.rlp(), nil
}
// Content unavailable in memory, attempt to retrieve from disk
- return db.diskdb.Get(hash[:])
+ enc, err := db.diskdb.Get(hash[:])
+ if err == nil && enc != nil {
+ if db.cleans != nil {
+ db.cleans.Set(string(hash[:]), enc)
+ memcacheCleanMissMeter.Mark(1)
+ memcacheCleanWriteMeter.Mark(int64(len(enc)))
+ }
+ }
+ return enc, err
}
// preimage retrieves a cached trie node pre-image from memory. If it cannot be
@@ -395,8 +450,8 @@ func (db *Database) Nodes() []common.Hash {
db.lock.RLock()
defer db.lock.RUnlock()
- var hashes = make([]common.Hash, 0, len(db.nodes))
- for hash := range db.nodes {
+ var hashes = make([]common.Hash, 0, len(db.dirties))
+ for hash := range db.dirties {
if hash != (common.Hash{}) { // Special case for "root" references/nodes
hashes = append(hashes, hash)
}
@@ -415,18 +470,18 @@ func (db *Database) Reference(child common.Hash, parent common.Hash) {
// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
// If the node does not exist, it's a node pulled from disk, skip
- node, ok := db.nodes[child]
+ node, ok := db.dirties[child]
if !ok {
return
}
// If the reference already exists, only duplicate for roots
- if db.nodes[parent].children == nil {
- db.nodes[parent].children = make(map[common.Hash]uint16)
- } else if _, ok = db.nodes[parent].children[child]; ok && parent != (common.Hash{}) {
+ if db.dirties[parent].children == nil {
+ db.dirties[parent].children = make(map[common.Hash]uint16)
+ } else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
return
}
node.parents++
- db.nodes[parent].children[child]++
+ db.dirties[parent].children[child]++
}
// Dereference removes an existing reference from a root node.
@@ -439,25 +494,25 @@ func (db *Database) Dereference(root common.Hash) {
db.lock.Lock()
defer db.lock.Unlock()
- nodes, storage, start := len(db.nodes), db.nodesSize, time.Now()
+ nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
db.dereference(root, common.Hash{})
- db.gcnodes += uint64(nodes - len(db.nodes))
- db.gcsize += storage - db.nodesSize
+ db.gcnodes += uint64(nodes - len(db.dirties))
+ db.gcsize += storage - db.dirtiesSize
db.gctime += time.Since(start)
memcacheGCTimeTimer.Update(time.Since(start))
- memcacheGCSizeMeter.Mark(int64(storage - db.nodesSize))
- memcacheGCNodesMeter.Mark(int64(nodes - len(db.nodes)))
+ memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))
- log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.nodes), "size", storage-db.nodesSize, "time", time.Since(start),
- "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.nodes), "livesize", db.nodesSize)
+ log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
+ "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}
// dereference is the private locked version of Dereference.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
// Dereference the parent-child
- node := db.nodes[parent]
+ node := db.dirties[parent]
if node.children != nil && node.children[child] > 0 {
node.children[child]--
@@ -466,7 +521,7 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) {
}
}
// If the child does not exist, it's a previously committed node.
- node, ok := db.nodes[child]
+ node, ok := db.dirties[child]
if !ok {
return
}
@@ -483,20 +538,20 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) {
switch child {
case db.oldest:
db.oldest = node.flushNext
- db.nodes[node.flushNext].flushPrev = common.Hash{}
+ db.dirties[node.flushNext].flushPrev = common.Hash{}
case db.newest:
db.newest = node.flushPrev
- db.nodes[node.flushPrev].flushNext = common.Hash{}
+ db.dirties[node.flushPrev].flushNext = common.Hash{}
default:
- db.nodes[node.flushPrev].flushNext = node.flushNext
- db.nodes[node.flushNext].flushPrev = node.flushPrev
+ db.dirties[node.flushPrev].flushNext = node.flushNext
+ db.dirties[node.flushNext].flushPrev = node.flushPrev
}
// Dereference all children and delete the node
for _, hash := range node.childs() {
db.dereference(hash, child)
}
- delete(db.nodes, child)
- db.nodesSize -= common.StorageSize(common.HashLength + int(node.size))
+ delete(db.dirties, child)
+ db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
}
}
@@ -509,13 +564,13 @@ func (db *Database) Cap(limit common.StorageSize) error {
// by only uncaching existing data when the database write finalizes.
db.lock.RLock()
- nodes, storage, start := len(db.nodes), db.nodesSize, time.Now()
+ nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
batch := db.diskdb.NewBatch()
- // db.nodesSize only contains the useful data in the cache, but when reporting
+ // db.dirtiesSize only contains the useful data in the cache, but when reporting
// the total memory consumption, the maintenance metadata is also needed to be
// counted. For every useful node, we track 2 extra hashes as the flushlist.
- size := db.nodesSize + common.StorageSize((len(db.nodes)-1)*2*common.HashLength)
+ size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*2*common.HashLength)
// If the preimage cache got large enough, push to disk. If it's still small
// leave for later to deduplicate writes.
@@ -540,7 +595,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
oldest := db.oldest
for size > limit && oldest != (common.Hash{}) {
// Fetch the oldest referenced node and push into the batch
- node := db.nodes[oldest]
+ node := db.dirties[oldest]
if err := batch.Put(oldest[:], node.rlp()); err != nil {
db.lock.RUnlock()
return err
@@ -578,25 +633,25 @@ func (db *Database) Cap(limit common.StorageSize) error {
db.preimagesSize = 0
}
for db.oldest != oldest {
- node := db.nodes[db.oldest]
- delete(db.nodes, db.oldest)
+ node := db.dirties[db.oldest]
+ delete(db.dirties, db.oldest)
db.oldest = node.flushNext
- db.nodesSize -= common.StorageSize(common.HashLength + int(node.size))
+ db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
}
if db.oldest != (common.Hash{}) {
- db.nodes[db.oldest].flushPrev = common.Hash{}
+ db.dirties[db.oldest].flushPrev = common.Hash{}
}
- db.flushnodes += uint64(nodes - len(db.nodes))
- db.flushsize += storage - db.nodesSize
+ db.flushnodes += uint64(nodes - len(db.dirties))
+ db.flushsize += storage - db.dirtiesSize
db.flushtime += time.Since(start)
memcacheFlushTimeTimer.Update(time.Since(start))
- memcacheFlushSizeMeter.Mark(int64(storage - db.nodesSize))
- memcacheFlushNodesMeter.Mark(int64(nodes - len(db.nodes)))
+ memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))
- log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.nodes), "size", storage-db.nodesSize, "time", time.Since(start),
- "flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.nodes), "livesize", db.nodesSize)
+ log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
+ "flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
return nil
}
@@ -630,7 +685,7 @@ func (db *Database) Commit(node common.Hash, report bool) error {
}
}
// Move the trie itself into the batch, flushing if enough data is accumulated
- nodes, storage := len(db.nodes), db.nodesSize
+ nodes, storage := len(db.dirties), db.dirtiesSize
if err := db.commit(node, batch); err != nil {
log.Error("Failed to commit trie from trie database", "err", err)
db.lock.RUnlock()
@@ -654,15 +709,15 @@ func (db *Database) Commit(node common.Hash, report bool) error {
db.uncache(node)
memcacheCommitTimeTimer.Update(time.Since(start))
- memcacheCommitSizeMeter.Mark(int64(storage - db.nodesSize))
- memcacheCommitNodesMeter.Mark(int64(nodes - len(db.nodes)))
+ memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))
logger := log.Info
if !report {
logger = log.Debug
}
- logger("Persisted trie from memory database", "nodes", nodes-len(db.nodes)+int(db.flushnodes), "size", storage-db.nodesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
- "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.nodes), "livesize", db.nodesSize)
+ logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
+ "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
// Reset the garbage collection statistics
db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
@@ -674,7 +729,7 @@ func (db *Database) Commit(node common.Hash, report bool) error {
// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch) error {
// If the node does not exist, it's a previously committed node
- node, ok := db.nodes[hash]
+ node, ok := db.dirties[hash]
if !ok {
return nil
}
@@ -702,7 +757,7 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch) error {
// to disk.
func (db *Database) uncache(hash common.Hash) {
// If the node does not exist, we're done on this path
- node, ok := db.nodes[hash]
+ node, ok := db.dirties[hash]
if !ok {
return
}
@@ -710,20 +765,20 @@ func (db *Database) uncache(hash common.Hash) {
switch hash {
case db.oldest:
db.oldest = node.flushNext
- db.nodes[node.flushNext].flushPrev = common.Hash{}
+ db.dirties[node.flushNext].flushPrev = common.Hash{}
case db.newest:
db.newest = node.flushPrev
- db.nodes[node.flushPrev].flushNext = common.Hash{}
+ db.dirties[node.flushPrev].flushNext = common.Hash{}
default:
- db.nodes[node.flushPrev].flushNext = node.flushNext
- db.nodes[node.flushNext].flushPrev = node.flushPrev
+ db.dirties[node.flushPrev].flushNext = node.flushNext
+ db.dirties[node.flushNext].flushPrev = node.flushPrev
}
// Uncache the node's subtries and remove the node itself too
for _, child := range node.childs() {
db.uncache(child)
}
- delete(db.nodes, hash)
- db.nodesSize -= common.StorageSize(common.HashLength + int(node.size))
+ delete(db.dirties, hash)
+ db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
}
// Size returns the current storage size of the memory cache in front of the
@@ -732,11 +787,11 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize) {
db.lock.RLock()
defer db.lock.RUnlock()
- // db.nodesSize only contains the useful data in the cache, but when reporting
+ // db.dirtiesSize only contains the useful data in the cache, but when reporting
// the total memory consumption, the maintenance metadata is also needed to be
// counted. For every useful node, we track 2 extra hashes as the flushlist.
- var flushlistSize = common.StorageSize((len(db.nodes) - 1) * 2 * common.HashLength)
- return db.nodesSize + flushlistSize, db.preimagesSize
+ var flushlistSize = common.StorageSize((len(db.dirties) - 1) * 2 * common.HashLength)
+ return db.dirtiesSize + flushlistSize, db.preimagesSize
}
// verifyIntegrity is a debug method to iterate over the entire trie stored in
@@ -749,12 +804,12 @@ func (db *Database) verifyIntegrity() {
// Iterate over all the cached nodes and accumulate them into a set
reachable := map[common.Hash]struct{}{{}: {}}
- for child := range db.nodes[common.Hash{}].children {
+ for child := range db.dirties[common.Hash{}].children {
db.accumulate(child, reachable)
}
// Find any unreachable but cached nodes
unreachable := []string{}
- for hash, node := range db.nodes {
+ for hash, node := range db.dirties {
if _, ok := reachable[hash]; !ok {
unreachable = append(unreachable, fmt.Sprintf("%x: {Node: %v, Parents: %d, Prev: %x, Next: %x}",
hash, node.node, node.parents, node.flushPrev, node.flushNext))
@@ -769,7 +824,7 @@ func (db *Database) verifyIntegrity() {
// cached children found in memory.
func (db *Database) accumulate(hash common.Hash, reachable map[common.Hash]struct{}) {
// Mark the node reachable if present in the memory cache
- node, ok := db.nodes[hash]
+ node, ok := db.dirties[hash]
if !ok {
return
}
diff --git a/vendor/github.com/ethereum/go-ethereum/trie/iterator.go b/vendor/github.com/ethereum/go-ethereum/trie/iterator.go
index 00b890eb..77f16816 100644
--- a/vendor/github.com/ethereum/go-ethereum/trie/iterator.go
+++ b/vendor/github.com/ethereum/go-ethereum/trie/iterator.go
@@ -181,6 +181,8 @@ func (it *nodeIterator) LeafProof() [][]byte {
if len(it.stack) > 0 {
if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
hasher := newHasher(0, 0, nil)
+ defer returnHasherToPool(hasher)
+
proofs := make([][]byte, 0, len(it.stack))
for i, item := range it.stack[:len(it.stack)-1] {
diff --git a/vendor/github.com/ethereum/go-ethereum/trie/iterator_test.go b/vendor/github.com/ethereum/go-ethereum/trie/iterator_test.go
index 2a510b1c..4f633b19 100644
--- a/vendor/github.com/ethereum/go-ethereum/trie/iterator_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/trie/iterator_test.go
@@ -113,7 +113,7 @@ func TestNodeIteratorCoverage(t *testing.T) {
t.Errorf("failed to retrieve reported node %x: %v", hash, err)
}
}
- for hash, obj := range db.nodes {
+ for hash, obj := range db.dirties {
if obj != nil && hash != (common.Hash{}) {
if _, ok := hashes[hash]; !ok {
t.Errorf("state entry not reported %x", hash)
@@ -333,8 +333,8 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
}
}
if memonly {
- robj = triedb.nodes[rkey]
- delete(triedb.nodes, rkey)
+ robj = triedb.dirties[rkey]
+ delete(triedb.dirties, rkey)
} else {
rval, _ = diskdb.Get(rkey[:])
diskdb.Delete(rkey[:])
@@ -350,7 +350,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
// Add the node back and continue iteration.
if memonly {
- triedb.nodes[rkey] = robj
+ triedb.dirties[rkey] = robj
} else {
diskdb.Put(rkey[:], rval)
}
@@ -393,8 +393,8 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
barNodeObj *cachedNode
)
if memonly {
- barNodeObj = triedb.nodes[barNodeHash]
- delete(triedb.nodes, barNodeHash)
+ barNodeObj = triedb.dirties[barNodeHash]
+ delete(triedb.dirties, barNodeHash)
} else {
barNodeBlob, _ = diskdb.Get(barNodeHash[:])
diskdb.Delete(barNodeHash[:])
@@ -411,7 +411,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
}
// Reinsert the missing node.
if memonly {
- triedb.nodes[barNodeHash] = barNodeObj
+ triedb.dirties[barNodeHash] = barNodeObj
} else {
diskdb.Put(barNodeHash[:], barNodeBlob)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/trie/proof.go b/vendor/github.com/ethereum/go-ethereum/trie/proof.go
index 6cb8f4d5..f90ecd7d 100644
--- a/vendor/github.com/ethereum/go-ethereum/trie/proof.go
+++ b/vendor/github.com/ethereum/go-ethereum/trie/proof.go
@@ -66,6 +66,8 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.Putter) error {
}
}
hasher := newHasher(0, 0, nil)
+ defer returnHasherToPool(hasher)
+
for i, n := range nodes {
// Don't bother checking for errors here since hasher panics
// if encoding doesn't work and we're not writing to any database.
diff --git a/vendor/github.com/ethereum/go-ethereum/trie/trie_test.go b/vendor/github.com/ethereum/go-ethereum/trie/trie_test.go
index f8e5fd12..f9d6029c 100644
--- a/vendor/github.com/ethereum/go-ethereum/trie/trie_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/trie/trie_test.go
@@ -119,7 +119,7 @@ func testMissingNode(t *testing.T, memonly bool) {
hash := common.HexToHash("0xe1d943cc8f061a0c0b98162830b970395ac9315654824bf21b73b891365262f9")
if memonly {
- delete(triedb.nodes, hash)
+ delete(triedb.dirties, hash)
} else {
diskdb.Delete(hash[:])
}
@@ -342,15 +342,16 @@ func TestCacheUnload(t *testing.T) {
// Commit the trie repeatedly and access key1.
// The branch containing it is loaded from DB exactly two times:
// in the 0th and 6th iteration.
- db := &countingDB{Database: trie.db.diskdb, gets: make(map[string]int)}
- trie, _ = New(root, NewDatabase(db))
+ diskdb := &countingDB{Database: trie.db.diskdb, gets: make(map[string]int)}
+ triedb := NewDatabase(diskdb)
+ trie, _ = New(root, triedb)
trie.SetCacheLimit(5)
for i := 0; i < 12; i++ {
getString(trie, key1)
trie.Commit(nil)
}
// Check that it got loaded two times.
- for dbkey, count := range db.gets {
+ for dbkey, count := range diskdb.gets {
if count != 2 {
t.Errorf("db key %x loaded %d times, want %d times", []byte(dbkey), count, 2)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go b/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go
index af9418d9..d7af4baa 100644
--- a/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go
+++ b/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+// Package mailserver provides a naive, example mailserver implementation
package mailserver
import (
@@ -26,9 +27,11 @@ import (
"github.com/ethereum/go-ethereum/rlp"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
"github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
+// WMailServer represents the state data of the mailserver.
type WMailServer struct {
db *leveldb.DB
w *whisper.Whisper
@@ -42,6 +45,8 @@ type DBKey struct {
raw []byte
}
+// NewDbKey is a helper function that creates a levelDB
+// key from a hash and an integer.
func NewDbKey(t uint32, h common.Hash) *DBKey {
const sz = common.HashLength + 4
var k DBKey
@@ -53,6 +58,7 @@ func NewDbKey(t uint32, h common.Hash) *DBKey {
return &k
}
+// Init initializes the mail server.
func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, pow float64) error {
var err error
if len(path) == 0 {
@@ -63,7 +69,7 @@ func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, p
return fmt.Errorf("password is not specified")
}
- s.db, err = leveldb.OpenFile(path, nil)
+ s.db, err = leveldb.OpenFile(path, &opt.Options{OpenFilesCacheCapacity: 32})
if err != nil {
return fmt.Errorf("open DB file: %s", err)
}
@@ -82,12 +88,14 @@ func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, p
return nil
}
+// Close cleans up before shutdown.
func (s *WMailServer) Close() {
if s.db != nil {
s.db.Close()
}
}
+// Archive stores the
func (s *WMailServer) Archive(env *whisper.Envelope) {
key := NewDbKey(env.Expiry-env.TTL, env.Hash())
rawEnvelope, err := rlp.EncodeToBytes(env)
@@ -101,6 +109,8 @@ func (s *WMailServer) Archive(env *whisper.Envelope) {
}
}
+// DeliverMail responds with saved messages upon request by the
+// messages' owner.
func (s *WMailServer) DeliverMail(peer *whisper.Peer, request *whisper.Envelope) {
if peer == nil {
log.Error("Whisper peer is nil")
diff --git a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv5/peer_test.go b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv5/peer_test.go
index 35616aaa..24495320 100644
--- a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv5/peer_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv5/peer_test.go
@@ -139,7 +139,7 @@ func initialize(t *testing.T) {
err = node.server.Start()
if err != nil {
- t.Fatalf("failed to start server %d.", i)
+ t.Fatalf("failed to start server %d. err: %v", i, err)
}
for j := 0; j < i; j++ {
diff --git a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/api_test.go b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/api_test.go
index cdbc7fab..6d7157f5 100644
--- a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/api_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/api_test.go
@@ -18,27 +18,12 @@ package whisperv6
import (
"bytes"
- "crypto/ecdsa"
"testing"
"time"
-
- mapset "github.com/deckarep/golang-set"
- "github.com/ethereum/go-ethereum/common"
)
func TestMultipleTopicCopyInNewMessageFilter(t *testing.T) {
- w := &Whisper{
- privateKeys: make(map[string]*ecdsa.PrivateKey),
- symKeys: make(map[string][]byte),
- envelopes: make(map[common.Hash]*Envelope),
- expirations: make(map[uint32]mapset.Set),
- peers: make(map[*Peer]struct{}),
- messageQueue: make(chan *Envelope, messageQueueLimit),
- p2pMsgQueue: make(chan *Envelope, messageQueueLimit),
- quit: make(chan struct{}),
- syncAllowance: DefaultSyncAllowance,
- }
- w.filters = NewFilters(w)
+ w := New(nil)
keyID, err := w.GenerateSymKey()
if err != nil {
diff --git a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/peer_test.go b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/peer_test.go
index 65e62d96..c5b044e1 100644
--- a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/peer_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/peer_test.go
@@ -232,7 +232,7 @@ func initialize(t *testing.T) {
func startServer(t *testing.T, s *p2p.Server) {
err := s.Start()
if err != nil {
- t.Fatalf("failed to start the fisrt server.")
+ t.Fatalf("failed to start the first server. err: %v", err)
}
atomic.AddInt64(&result.started, 1)