VDB-327 Constantinople prep (#135)

* Bump geth to 1.8.20 for Constantinople

* Fix conflicting import/toml source for logrus
Edvard Hübinette authored on 2019-01-14 11:31:28 +01:00; committed by GitHub
parent 5efd683c54
commit d41209d293
235 changed files with 11115 additions and 7200 deletions

18
Gopkg.lock generated

@@ -1,6 +1,17 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:48a213e9dc4880bbbd6999309a476fa4d3cc67560aa7127154cf8ea95bd464c2"
name = "github.com/allegro/bigcache"
packages = [
".",
"queue",
]
pruneopts = ""
revision = "f31987a23e44c5121ef8c8b2f2ea2e8ffa37b068"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:a313376bcbcce8ae8bddb8089a7293e0473a0f8e9e3710d6244e09e81875ccf0"
@@ -26,7 +37,7 @@
version = "v1.7.1"
[[projects]]
digest = "1:c205f1963071408c1fac73c1b37c86ef9b98d80f17e690a2239853cde255ad3d"
digest = "1:a9c8210eb5d36a9a6e66953dc3d3cabd3afbbfb4f50baab0db1af1b723254b82"
name = "github.com/ethereum/go-ethereum"
packages = [
".",
@@ -64,8 +75,8 @@
"trie",
]
pruneopts = ""
revision = "58632d44021bf095b43a1bb2443e6e3690a94739"
version = "v1.8.18"
revision = "24d727b6d6e2c0cde222fa12155c4a6db5caaf2e"
version = "v1.8.20"
[[projects]]
digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
@@ -537,6 +548,7 @@
"github.com/ethereum/go-ethereum/crypto",
"github.com/ethereum/go-ethereum/ethclient",
"github.com/ethereum/go-ethereum/ethdb",
"github.com/ethereum/go-ethereum/log",
"github.com/ethereum/go-ethereum/p2p",
"github.com/ethereum/go-ethereum/p2p/discv5",
"github.com/ethereum/go-ethereum/params",

Gopkg.toml

@@ -42,7 +42,7 @@
name = "github.com/lib/pq"
[[constraint]]
name = "gopkg.in/Sirupsen/logrus.v1"
name = "github.com/sirupsen/logrus"
version = "1.2.0"
[[constraint]]
@@ -51,4 +51,4 @@
[[constraint]]
name = "github.com/ethereum/go-ethereum"
version = "1.8.18"
version = "1.8.20"

5
vendor/github.com/allegro/bigcache/.gitignore generated vendored Normal file

@@ -0,0 +1,5 @@
.idea
.DS_Store
/server/server.exe
/server/server
CHANGELOG.md

31
vendor/github.com/allegro/bigcache/.travis.yml generated vendored Normal file

@@ -0,0 +1,31 @@
language: go
go:
- 1.x
- tip
matrix:
allow_failures:
- go: tip
fast_finish: true
before_install:
- go get github.com/modocache/gover
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
- go get golang.org/x/tools/cmd/goimports
- go get github.com/golang/lint/golint
- go get github.com/stretchr/testify/assert
- go get github.com/gordonklaus/ineffassign
script:
- gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. The following files have problems:\n $unformatted" && false)
- diff <(echo -n) <(gofmt -s -d .)
- golint ./... # This won't break the build, just show warnings
- ineffassign .
- go vet ./...
- go test -race -count=1 -coverprofile=queue.coverprofile ./queue
- go test -race -count=1 -coverprofile=server.coverprofile ./server
- go test -race -count=1 -coverprofile=main.coverprofile
- $HOME/gopath/bin/gover
- $HOME/gopath/bin/goveralls -coverprofile=gover.coverprofile -service travis-ci

201
vendor/github.com/allegro/bigcache/LICENSE generated vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

145
vendor/github.com/allegro/bigcache/README.md generated vendored Normal file

@@ -0,0 +1,145 @@
# BigCache [![Build Status](https://travis-ci.org/allegro/bigcache.svg?branch=master)](https://travis-ci.org/allegro/bigcache)&nbsp;[![Coverage Status](https://coveralls.io/repos/github/allegro/bigcache/badge.svg?branch=master)](https://coveralls.io/github/allegro/bigcache?branch=master)&nbsp;[![GoDoc](https://godoc.org/github.com/allegro/bigcache?status.svg)](https://godoc.org/github.com/allegro/bigcache)&nbsp;[![Go Report Card](https://goreportcard.com/badge/github.com/allegro/bigcache)](https://goreportcard.com/report/github.com/allegro/bigcache)
Fast, concurrent, evicting in-memory cache written to keep a large number of entries without degrading performance.
BigCache keeps entries on the heap but avoids GC for them. To achieve that it operates on raw byte arrays,
so in most use cases entries will need to be (de)serialized in front of the cache.
## Usage
### Simple initialization
```go
import (
	"fmt"
	"time"

	"github.com/allegro/bigcache"
)

cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
cache.Set("my-unique-key", []byte("value"))

entry, _ := cache.Get("my-unique-key")
fmt.Println(string(entry))
```
### Custom initialization
When the cache load can be predicted in advance, custom initialization is preferable because it avoids
additional memory allocations.
```go
import (
	"fmt"
	"log"
	"time"

	"github.com/allegro/bigcache"
)

config := bigcache.Config{
	// number of shards (must be a power of 2)
	Shards: 1024,
	// time after which entry can be evicted
	LifeWindow: 10 * time.Minute,
	// rps * lifeWindow, used only in initial memory allocation
	MaxEntriesInWindow: 1000 * 10 * 60,
	// max entry size in bytes, used only in initial memory allocation
	MaxEntrySize: 500,
	// prints information about additional memory allocation
	Verbose: true,
	// cache will not allocate more memory than this limit, value in MB
	// if value is reached then the oldest entries can be overridden for the new ones
	// 0 value means no size limit
	HardMaxCacheSize: 8192,
	// callback fired when the oldest entry is removed because of its
	// expiration time or no space left for the new entry. Default value is nil which
	// means no callback and it prevents from unwrapping the oldest entry.
	OnRemove: nil,
}

cache, initErr := bigcache.NewBigCache(config)
if initErr != nil {
	log.Fatal(initErr)
}

cache.Set("my-unique-key", []byte("value"))

if entry, err := cache.Get("my-unique-key"); err == nil {
	fmt.Println(string(entry))
}
```
## Benchmarks
Three caches were compared: bigcache, [freecache](https://github.com/coocood/freecache) and map.
Benchmark tests were run on an Intel i7-6700K with 32GB of RAM, on Windows 10.
### Writes and reads
```bash
cd caches_bench; go test -bench=. -benchtime=10s ./... -timeout 30m
BenchmarkMapSet-8 2000000 716 ns/op 336 B/op 3 allocs/op
BenchmarkConcurrentMapSet-8 1000000 1292 ns/op 347 B/op 8 allocs/op
BenchmarkFreeCacheSet-8 3000000 501 ns/op 371 B/op 3 allocs/op
BenchmarkBigCacheSet-8 3000000 482 ns/op 303 B/op 2 allocs/op
BenchmarkMapGet-8 5000000 309 ns/op 24 B/op 1 allocs/op
BenchmarkConcurrentMapGet-8 2000000 659 ns/op 24 B/op 2 allocs/op
BenchmarkFreeCacheGet-8 3000000 541 ns/op 152 B/op 3 allocs/op
BenchmarkBigCacheGet-8 3000000 420 ns/op 152 B/op 3 allocs/op
BenchmarkBigCacheSetParallel-8 10000000 184 ns/op 313 B/op 3 allocs/op
BenchmarkFreeCacheSetParallel-8 10000000 195 ns/op 357 B/op 4 allocs/op
BenchmarkConcurrentMapSetParallel-8 5000000 242 ns/op 200 B/op 6 allocs/op
BenchmarkBigCacheGetParallel-8 20000000 100 ns/op 152 B/op 4 allocs/op
BenchmarkFreeCacheGetParallel-8 10000000 133 ns/op 152 B/op 4 allocs/op
BenchmarkConcurrentMapGetParallel-8 10000000 202 ns/op 24 B/op 2 allocs/op
```
Writes and reads in bigcache are faster than in freecache.
Writes to map are the slowest.
### GC pause time
```bash
cd caches_bench; go run caches_gc_overhead_comparison.go
Number of entries: 20000000
GC pause for bigcache: 5.8658ms
GC pause for freecache: 32.4341ms
GC pause for map: 52.9661ms
```
The test shows how long the GC pauses are for caches filled with 20 million entries.
Bigcache and freecache have very similar GC pause times.
Both clearly reduce GC overhead compared to a plain map,
whose GC pause took more than 10 seconds.
## How it works
BigCache relies on an optimization introduced in version 1.5 of Go ([issue-9477](https://github.com/golang/go/issues/9477)):
if a map has no pointers in its keys and values, the GC will omit its content from scanning.
BigCache therefore uses a `map[uint64]uint32`, where keys are hashed and values are offsets of entries.
Entries are kept in a byte array, again to avoid GC scanning.
The byte array's size can grow to gigabytes without impacting performance,
because the GC will only see a single pointer to it.
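
For intuition, here is a minimal, self-contained sketch of that layout (hypothetical code, not BigCache's actual internals: it ignores sharding, hash collisions, eviction and copy-on-read):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

// index maps hashed keys to offsets in the slab. It holds no pointers,
// so the GC skips its contents entirely (the Go 1.5 optimization above).
var (
	index = map[uint64]uint32{}
	slab  []byte
)

func hash(key string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(key))
	return h.Sum64()
}

func set(key string, value []byte) {
	offset := uint32(len(slab))
	var header [4]byte
	binary.LittleEndian.PutUint32(header[:], uint32(len(value)))
	slab = append(slab, header[:]...) // length-prefix each entry
	slab = append(slab, value...)
	index[hash(key)] = offset
}

func get(key string) []byte {
	offset, ok := index[hash(key)]
	if !ok {
		return nil
	}
	size := binary.LittleEndian.Uint32(slab[offset:])
	return slab[offset+4 : offset+4+size]
}

func main() {
	set("my-unique-key", []byte("value"))
	fmt.Println(string(get("my-unique-key"))) // value
}
```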
## Bigcache vs Freecache
Both caches provide the same core features, but they reduce GC overhead in different ways:
bigcache relies on `map[uint64]uint32`, while freecache implements its own mapping built on
slices to reduce the number of pointers.
Results from the benchmark tests are presented above.
One advantage of bigcache over freecache is that you don't need to know
the size of the cache in advance: when bigcache is full,
it can allocate additional memory for new entries instead of
overwriting existing ones, as freecache currently does.
A hard maximum size can nevertheless be set in bigcache; see [HardMaxCacheSize](https://godoc.org/github.com/allegro/bigcache#Config).
## HTTP Server
This package also includes an easily deployable HTTP implementation of BigCache, which can be found in the [server](/server) package.
## More
The genesis of bigcache is described in the allegro.tech blog post: [writing a very fast cache service in Go](http://allegro.tech/2016/03/writing-fast-cache-service-in-go.html)
## License
BigCache is released under the Apache 2.0 license (see [LICENSE](LICENSE))

155
vendor/github.com/allegro/bigcache/bigcache.go generated vendored Normal file

@@ -0,0 +1,155 @@
package bigcache
import (
"fmt"
"time"
)
const (
minimumEntriesInShard = 10 // Minimum number of entries in single shard
)
// BigCache is a fast, concurrent, evicting cache created to keep a large number of entries without degrading performance.
// It keeps entries on the heap but avoids GC for them. To achieve that it operates on byte arrays,
// so entries will need to be (de)serialized in front of the cache in most use cases.
type BigCache struct {
shards []*cacheShard
lifeWindow uint64
clock clock
hash Hasher
config Config
shardMask uint64
maxShardSize uint32
}
// NewBigCache initializes a new instance of BigCache
func NewBigCache(config Config) (*BigCache, error) {
return newBigCache(config, &systemClock{})
}
func newBigCache(config Config, clock clock) (*BigCache, error) {
if !isPowerOfTwo(config.Shards) {
return nil, fmt.Errorf("Shards number must be power of two")
}
if config.Hasher == nil {
config.Hasher = newDefaultHasher()
}
cache := &BigCache{
shards: make([]*cacheShard, config.Shards),
lifeWindow: uint64(config.LifeWindow.Seconds()),
clock: clock,
hash: config.Hasher,
config: config,
shardMask: uint64(config.Shards - 1),
maxShardSize: uint32(config.maximumShardSize()),
}
var onRemove func(wrappedEntry []byte)
if config.OnRemove == nil {
onRemove = cache.notProvidedOnRemove
} else {
onRemove = cache.providedOnRemove
}
for i := 0; i < config.Shards; i++ {
cache.shards[i] = initNewShard(config, onRemove, clock)
}
if config.CleanWindow > 0 {
go func() {
for t := range time.Tick(config.CleanWindow) {
cache.cleanUp(uint64(t.Unix()))
}
}()
}
return cache, nil
}
// Get reads entry for the key.
// It returns an EntryNotFoundError when
// no entry exists for the given key.
func (c *BigCache) Get(key string) ([]byte, error) {
hashedKey := c.hash.Sum64(key)
shard := c.getShard(hashedKey)
return shard.get(key, hashedKey)
}
// Set saves entry under the key
func (c *BigCache) Set(key string, entry []byte) error {
hashedKey := c.hash.Sum64(key)
shard := c.getShard(hashedKey)
return shard.set(key, hashedKey, entry)
}
// Delete removes the key
func (c *BigCache) Delete(key string) error {
hashedKey := c.hash.Sum64(key)
shard := c.getShard(hashedKey)
return shard.del(key, hashedKey)
}
// Reset empties all cache shards
func (c *BigCache) Reset() error {
for _, shard := range c.shards {
shard.reset(c.config)
}
return nil
}
// Len computes number of entries in cache
func (c *BigCache) Len() int {
var len int
for _, shard := range c.shards {
len += shard.len()
}
return len
}
// Stats returns cache's statistics
func (c *BigCache) Stats() Stats {
var s Stats
for _, shard := range c.shards {
tmp := shard.getStats()
s.Hits += tmp.Hits
s.Misses += tmp.Misses
s.DelHits += tmp.DelHits
s.DelMisses += tmp.DelMisses
s.Collisions += tmp.Collisions
}
return s
}
// Iterator returns an iterator that can be used to iterate over EntryInfo's from the whole cache.
func (c *BigCache) Iterator() *EntryInfoIterator {
return newIterator(c)
}
func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
oldestTimestamp := readTimestampFromEntry(oldestEntry)
if currentTimestamp-oldestTimestamp > c.lifeWindow {
evict()
return true
}
return false
}
func (c *BigCache) cleanUp(currentTimestamp uint64) {
for _, shard := range c.shards {
shard.cleanUp(currentTimestamp)
}
}
func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
return c.shards[hashedKey&c.shardMask]
}
func (c *BigCache) providedOnRemove(wrappedEntry []byte) {
c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
}
func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte) {
}


@@ -0,0 +1,141 @@
package bigcache
import (
"fmt"
"math/rand"
"strconv"
"testing"
"time"
)
var message = blob('a', 256)
func BenchmarkWriteToCacheWith1Shard(b *testing.B) {
writeToCache(b, 1, 100*time.Second, b.N)
}
func BenchmarkWriteToLimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) {
m := blob('a', 1024)
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 100 * time.Second,
MaxEntriesInWindow: 100,
MaxEntrySize: 256,
HardMaxCacheSize: 1,
})
b.ReportAllocs()
for i := 0; i < b.N; i++ {
cache.Set(fmt.Sprintf("key-%d", i), m)
}
}
func BenchmarkWriteToUnlimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) {
m := blob('a', 1024)
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 100 * time.Second,
MaxEntriesInWindow: 100,
MaxEntrySize: 256,
})
b.ReportAllocs()
for i := 0; i < b.N; i++ {
cache.Set(fmt.Sprintf("key-%d", i), m)
}
}
func BenchmarkWriteToCache(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
writeToCache(b, shards, 100*time.Second, b.N)
})
}
}
func BenchmarkReadFromCache(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
readFromCache(b, 1024)
})
}
}
func BenchmarkIterateOverCache(b *testing.B) {
m := blob('a', 1)
for _, shards := range []int{512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
cache, _ := NewBigCache(Config{
Shards: shards,
LifeWindow: 1000 * time.Second,
MaxEntriesInWindow: max(b.N, 100),
MaxEntrySize: 500,
})
for i := 0; i < b.N; i++ {
cache.Set(fmt.Sprintf("key-%d", i), m)
}
b.ResetTimer()
it := cache.Iterator()
b.RunParallel(func(pb *testing.PB) {
b.ReportAllocs()
for pb.Next() {
if it.SetNext() {
it.Value()
}
}
})
})
}
}
func BenchmarkWriteToCacheWith1024ShardsAndSmallShardInitSize(b *testing.B) {
writeToCache(b, 1024, 100*time.Second, 100)
}
func writeToCache(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) {
cache, _ := NewBigCache(Config{
Shards: shards,
LifeWindow: lifeWindow,
MaxEntriesInWindow: max(requestsInLifeWindow, 100),
MaxEntrySize: 500,
})
rand.Seed(time.Now().Unix())
b.RunParallel(func(pb *testing.PB) {
id := rand.Int()
counter := 0
b.ReportAllocs()
for pb.Next() {
cache.Set(fmt.Sprintf("key-%d-%d", id, counter), message)
counter = counter + 1
}
})
}
func readFromCache(b *testing.B, shards int) {
cache, _ := NewBigCache(Config{
Shards: shards,
LifeWindow: 1000 * time.Second,
MaxEntriesInWindow: max(b.N, 100),
MaxEntrySize: 500,
})
for i := 0; i < b.N; i++ {
cache.Set(strconv.Itoa(i), message)
}
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
b.ReportAllocs()
for pb.Next() {
cache.Get(strconv.Itoa(rand.Intn(b.N)))
}
})
}

579
vendor/github.com/allegro/bigcache/bigcache_test.go generated vendored Normal file

@@ -0,0 +1,579 @@
package bigcache
import (
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
var sink []byte
func TestParallel(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(DefaultConfig(5 * time.Second))
value := []byte("value")
var wg sync.WaitGroup
wg.Add(3)
keys := 1337
// when
go func() {
defer wg.Done()
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), value)
}
}()
go func() {
defer wg.Done()
for i := 0; i < keys; i++ {
sink, _ = cache.Get(fmt.Sprintf("key%d", i))
}
}()
go func() {
defer wg.Done()
for i := 0; i < keys; i++ {
cache.Delete(fmt.Sprintf("key%d", i))
}
}()
// then
wg.Wait()
}
func TestWriteAndGetOnCache(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(DefaultConfig(5 * time.Second))
value := []byte("value")
// when
cache.Set("key", value)
cachedValue, err := cache.Get("key")
// then
assert.NoError(t, err)
assert.Equal(t, value, cachedValue)
}
func TestConstructCacheWithDefaultHasher(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 16,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 10,
MaxEntrySize: 256,
})
assert.IsType(t, fnv64a{}, cache.hash)
}
func TestWillReturnErrorOnInvalidNumberOfPartitions(t *testing.T) {
t.Parallel()
// given
cache, error := NewBigCache(Config{
Shards: 18,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 10,
MaxEntrySize: 256,
})
assert.Nil(t, cache)
assert.Error(t, error, "Shards number must be power of two")
}
func TestEntryNotFound(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 16,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 10,
MaxEntrySize: 256,
})
// when
_, err := cache.Get("nonExistingKey")
// then
assert.EqualError(t, err, "Entry \"nonExistingKey\" not found")
}
func TestTimingEviction(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
cache, _ := newBigCache(Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
}, &clock)
// when
cache.Set("key", []byte("value"))
clock.set(5)
cache.Set("key2", []byte("value2"))
_, err := cache.Get("key")
// then
assert.EqualError(t, err, "Entry \"key\" not found")
}
func TestTimingEvictionShouldEvictOnlyFromUpdatedShard(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
cache, _ := newBigCache(Config{
Shards: 4,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
}, &clock)
// when
cache.Set("key", []byte("value"))
clock.set(5)
cache.Set("key2", []byte("value 2"))
value, err := cache.Get("key")
// then
assert.NoError(t, err, "Entry \"key\" not found")
assert.Equal(t, []byte("value"), value)
}
func TestCleanShouldEvictAll(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 4,
LifeWindow: time.Second,
CleanWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
// when
cache.Set("key", []byte("value"))
<-time.After(3 * time.Second)
value, err := cache.Get("key")
// then
assert.EqualError(t, err, "Entry \"key\" not found")
assert.Equal(t, value, []byte(nil))
}
func TestOnRemoveCallback(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
onRemoveInvoked := false
onRemove := func(key string, entry []byte) {
onRemoveInvoked = true
assert.Equal(t, "key", key)
assert.Equal(t, []byte("value"), entry)
}
cache, _ := newBigCache(Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
OnRemove: onRemove,
}, &clock)
// when
cache.Set("key", []byte("value"))
clock.set(5)
cache.Set("key2", []byte("value2"))
// then
assert.True(t, onRemoveInvoked)
}
func TestCacheLen(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
keys := 1337
// when
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
// then
assert.Equal(t, keys, cache.Len())
}
func TestCacheStats(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
// when
for i := 0; i < 100; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
for i := 0; i < 10; i++ {
value, err := cache.Get(fmt.Sprintf("key%d", i))
assert.Nil(t, err)
assert.Equal(t, string(value), "value")
}
for i := 100; i < 110; i++ {
_, err := cache.Get(fmt.Sprintf("key%d", i))
assert.Error(t, err)
}
for i := 10; i < 20; i++ {
err := cache.Delete(fmt.Sprintf("key%d", i))
assert.Nil(t, err)
}
for i := 110; i < 120; i++ {
err := cache.Delete(fmt.Sprintf("key%d", i))
assert.Error(t, err)
}
// then
stats := cache.Stats()
assert.Equal(t, stats.Hits, int64(10))
assert.Equal(t, stats.Misses, int64(10))
assert.Equal(t, stats.DelHits, int64(10))
assert.Equal(t, stats.DelMisses, int64(10))
}
func TestCacheDel(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(DefaultConfig(time.Second))
// when
err := cache.Delete("nonExistingKey")
// then
assert.Equal(t, err.Error(), "Entry \"nonExistingKey\" not found")
// and when
cache.Set("existingKey", nil)
err = cache.Delete("existingKey")
cachedValue, _ := cache.Get("existingKey")
// then
assert.Nil(t, err)
assert.Len(t, cachedValue, 0)
}
func TestCacheReset(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
keys := 1337
// when
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
// then
assert.Equal(t, keys, cache.Len())
// and when
cache.Reset()
// then
assert.Equal(t, 0, cache.Len())
// and when
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
// then
assert.Equal(t, keys, cache.Len())
}
func TestIterateOnResetCache(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
keys := 1337
// when
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
cache.Reset()
// then
iterator := cache.Iterator()
assert.Equal(t, false, iterator.SetNext())
}
func TestGetOnResetCache(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
keys := 1337
// when
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
cache.Reset()
// then
value, err := cache.Get("key1")
assert.Equal(t, err.Error(), "Entry \"key1\" not found")
assert.Equal(t, value, []byte(nil))
}
func TestEntryUpdate(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
cache, _ := newBigCache(Config{
Shards: 1,
LifeWindow: 6 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
}, &clock)
// when
cache.Set("key", []byte("value"))
clock.set(5)
cache.Set("key", []byte("value2"))
clock.set(7)
cache.Set("key2", []byte("value3"))
cachedValue, _ := cache.Get("key")
// then
assert.Equal(t, []byte("value2"), cachedValue)
}
func TestOldestEntryDeletionWhenMaxCacheSizeIsReached(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 1,
HardMaxCacheSize: 1,
})
// when
cache.Set("key1", blob('a', 1024*400))
cache.Set("key2", blob('b', 1024*400))
cache.Set("key3", blob('c', 1024*800))
_, key1Err := cache.Get("key1")
_, key2Err := cache.Get("key2")
entry3, _ := cache.Get("key3")
// then
assert.EqualError(t, key1Err, "Entry \"key1\" not found")
assert.EqualError(t, key2Err, "Entry \"key2\" not found")
assert.Equal(t, blob('c', 1024*800), entry3)
}
func TestRetrievingEntryShouldCopy(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 1,
HardMaxCacheSize: 1,
})
cache.Set("key1", blob('a', 1024*400))
value, key1Err := cache.Get("key1")
// when
// override queue
cache.Set("key2", blob('b', 1024*400))
cache.Set("key3", blob('c', 1024*400))
cache.Set("key4", blob('d', 1024*400))
cache.Set("key5", blob('d', 1024*400))
// then
assert.Nil(t, key1Err)
assert.Equal(t, blob('a', 1024*400), value)
}
func TestEntryBiggerThanMaxShardSizeError(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 1,
HardMaxCacheSize: 1,
})
// when
err := cache.Set("key1", blob('a', 1024*1025))
// then
assert.EqualError(t, err, "entry is bigger than max shard size")
}
func TestHashCollision(t *testing.T) {
t.Parallel()
ml := &mockedLogger{}
// given
cache, _ := NewBigCache(Config{
Shards: 16,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 10,
MaxEntrySize: 256,
Verbose: true,
Hasher: hashStub(5),
Logger: ml,
})
// when
cache.Set("liquid", []byte("value"))
cachedValue, err := cache.Get("liquid")
// then
assert.NoError(t, err)
assert.Equal(t, []byte("value"), cachedValue)
// when
cache.Set("costarring", []byte("value 2"))
cachedValue, err = cache.Get("costarring")
// then
assert.NoError(t, err)
assert.Equal(t, []byte("value 2"), cachedValue)
// when
cachedValue, err = cache.Get("liquid")
// then
assert.Error(t, err)
assert.Nil(t, cachedValue)
assert.NotEqual(t, "", ml.lastFormat)
assert.Equal(t, cache.Stats().Collisions, int64(1))
}
func TestNilValueCaching(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 1,
HardMaxCacheSize: 1,
})
// when
cache.Set("Kierkegaard", []byte{})
cachedValue, err := cache.Get("Kierkegaard")
// then
assert.NoError(t, err)
assert.Equal(t, []byte{}, cachedValue)
// when
cache.Set("Sartre", nil)
cachedValue, err = cache.Get("Sartre")
// then
assert.NoError(t, err)
assert.Equal(t, []byte{}, cachedValue)
// when
cache.Set("Nietzsche", []byte(nil))
cachedValue, err = cache.Get("Nietzsche")
// then
assert.NoError(t, err)
assert.Equal(t, []byte{}, cachedValue)
}
type mockedLogger struct {
lastFormat string
lastArgs []interface{}
}
func (ml *mockedLogger) Printf(format string, v ...interface{}) {
ml.lastFormat = format
ml.lastArgs = v
}
type mockedClock struct {
value int64
}
func (mc *mockedClock) epoch() int64 {
return mc.value
}
func (mc *mockedClock) set(value int64) {
mc.value = value
}
func blob(char byte, len int) []byte {
b := make([]byte, len)
for index := range b {
b[index] = char
}
return b
}


@@ -0,0 +1,219 @@
package main
import (
"fmt"
"math/rand"
"sync"
"testing"
"time"
"github.com/allegro/bigcache"
"github.com/coocood/freecache"
)
const maxEntrySize = 256
func BenchmarkMapSet(b *testing.B) {
m := make(map[string][]byte)
for i := 0; i < b.N; i++ {
m[key(i)] = value()
}
}
func BenchmarkConcurrentMapSet(b *testing.B) {
var m sync.Map
for i := 0; i < b.N; i++ {
m.Store(key(i), value())
}
}
func BenchmarkFreeCacheSet(b *testing.B) {
cache := freecache.NewCache(b.N * maxEntrySize)
for i := 0; i < b.N; i++ {
cache.Set([]byte(key(i)), value(), 0)
}
}
func BenchmarkBigCacheSet(b *testing.B) {
cache := initBigCache(b.N)
for i := 0; i < b.N; i++ {
cache.Set(key(i), value())
}
}
func BenchmarkMapGet(b *testing.B) {
b.StopTimer()
m := make(map[string][]byte)
for i := 0; i < b.N; i++ {
m[key(i)] = value()
}
b.StartTimer()
hitCount := 0
for i := 0; i < b.N; i++ {
if m[key(i)] != nil {
hitCount++
}
}
}
func BenchmarkConcurrentMapGet(b *testing.B) {
b.StopTimer()
var m sync.Map
for i := 0; i < b.N; i++ {
m.Store(key(i), value())
}
b.StartTimer()
hitCounter := 0
for i := 0; i < b.N; i++ {
_, ok := m.Load(key(i))
if ok {
hitCounter++
}
}
}
func BenchmarkFreeCacheGet(b *testing.B) {
b.StopTimer()
cache := freecache.NewCache(b.N * maxEntrySize)
for i := 0; i < b.N; i++ {
cache.Set([]byte(key(i)), value(), 0)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
cache.Get([]byte(key(i)))
}
}
func BenchmarkBigCacheGet(b *testing.B) {
b.StopTimer()
cache := initBigCache(b.N)
for i := 0; i < b.N; i++ {
cache.Set(key(i), value())
}
b.StartTimer()
for i := 0; i < b.N; i++ {
cache.Get(key(i))
}
}
func BenchmarkBigCacheSetParallel(b *testing.B) {
cache := initBigCache(b.N)
rand.Seed(time.Now().Unix())
b.RunParallel(func(pb *testing.PB) {
id := rand.Intn(1000)
counter := 0
for pb.Next() {
cache.Set(parallelKey(id, counter), value())
counter = counter + 1
}
})
}
func BenchmarkFreeCacheSetParallel(b *testing.B) {
cache := freecache.NewCache(b.N * maxEntrySize)
rand.Seed(time.Now().Unix())
b.RunParallel(func(pb *testing.PB) {
id := rand.Intn(1000)
counter := 0
for pb.Next() {
cache.Set([]byte(parallelKey(id, counter)), value(), 0)
counter = counter + 1
}
})
}
func BenchmarkConcurrentMapSetParallel(b *testing.B) {
var m sync.Map
b.RunParallel(func(pb *testing.PB) {
id := rand.Intn(1000)
for pb.Next() {
m.Store(key(id), value())
}
})
}
func BenchmarkBigCacheGetParallel(b *testing.B) {
b.StopTimer()
cache := initBigCache(b.N)
for i := 0; i < b.N; i++ {
cache.Set(key(i), value())
}
b.StartTimer()
b.RunParallel(func(pb *testing.PB) {
counter := 0
for pb.Next() {
cache.Get(key(counter))
counter = counter + 1
}
})
}
func BenchmarkFreeCacheGetParallel(b *testing.B) {
b.StopTimer()
cache := freecache.NewCache(b.N * maxEntrySize)
for i := 0; i < b.N; i++ {
cache.Set([]byte(key(i)), value(), 0)
}
b.StartTimer()
b.RunParallel(func(pb *testing.PB) {
counter := 0
for pb.Next() {
cache.Get([]byte(key(counter)))
counter = counter + 1
}
})
}
func BenchmarkConcurrentMapGetParallel(b *testing.B) {
b.StopTimer()
var m sync.Map
for i := 0; i < b.N; i++ {
m.Store(key(i), value())
}
b.StartTimer()
hitCount := 0
b.RunParallel(func(pb *testing.PB) {
id := rand.Intn(1000)
for pb.Next() {
_, ok := m.Load(key(id))
if ok {
hitCount++
}
}
})
}
func key(i int) string {
return fmt.Sprintf("key-%010d", i)
}
func value() []byte {
return make([]byte, 100)
}
func parallelKey(threadID int, counter int) string {
return fmt.Sprintf("key-%04d-%06d", threadID, counter)
}
func initBigCache(entriesInWindow int) *bigcache.BigCache {
cache, _ := bigcache.NewBigCache(bigcache.Config{
Shards: 256,
LifeWindow: 10 * time.Minute,
MaxEntriesInWindow: entriesInWindow,
MaxEntrySize: maxEntrySize,
Verbose: true,
})
return cache
}

96
vendor/github.com/allegro/bigcache/caches_bench/caches_gc_overhead_comparison.go generated vendored Normal file

@@ -0,0 +1,96 @@
package main
import (
"fmt"
"runtime"
"runtime/debug"
"time"
"github.com/allegro/bigcache"
"github.com/coocood/freecache"
)
func gcPause() time.Duration {
runtime.GC()
var stats debug.GCStats
debug.ReadGCStats(&stats)
return stats.PauseTotal
}
const (
entries = 20000000
valueSize = 100
)
func main() {
debug.SetGCPercent(10)
fmt.Println("Number of entries: ", entries)
config := bigcache.Config{
Shards: 256,
LifeWindow: 100 * time.Minute,
MaxEntriesInWindow: entries,
MaxEntrySize: 200,
Verbose: true,
}
bigcache, _ := bigcache.NewBigCache(config)
for i := 0; i < entries; i++ {
key, val := generateKeyValue(i, valueSize)
bigcache.Set(key, val)
}
firstKey, _ := generateKeyValue(1, valueSize)
checkFirstElement(bigcache.Get(firstKey))
fmt.Println("GC pause for bigcache: ", gcPause())
bigcache = nil
gcPause()
//------------------------------------------
freeCache := freecache.NewCache(entries * 200) //allocate entries * 200 bytes
for i := 0; i < entries; i++ {
key, val := generateKeyValue(i, valueSize)
if err := freeCache.Set([]byte(key), val, 0); err != nil {
fmt.Println("Error in set: ", err.Error())
}
}
firstKey, _ = generateKeyValue(1, valueSize)
checkFirstElement(freeCache.Get([]byte(firstKey)))
if freeCache.OverwriteCount() != 0 {
fmt.Println("Overwritten: ", freeCache.OverwriteCount())
}
fmt.Println("GC pause for freecache: ", gcPause())
freeCache = nil
gcPause()
//------------------------------------------
mapCache := make(map[string][]byte)
for i := 0; i < entries; i++ {
key, val := generateKeyValue(i, valueSize)
mapCache[key] = val
}
fmt.Println("GC pause for map: ", gcPause())
}
func checkFirstElement(val []byte, err error) {
_, expectedVal := generateKeyValue(1, valueSize)
if err != nil {
fmt.Println("Error in get: ", err.Error())
} else if string(val) != string(expectedVal) {
fmt.Println("Wrong first element: ", string(val))
}
}
func generateKeyValue(index int, valSize int) (string, []byte) {
key := fmt.Sprintf("key-%010d", index)
fixedNumber := []byte(fmt.Sprintf("%010d", index))
val := append(make([]byte, valSize-10), fixedNumber...)
return key, val
}

14
vendor/github.com/allegro/bigcache/clock.go generated vendored Normal file

@@ -0,0 +1,14 @@
package bigcache
import "time"
type clock interface {
epoch() int64
}
type systemClock struct {
}
func (c systemClock) epoch() int64 {
return time.Now().Unix()
}

67
vendor/github.com/allegro/bigcache/config.go generated vendored Normal file

@@ -0,0 +1,67 @@
package bigcache
import "time"
// Config for BigCache
type Config struct {
// Number of cache shards, value must be a power of two
Shards int
// Time after which entry can be evicted
LifeWindow time.Duration
// Interval between removing expired entries (clean up).
// If set to <= 0 then no action is performed. Setting to < 1 second is counterproductive — bigcache has a one second resolution.
CleanWindow time.Duration
// Max number of entries in life window. Used only to calculate initial size for cache shards.
// When a proper value is set, no additional memory allocation occurs.
MaxEntriesInWindow int
// Max size of entry in bytes. Used only to calculate initial size for cache shards.
MaxEntrySize int
// Verbose mode prints information about new memory allocation
Verbose bool
// Hasher used to map between string keys and unsigned 64bit integers, by default fnv64 hashing is used.
Hasher Hasher
// HardMaxCacheSize is a limit for the cache size in MB. The cache will not allocate more memory than this limit.
// It can protect the application from consuming all available memory on the machine, and therefore from triggering the OOM killer.
// The default value is 0, which means unlimited size. When the limit is above 0 and is reached,
// the oldest entries are overwritten by new ones.
HardMaxCacheSize int
// OnRemove is a callback fired when the oldest entry is removed because of its expiration time or because there is
// no space left for a new entry. The default value is nil, which means no callback; this also avoids unwrapping the oldest entry.
OnRemove func(key string, entry []byte)
// Logger is a logging interface and used in combination with `Verbose`
// Defaults to `DefaultLogger()`
Logger Logger
}
// DefaultConfig initializes the config with default values.
// When the load for BigCache can be predicted in advance, it is better to use a custom config.
func DefaultConfig(eviction time.Duration) Config {
return Config{
Shards: 1024,
LifeWindow: eviction,
CleanWindow: 0,
MaxEntriesInWindow: 1000 * 10 * 60,
MaxEntrySize: 500,
Verbose: true,
Hasher: newDefaultHasher(),
HardMaxCacheSize: 0,
Logger: DefaultLogger(),
}
}
// initialShardSize computes initial shard size
func (c Config) initialShardSize() int {
return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard)
}
// maximumShardSize computes maximum shard size
func (c Config) maximumShardSize() int {
maxShardSize := 0
if c.HardMaxCacheSize > 0 {
maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards
}
return maxShardSize
}
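
A quick illustration of the arithmetic above, assuming convertMBToBytes (not shown in this diff) is the plain MB-to-bytes multiplication: with the README's example values of HardMaxCacheSize: 8192 and Shards: 1024, each shard is capped at 8 MB.

```go
// Hypothetical standalone sketch of the maximumShardSize arithmetic; not part of this diff.
package main

import "fmt"

func main() {
	hardMaxCacheSizeMB := 8192 // from the README example
	shards := 1024
	maxShardSize := hardMaxCacheSizeMB * 1024 * 1024 / shards // convertMBToBytes assumed to be MB * 1024 * 1024
	fmt.Println(maxShardSize) // 8388608 bytes, i.e. 8 MB per shard
}
```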

70
vendor/github.com/allegro/bigcache/encoding.go generated vendored Normal file

@@ -0,0 +1,70 @@
package bigcache
import (
"encoding/binary"
"reflect"
"unsafe"
)
const (
timestampSizeInBytes = 8 // Number of bytes used for timestamp
hashSizeInBytes = 8 // Number of bytes used for hash
keySizeInBytes = 2 // Number of bytes used for size of entry key
headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers
)
func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte {
keyLength := len(key)
blobLength := len(entry) + headersSizeInBytes + keyLength
if blobLength > len(*buffer) {
*buffer = make([]byte, blobLength)
}
blob := *buffer
binary.LittleEndian.PutUint64(blob, timestamp)
binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash)
binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength))
copy(blob[headersSizeInBytes:], key)
copy(blob[headersSizeInBytes+keyLength:], entry)
return blob[:blobLength]
}
func readEntry(data []byte) []byte {
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
// copy on read
dst := make([]byte, len(data)-int(headersSizeInBytes+length))
copy(dst, data[headersSizeInBytes+length:])
return dst
}
func readTimestampFromEntry(data []byte) uint64 {
return binary.LittleEndian.Uint64(data)
}
func readKeyFromEntry(data []byte) string {
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
// copy on read
dst := make([]byte, length)
copy(dst, data[headersSizeInBytes:headersSizeInBytes+length])
return bytesToString(dst)
}
func bytesToString(b []byte) string {
bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len}
return *(*string)(unsafe.Pointer(&strHeader))
}
func readHashFromEntry(data []byte) uint64 {
return binary.LittleEndian.Uint64(data[timestampSizeInBytes:])
}
func resetKeyFromEntry(data []byte) {
binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0)
}
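
To make the header layout concrete, here is a standalone sketch that builds the same wire format by hand (timestamp 8 bytes, hash 8 bytes, key length 2 bytes, then key and value); the timestamp and hash values are arbitrary placeholders:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	key, value := "key", []byte("data")
	// headersSizeInBytes = 8 + 8 + 2 = 18
	blob := make([]byte, 18+len(key)+len(value))
	binary.LittleEndian.PutUint64(blob[0:], 1547459488)        // timestamp (placeholder)
	binary.LittleEndian.PutUint64(blob[8:], 42)                // key hash (placeholder)
	binary.LittleEndian.PutUint16(blob[16:], uint16(len(key))) // key length
	copy(blob[18:], key)
	copy(blob[18+len(key):], value)
	fmt.Println(len(blob)) // 25: an 18-byte header, a 3-byte key and a 4-byte value
}
```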

46
vendor/github.com/allegro/bigcache/encoding_test.go generated vendored Normal file

@@ -0,0 +1,46 @@
package bigcache
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestEncodeDecode(t *testing.T) {
// given
now := uint64(time.Now().Unix())
hash := uint64(42)
key := "key"
data := []byte("data")
buffer := make([]byte, 100)
// when
wrapped := wrapEntry(now, hash, key, data, &buffer)
// then
assert.Equal(t, key, readKeyFromEntry(wrapped))
assert.Equal(t, hash, readHashFromEntry(wrapped))
assert.Equal(t, now, readTimestampFromEntry(wrapped))
assert.Equal(t, data, readEntry(wrapped))
assert.Equal(t, 100, len(buffer))
}
func TestAllocateBiggerBuffer(t *testing.T) {
//given
now := uint64(time.Now().Unix())
hash := uint64(42)
key := "1"
data := []byte("2")
buffer := make([]byte, 1)
// when
wrapped := wrapEntry(now, hash, key, data, &buffer)
// then
assert.Equal(t, key, readKeyFromEntry(wrapped))
assert.Equal(t, hash, readHashFromEntry(wrapped))
assert.Equal(t, now, readTimestampFromEntry(wrapped))
assert.Equal(t, data, readEntry(wrapped))
assert.Equal(t, 2+headersSizeInBytes, len(buffer))
}


@@ -0,0 +1,17 @@
package bigcache
import "fmt"
// EntryNotFoundError is an error type returned when no entry was found for the provided key
type EntryNotFoundError struct {
message string
}
func notFound(key string) error {
return &EntryNotFoundError{fmt.Sprintf("Entry %q not found", key)}
}
// Error returns the error message for a missing entry.
func (e EntryNotFoundError) Error() string {
return e.message
}

28
vendor/github.com/allegro/bigcache/fnv.go generated vendored Normal file

@@ -0,0 +1,28 @@
package bigcache
// newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations.
// Its Sum64 method will lay the value out in big-endian byte order.
// See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function
func newDefaultHasher() Hasher {
return fnv64a{}
}
type fnv64a struct{}
const (
// offset64 FNVa offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
offset64 = 14695981039346656037
// prime64 FNVa prime value. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
prime64 = 1099511628211
)
// Sum64 gets the string and returns its uint64 hash value.
func (f fnv64a) Sum64(key string) uint64 {
var hash uint64 = offset64
for i := 0; i < len(key); i++ {
hash ^= uint64(key[i])
hash *= prime64
}
return hash
}

18
vendor/github.com/allegro/bigcache/fnv_bench_test.go generated vendored Normal file

@@ -0,0 +1,18 @@
package bigcache
import "testing"
var text = "abcdefg"
func BenchmarkFnvHashSum64(b *testing.B) {
h := newDefaultHasher()
for i := 0; i < b.N; i++ {
h.Sum64(text)
}
}
func BenchmarkFnvHashStdLibSum64(b *testing.B) {
for i := 0; i < b.N; i++ {
stdLibFnvSum64(text)
}
}

35
vendor/github.com/allegro/bigcache/fnv_test.go generated vendored Normal file

@@ -0,0 +1,35 @@
package bigcache
import (
"hash/fnv"
"testing"
)
type testCase struct {
text string
expectedHash uint64
}
var testCases = []testCase{
{"", stdLibFnvSum64("")},
{"a", stdLibFnvSum64("a")},
{"ab", stdLibFnvSum64("ab")},
{"abc", stdLibFnvSum64("abc")},
{"some longer and more complicated text", stdLibFnvSum64("some longer and more complicated text")},
}
func TestFnvHashSum64(t *testing.T) {
h := newDefaultHasher()
for _, testCase := range testCases {
hashed := h.Sum64(testCase.text)
if hashed != testCase.expectedHash {
t.Errorf("hash(%q) = %d want %d", testCase.text, hashed, testCase.expectedHash)
}
}
}
func stdLibFnvSum64(key string) uint64 {
h := fnv.New64a()
h.Write([]byte(key))
return h.Sum64()
}

8
vendor/github.com/allegro/bigcache/hash.go generated vendored Normal file

@@ -0,0 +1,8 @@
package bigcache
// Hasher is responsible for generating an unsigned 64-bit hash of a provided string. A Hasher should minimize collisions
// (generating the same hash for different strings), and while performance is also important, fast functions are preferable
// (e.g. the FarmHash family).
type Hasher interface {
Sum64(string) uint64
}

7
vendor/github.com/allegro/bigcache/hash_test.go generated vendored Normal file

@@ -0,0 +1,7 @@
package bigcache
type hashStub uint64
func (stub hashStub) Sum64(_ string) uint64 {
return uint64(stub)
}

122
vendor/github.com/allegro/bigcache/iterator.go generated vendored Normal file

@@ -0,0 +1,122 @@
package bigcache
import "sync"
type iteratorError string
func (e iteratorError) Error() string {
return string(e)
}
// ErrInvalidIteratorState is reported when the iterator is in an invalid state
const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. Use SetNext() to move to next position")
// ErrCannotRetrieveEntry is reported when an entry cannot be retrieved from the underlying cache
const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache")
var emptyEntryInfo = EntryInfo{}
// EntryInfo holds information about an entry in the cache
type EntryInfo struct {
timestamp uint64
hash uint64
key string
value []byte
}
// Key returns entry's underlying key
func (e EntryInfo) Key() string {
return e.key
}
// Hash returns entry's hash value
func (e EntryInfo) Hash() uint64 {
return e.hash
}
// Timestamp returns entry's timestamp (time of insertion)
func (e EntryInfo) Timestamp() uint64 {
return e.timestamp
}
// Value returns entry's underlying value
func (e EntryInfo) Value() []byte {
return e.value
}
// EntryInfoIterator allows iteration over entries in the cache
type EntryInfoIterator struct {
mutex sync.Mutex
cache *BigCache
currentShard int
currentIndex int
elements []uint32
elementsCount int
valid bool
}
// SetNext moves to next element and returns true if it exists.
func (it *EntryInfoIterator) SetNext() bool {
it.mutex.Lock()
it.valid = false
it.currentIndex++
if it.elementsCount > it.currentIndex {
it.valid = true
it.mutex.Unlock()
return true
}
for i := it.currentShard + 1; i < it.cache.config.Shards; i++ {
it.elements, it.elementsCount = it.cache.shards[i].copyKeys()
// Non empty shard - stick with it
if it.elementsCount > 0 {
it.currentIndex = 0
it.currentShard = i
it.valid = true
it.mutex.Unlock()
return true
}
}
it.mutex.Unlock()
return false
}
func newIterator(cache *BigCache) *EntryInfoIterator {
elements, count := cache.shards[0].copyKeys()
return &EntryInfoIterator{
cache: cache,
currentShard: 0,
currentIndex: -1,
elements: elements,
elementsCount: count,
}
}
// Value returns current value from the iterator
func (it *EntryInfoIterator) Value() (EntryInfo, error) {
it.mutex.Lock()
if !it.valid {
it.mutex.Unlock()
return emptyEntryInfo, ErrInvalidIteratorState
}
entry, err := it.cache.shards[it.currentShard].getEntry(int(it.elements[it.currentIndex]))
if err != nil {
it.mutex.Unlock()
return emptyEntryInfo, ErrCannotRetrieveEntry
}
it.mutex.Unlock()
return EntryInfo{
timestamp: readTimestampFromEntry(entry),
hash: readHashFromEntry(entry),
key: readKeyFromEntry(entry),
value: readEntry(entry),
}, nil
}
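
A minimal usage sketch for the iterator, based on the API above (import path as pinned in Gopkg.toml):

```go
package main

import (
	"fmt"
	"time"

	"github.com/allegro/bigcache"
)

func main() {
	cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
	cache.Set("key1", []byte("value1"))
	cache.Set("key2", []byte("value2"))

	it := cache.Iterator()
	for it.SetNext() {
		// Value returns ErrCannotRetrieveEntry if the entry was evicted in the meantime.
		if info, err := it.Value(); err == nil {
			fmt.Println(info.Key(), string(info.Value()))
		}
	}
}
```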

150
vendor/github.com/allegro/bigcache/iterator_test.go generated vendored Normal file

@@ -0,0 +1,150 @@
package bigcache
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestEntriesIterator(t *testing.T) {
t.Parallel()
// given
keysCount := 1000
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: 6 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
value := []byte("value")
for i := 0; i < keysCount; i++ {
cache.Set(fmt.Sprintf("key%d", i), value)
}
// when
keys := make(map[string]struct{})
iterator := cache.Iterator()
for iterator.SetNext() {
current, err := iterator.Value()
if err == nil {
keys[current.Key()] = struct{}{}
}
}
// then
assert.Equal(t, keysCount, len(keys))
}
func TestEntriesIteratorWithMostShardsEmpty(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
cache, _ := newBigCache(Config{
Shards: 8,
LifeWindow: 6 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
}, &clock)
cache.Set("key", []byte("value"))
// when
iterator := cache.Iterator()
// then
if !iterator.SetNext() {
t.Errorf("Iterator should contain at least single element")
}
current, err := iterator.Value()
// then
assert.Nil(t, err)
assert.Equal(t, "key", current.Key())
assert.Equal(t, uint64(0x3dc94a19365b10ec), current.Hash())
assert.Equal(t, []byte("value"), current.Value())
assert.Equal(t, uint64(0), current.Timestamp())
}
func TestEntriesIteratorWithConcurrentUpdate(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
cache.Set("key", []byte("value"))
// when
iterator := cache.Iterator()
// then
if !iterator.SetNext() {
t.Errorf("Iterator should contain at least single element")
}
// Quite ugly but works
for i := 0; i < cache.config.Shards; i++ {
if oldestEntry, err := cache.shards[i].getOldestEntry(); err == nil {
cache.onEvict(oldestEntry, 10, cache.shards[i].removeOldestEntry)
}
}
current, err := iterator.Value()
// then
assert.Equal(t, ErrCannotRetrieveEntry, err)
assert.Equal(t, "Could not retrieve entry from cache", err.Error())
assert.Equal(t, EntryInfo{}, current)
}
func TestEntriesIteratorWithAllShardsEmpty(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
// when
iterator := cache.Iterator()
// then
if iterator.SetNext() {
t.Errorf("Iterator should not contain any elements")
}
}
func TestEntriesIteratorInInvalidState(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
// when
iterator := cache.Iterator()
// then
_, err := iterator.Value()
assert.Equal(t, ErrInvalidIteratorState, err)
assert.Equal(t, "Iterator is in invalid state. Use SetNext() to move to next position", err.Error())
}

30
vendor/github.com/allegro/bigcache/logger.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
package bigcache
import (
"log"
"os"
)
// Logger is invoked when `Config.Verbose=true`
type Logger interface {
Printf(format string, v ...interface{})
}
// this is a safeguard, breaking at compile time in case
// `log.Logger` does not adhere to our `Logger` interface.
// see https://golang.org/doc/faq#guarantee_satisfies_interface
var _ Logger = &log.Logger{}
// DefaultLogger returns a `Logger` implementation
// backed by stdlib's log
func DefaultLogger() *log.Logger {
return log.New(os.Stdout, "", log.LstdFlags)
}
func newLogger(custom Logger) Logger {
if custom != nil {
return custom
}
return DefaultLogger()
}
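// A custom Logger can be plugged in through Config.Logger; any value with a
// Printf(format string, v ...interface{}) method satisfies the interface, so
// adapters for structured loggers are straightforward to write.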

210
vendor/github.com/allegro/bigcache/queue/bytes_queue.go generated vendored Normal file
View File

@ -0,0 +1,210 @@
package queue
import (
"encoding/binary"
"log"
"time"
)
const (
// Number of bytes used to keep information about entry size
headerEntrySize = 4
// Bytes before the left margin are not used. A zero index means the element does not exist in the queue (useful when reading a slice from an index)
leftMarginIndex = 1
// Minimum empty blob size in bytes. An empty blob fills the space between the tail and the head during additional memory
// allocation, keeping existing entry indexes unchanged.
minimumEmptyBlobSize = 32 + headerEntrySize
)
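// For example, pushing the 5-byte entry "hello" into a fresh queue writes 9
// bytes starting at index 1 (the left margin):
//
//	[0x05 0x00 0x00 0x00 'h' 'e' 'l' 'l' 'o']
//
// a 4-byte little-endian length header followed by the payload, so the entry
// can later be read back via its returned index.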
// BytesQueue is a non-thread-safe FIFO queue based on a byte array.
// Every push operation returns the index of the entry, which can be used to read the entry back later.
type BytesQueue struct {
array []byte
capacity int
maxCapacity int
head int
tail int
count int
rightMargin int
headerBuffer []byte
verbose bool
initialCapacity int
}
type queueError struct {
message string
}
// NewBytesQueue initializes a new bytes queue.
// The initial capacity is used for the byte-array allocation.
// When the verbose flag is set, information about memory allocations is printed.
func NewBytesQueue(initialCapacity int, maxCapacity int, verbose bool) *BytesQueue {
return &BytesQueue{
array: make([]byte, initialCapacity),
capacity: initialCapacity,
maxCapacity: maxCapacity,
headerBuffer: make([]byte, headerEntrySize),
tail: leftMarginIndex,
head: leftMarginIndex,
rightMargin: leftMarginIndex,
verbose: verbose,
initialCapacity: initialCapacity,
}
}
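// Typical usage (a sketch): q := NewBytesQueue(1024, 0, false) followed by
// index, _ := q.Push(data); the same bytes can later be read back with
// q.Get(index) or consumed in FIFO order with Pop.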
// Reset removes all entries from queue
func (q *BytesQueue) Reset() {
// Just reset indexes
q.tail = leftMarginIndex
q.head = leftMarginIndex
q.rightMargin = leftMarginIndex
q.count = 0
}
// Push copies the entry to the end of the queue and moves the tail pointer. It allocates more space if needed.
// It returns the index of the pushed data, or an error if the maximum queue size limit has been reached.
func (q *BytesQueue) Push(data []byte) (int, error) {
dataLen := len(data)
if q.availableSpaceAfterTail() < dataLen+headerEntrySize {
if q.availableSpaceBeforeHead() >= dataLen+headerEntrySize {
q.tail = leftMarginIndex
} else if q.capacity+headerEntrySize+dataLen >= q.maxCapacity && q.maxCapacity > 0 {
return -1, &queueError{"Full queue. Maximum size limit reached."}
} else {
q.allocateAdditionalMemory(dataLen + headerEntrySize)
}
}
index := q.tail
q.push(data, dataLen)
return index, nil
}
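// allocateAdditionalMemory grows the backing array: the capacity is raised by
// at least `minimum` bytes and then doubled (capped at maxCapacity, if set).
// When the tail sits before the head, an empty blob is pushed into the gap
// between them so that the indexes of existing entries stay valid after the
// copy.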
func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
start := time.Now()
if q.capacity < minimum {
q.capacity += minimum
}
q.capacity = q.capacity * 2
if q.capacity > q.maxCapacity && q.maxCapacity > 0 {
q.capacity = q.maxCapacity
}
oldArray := q.array
q.array = make([]byte, q.capacity)
if leftMarginIndex != q.rightMargin {
copy(q.array, oldArray[:q.rightMargin])
if q.tail < q.head {
emptyBlobLen := q.head - q.tail - headerEntrySize
q.push(make([]byte, emptyBlobLen), emptyBlobLen)
q.head = leftMarginIndex
q.tail = q.rightMargin
}
}
if q.verbose {
log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity)
}
}
func (q *BytesQueue) push(data []byte, len int) {
binary.LittleEndian.PutUint32(q.headerBuffer, uint32(len))
q.copy(q.headerBuffer, headerEntrySize)
q.copy(data, len)
if q.tail > q.head {
q.rightMargin = q.tail
}
q.count++
}
func (q *BytesQueue) copy(data []byte, len int) {
q.tail += copy(q.array[q.tail:], data[:len])
}
// Pop reads the oldest entry from the queue and moves the head pointer to the next one
func (q *BytesQueue) Pop() ([]byte, error) {
data, size, err := q.peek(q.head)
if err != nil {
return nil, err
}
q.head += headerEntrySize + size
q.count--
if q.head == q.rightMargin {
q.head = leftMarginIndex
if q.tail == q.rightMargin {
q.tail = leftMarginIndex
}
q.rightMargin = q.tail
}
return data, nil
}
// Peek reads the oldest entry from the queue without moving the head pointer
func (q *BytesQueue) Peek() ([]byte, error) {
data, _, err := q.peek(q.head)
return data, err
}
// Get reads entry from index
func (q *BytesQueue) Get(index int) ([]byte, error) {
data, _, err := q.peek(index)
return data, err
}
// Capacity returns number of allocated bytes for queue
func (q *BytesQueue) Capacity() int {
return q.capacity
}
// Len returns number of entries kept in queue
func (q *BytesQueue) Len() int {
return q.count
}
// Error returns error message
func (e *queueError) Error() string {
return e.message
}
func (q *BytesQueue) peek(index int) ([]byte, int, error) {
if q.count == 0 {
return nil, 0, &queueError{"Empty queue"}
}
if index <= 0 {
return nil, 0, &queueError{"Index must be grater than zero. Invalid index."}
}
if index+headerEntrySize >= len(q.array) {
return nil, 0, &queueError{"Index out of range"}
}
blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize]))
return q.array[index+headerEntrySize : index+headerEntrySize+blockSize], blockSize, nil
}
func (q *BytesQueue) availableSpaceAfterTail() int {
if q.tail >= q.head {
return q.capacity - q.tail
}
return q.head - q.tail - minimumEmptyBlobSize
}
func (q *BytesQueue) availableSpaceBeforeHead() int {
if q.tail >= q.head {
return q.head - leftMarginIndex - minimumEmptyBlobSize
}
return q.head - q.tail - minimumEmptyBlobSize
}

vendor/github.com/allegro/bigcache/queue/bytes_queue_test.go generated vendored Normal file
View File

@ -0,0 +1,365 @@
package queue
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestPushAndPop(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(10, 0, true)
entry := []byte("hello")
// when
_, err := queue.Pop()
// then
assert.EqualError(t, err, "Empty queue")
// when
queue.Push(entry)
// then
assert.Equal(t, entry, pop(queue))
}
func TestLen(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
entry := []byte("hello")
assert.Zero(t, queue.Len())
// when
queue.Push(entry)
// then
assert.Equal(t, queue.Len(), 1)
}
func TestPeek(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
entry := []byte("hello")
// when
read, err := queue.Peek()
// then
assert.EqualError(t, err, "Empty queue")
assert.Nil(t, read)
// when
queue.Push(entry)
read, err = queue.Peek()
// then
assert.NoError(t, err)
assert.Equal(t, pop(queue), read)
assert.Equal(t, entry, read)
}
func TestReset(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
entry := []byte("hello")
// when
queue.Push(entry)
queue.Push(entry)
queue.Push(entry)
queue.Reset()
read, err := queue.Peek()
// then
assert.EqualError(t, err, "Empty queue")
assert.Nil(t, read)
// when
queue.Push(entry)
read, err = queue.Peek()
// then
assert.NoError(t, err)
assert.Equal(t, pop(queue), read)
assert.Equal(t, entry, read)
// when
read, err = queue.Peek()
// then
assert.EqualError(t, err, "Empty queue")
assert.Nil(t, read)
}
func TestReuseAvailableSpace(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
// when
queue.Push(blob('a', 70))
queue.Push(blob('b', 20))
queue.Pop()
queue.Push(blob('c', 20))
// then
assert.Equal(t, 100, queue.Capacity())
assert.Equal(t, blob('b', 20), pop(queue))
}
func TestAllocateAdditionalSpace(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(11, 0, false)
// when
queue.Push([]byte("hello1"))
queue.Push([]byte("hello2"))
// then
assert.Equal(t, 22, queue.Capacity())
}
func TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereHeadIsBeforeTail(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(25, 0, false)
// when
queue.Push(blob('a', 3)) // header + entry + left margin = 8 bytes
queue.Push(blob('b', 6)) // additional 10 bytes
queue.Pop() // space freed, 7 bytes available at the beginning
queue.Push(blob('c', 6)) // 10 bytes needed, 14 available but not in one segment, allocate additional memory
// then
assert.Equal(t, 50, queue.Capacity())
assert.Equal(t, blob('b', 6), pop(queue))
assert.Equal(t, blob('c', 6), pop(queue))
}
func TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereHeadIsBeforeTail(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(25, 0, false)
// when
queue.Push(blob('a', 3)) // header + entry + left margin = 8 bytes
index, _ := queue.Push(blob('b', 6)) // additional 10 bytes
queue.Pop() // space freed, 7 bytes available at the beginning
newestIndex, _ := queue.Push(blob('c', 6)) // 10 bytes needed, 14 available but not in one segment, allocate additional memory
// then
assert.Equal(t, 50, queue.Capacity())
assert.Equal(t, blob('b', 6), get(queue, index))
assert.Equal(t, blob('c', 6), get(queue, newestIndex))
}
func TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereTailIsBeforeHead(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
// when
queue.Push(blob('a', 70)) // header + entry + left margin = 75 bytes
queue.Push(blob('b', 10)) // 75 + 10 + 4 = 89 bytes
queue.Pop() // space freed at the beginning
queue.Push(blob('c', 30)) // 34 bytes used at the beginning, tail pointer is before head pointer
queue.Push(blob('d', 40)) // 44 bytes needed but no available in one segment, allocate new memory
// then
assert.Equal(t, 200, queue.Capacity())
assert.Equal(t, blob('c', 30), pop(queue))
// empty blob fills space between tail and head,
// created when additional memory was allocated,
// it keeps current entries indexes unchanged
assert.Equal(t, blob(0, 36), pop(queue))
assert.Equal(t, blob('b', 10), pop(queue))
assert.Equal(t, blob('d', 40), pop(queue))
}
func TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereTailIsBeforeHead(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
// when
queue.Push(blob('a', 70)) // header + entry + left margin = 75 bytes
index, _ := queue.Push(blob('b', 10)) // 75 + 10 + 4 = 89 bytes
queue.Pop() // space freed at the beginning
queue.Push(blob('c', 30)) // 34 bytes used at the beginning, tail pointer is before head pointer
newestIndex, _ := queue.Push(blob('d', 40)) // 44 bytes needed but no available in one segment, allocate new memory
// then
assert.Equal(t, 200, queue.Capacity())
assert.Equal(t, blob('b', 10), get(queue, index))
assert.Equal(t, blob('d', 40), get(queue, newestIndex))
}
func TestAllocateAdditionalSpaceForValueBiggerThanInitQueue(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(11, 0, false)
// when
queue.Push(blob('a', 100))
// then
assert.Equal(t, blob('a', 100), pop(queue))
assert.Equal(t, 230, queue.Capacity())
}
func TestAllocateAdditionalSpaceForValueBiggerThanQueue(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(21, 0, false)
// when
queue.Push(make([]byte, 2))
queue.Push(make([]byte, 2))
queue.Push(make([]byte, 100))
// then
queue.Pop()
queue.Pop()
assert.Equal(t, make([]byte, 100), pop(queue))
assert.Equal(t, 250, queue.Capacity())
}
func TestPopWholeQueue(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(13, 0, false)
// when
queue.Push([]byte("a"))
queue.Push([]byte("b"))
queue.Pop()
queue.Pop()
queue.Push([]byte("c"))
// then
assert.Equal(t, 13, queue.Capacity())
assert.Equal(t, []byte("c"), pop(queue))
}
func TestGetEntryFromIndex(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(20, 0, false)
// when
queue.Push([]byte("a"))
index, _ := queue.Push([]byte("b"))
queue.Push([]byte("c"))
result, _ := queue.Get(index)
// then
assert.Equal(t, []byte("b"), result)
}
func TestGetEntryFromInvalidIndex(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(1, 0, false)
queue.Push([]byte("a"))
// when
result, err := queue.Get(0)
// then
assert.Nil(t, result)
assert.EqualError(t, err, "Index must be grater than zero. Invalid index.")
}
func TestGetEntryFromIndexOutOfRange(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(1, 0, false)
queue.Push([]byte("a"))
// when
result, err := queue.Get(42)
// then
assert.Nil(t, result)
assert.EqualError(t, err, "Index out of range")
}
func TestGetEntryFromEmptyQueue(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(13, 0, false)
// when
result, err := queue.Get(1)
// then
assert.Nil(t, result)
assert.EqualError(t, err, "Empty queue")
}
func TestMaxSizeLimit(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(30, 50, false)
// when
queue.Push(blob('a', 25))
queue.Push(blob('b', 5))
capacity := queue.Capacity()
_, err := queue.Push(blob('c', 15))
// then
assert.Equal(t, 50, capacity)
assert.EqualError(t, err, "Full queue. Maximum size limit reached.")
assert.Equal(t, blob('a', 25), pop(queue))
assert.Equal(t, blob('b', 5), pop(queue))
}
func pop(queue *BytesQueue) []byte {
entry, err := queue.Pop()
if err != nil {
panic(err)
}
return entry
}
func get(queue *BytesQueue, index int) []byte {
entry, err := queue.Get(index)
if err != nil {
panic(err)
}
return entry
}
func blob(char byte, len int) []byte {
b := make([]byte, len)
for index := range b {
b[index] = char
}
return b
}

105
vendor/github.com/allegro/bigcache/server/README.md generated vendored Normal file
View File

@ -0,0 +1,105 @@
# BigCache HTTP Server
This is a basic HTTP server implementation for BigCache. It exposes a simple RESTful API and is designed for easy operational deployment. The server is intended to be consumed as a standalone executable, for platforms like Cloud Foundry, Heroku, etc. A design goal is versatility: whether you want to cache pictures, software artifacts, text, or any other kind of binary data, the BigCache HTTP Server should fit your needs.
```bash
# cache API.
GET /api/v1/cache/{key}
PUT /api/v1/cache/{key}
DELETE /api/v1/cache/{key}
# stats API.
GET /api/v1/stats
```
The cache API is designed for ease of use and accepts any content type. The stats API returns hit and miss statistics about the cache since the last time the server was started; they reset whenever the server is restarted.
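For programmatic access, a short Go sketch like the following should work against a locally running server (the `greeting` key and the default port are illustrative; only the standard library is needed):
```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	const base = "http://localhost:9090/api/v1/cache/"

	// Store a value under the (illustrative) key "greeting".
	req, err := http.NewRequest(http.MethodPut, base+"greeting", bytes.NewBufferString("hello"))
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close() // expect 201 Created

	// Read it back; the body is returned verbatim.
	resp, err = http.Get(base + "greeting")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%s\n", body) // hello
}
```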
### Notes for Operators
1. No SSL support, currently.
1. No authentication, currently.
1. Stats from the stats API are not persistent.
1. The easiest way to clean the cache is to restart the process; it takes less than a second to initialise.
1. There is no replication or clustering.
### Command-line Interface
```powershell
PS C:\go\src\github.com\mxplusb\bigcache\server> .\server.exe -h
Usage of C:\go\src\github.com\mxplusb\bigcache\server\server.exe:
-lifetime duration
Lifetime of each cache object. (default 10m0s)
-logfile string
Location of the logfile.
-max int
Maximum amount of data in the cache in MB. (default 8192)
-maxInWindow int
Used only in initial memory allocation. (default 600000)
-maxShardEntrySize int
The maximum size of each object stored in a shard. Used only in initial memory allocation. (default 500)
-port int
The port to listen on. (default 9090)
-shards int
Number of shards for the cache. (default 1024)
-v Verbose logging.
-version
Print server version.
```
Example:
```bash
$ curl -v -XPUT localhost:9090/api/v1/cache/example -d "yay!"
* Trying 127.0.0.1...
* Connected to localhost (127.0.0.1) port 9090 (#0)
> PUT /api/v1/cache/example HTTP/1.1
> Host: localhost:9090
> User-Agent: curl/7.47.0
> Accept: */*
> Content-Length: 4
> Content-Type: application/x-www-form-urlencoded
>
* upload completely sent off: 4 out of 4 bytes
< HTTP/1.1 201 Created
< Date: Fri, 17 Nov 2017 03:50:07 GMT
< Content-Length: 0
< Content-Type: text/plain; charset=utf-8
<
* Connection #0 to host localhost left intact
$
$ curl -v -XGET localhost:9090/api/v1/cache/example
Note: Unnecessary use of -X or --request, GET is already inferred.
* Trying 127.0.0.1...
* Connected to localhost (127.0.0.1) port 9090 (#0)
> GET /api/v1/cache/example HTTP/1.1
> Host: localhost:9090
> User-Agent: curl/7.47.0
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Fri, 17 Nov 2017 03:50:23 GMT
< Content-Length: 4
< Content-Type: text/plain; charset=utf-8
<
* Connection #0 to host localhost left intact
yay!
```
The server does log basic metrics:
```bash
$ ./server
2017/11/16 22:49:22 cache initialised.
2017/11/16 22:49:22 starting server on :9090
2017/11/16 22:50:07 stored "example" in cache.
2017/11/16 22:50:07 request took 277000ns.
2017/11/16 22:50:23 request took 9000ns.
```
### Acquiring Natively
This is native Go with no external dependencies, so it will compile for all supported Go platforms. To build:
```bash
go build server.go
```

vendor/github.com/allegro/bigcache/server/cache_handlers.go generated vendored Normal file
View File

@ -0,0 +1,87 @@
package main
import (
"io/ioutil"
"log"
"net/http"
"strings"
)
func cacheIndexHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet:
getCacheHandler(w, r)
case http.MethodPut:
putCacheHandler(w, r)
case http.MethodDelete:
deleteCacheHandler(w, r)
}
})
}
// handles get requests.
func getCacheHandler(w http.ResponseWriter, r *http.Request) {
target := r.URL.Path[len(cachePath):]
if target == "" {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("can't get a key if there is no key."))
log.Print("empty request.")
return
}
entry, err := cache.Get(target)
if err != nil {
errMsg := (err).Error()
if strings.Contains(errMsg, "not found") {
log.Print(err)
w.WriteHeader(http.StatusNotFound)
return
}
log.Print(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
w.Write(entry)
}
func putCacheHandler(w http.ResponseWriter, r *http.Request) {
target := r.URL.Path[len(cachePath):]
if target == "" {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("can't put a key if there is no key."))
log.Print("empty request.")
return
}
entry, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Print(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
if err := cache.Set(target, entry); err != nil {
log.Print(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
log.Printf("stored \"%s\" in cache.", target)
w.WriteHeader(http.StatusCreated)
}
// delete cache objects.
func deleteCacheHandler(w http.ResponseWriter, r *http.Request) {
target := r.URL.Path[len(cachePath):]
if err := cache.Delete(target); err != nil {
if strings.Contains((err).Error(), "not found") {
w.WriteHeader(http.StatusNotFound)
log.Printf("%s not found.", target)
return
}
w.WriteHeader(http.StatusInternalServerError)
log.Printf("internal cache error: %s", err)
return
}
// this is what the RFC says to use when calling DELETE.
w.WriteHeader(http.StatusOK)
return
}

vendor/github.com/allegro/bigcache/server/middleware.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
package main
import (
"log"
"net/http"
"time"
)
// our base middleware implementation.
type service func(http.Handler) http.Handler
// chain load middleware services.
func serviceLoader(h http.Handler, svcs ...service) http.Handler {
for _, svc := range svcs {
h = svc(h)
}
return h
}
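// For example, serviceLoader(handler, requestMetrics(logger)) returns a
// handler that times each request and logs the duration once the wrapped
// handler has served it.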
// middleware for request length metrics.
func requestMetrics(l *log.Logger) service {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
h.ServeHTTP(w, r)
l.Printf("%s request to %s took %vns.", r.Method, r.URL.Path, time.Since(start).Nanoseconds())
})
}
}

85
vendor/github.com/allegro/bigcache/server/server.go generated vendored Normal file
View File

@ -0,0 +1,85 @@
package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
"strconv"
"github.com/allegro/bigcache"
)
const (
// base HTTP paths.
apiVersion = "v1"
apiBasePath = "/api/" + apiVersion + "/"
// path to cache.
cachePath = apiBasePath + "cache/"
statsPath = apiBasePath + "stats"
// server version.
version = "1.0.0"
)
var (
port int
logfile string
ver bool
// cache-specific settings.
cache *bigcache.BigCache
config = bigcache.Config{}
)
func init() {
flag.BoolVar(&config.Verbose, "v", false, "Verbose logging.")
flag.IntVar(&config.Shards, "shards", 1024, "Number of shards for the cache.")
flag.IntVar(&config.MaxEntriesInWindow, "maxInWindow", 1000*10*60, "Used only in initial memory allocation.")
flag.DurationVar(&config.LifeWindow, "lifetime", 100000*100000*60, "Lifetime of each cache object.")
flag.IntVar(&config.HardMaxCacheSize, "max", 8192, "Maximum amount of data in the cache in MB.")
flag.IntVar(&config.MaxEntrySize, "maxShardEntrySize", 500, "The maximum size of each object stored in a shard. Used only in initial memory allocation.")
flag.IntVar(&port, "port", 9090, "The port to listen on.")
flag.StringVar(&logfile, "logfile", "", "Location of the logfile.")
flag.BoolVar(&ver, "version", false, "Print server version.")
}
func main() {
flag.Parse()
if ver {
fmt.Printf("BigCache HTTP Server v%s", version)
os.Exit(0)
}
var logger *log.Logger
if logfile == "" {
logger = log.New(os.Stdout, "", log.LstdFlags)
} else {
f, err := os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
panic(err)
}
logger = log.New(f, "", log.LstdFlags)
}
var err error
cache, err = bigcache.NewBigCache(config)
if err != nil {
logger.Fatal(err)
}
logger.Print("cache initialised.")
// let the middleware log.
http.Handle(cachePath, serviceLoader(cacheIndexHandler(), requestMetrics(logger)))
http.Handle(statsPath, serviceLoader(statsIndexHandler(), requestMetrics(logger)))
logger.Printf("starting server on :%d", port)
strPort := ":" + strconv.Itoa(port)
log.Fatal("ListenAndServe: ", http.ListenAndServe(strPort, nil))
}

vendor/github.com/allegro/bigcache/server/server_test.go generated vendored Normal file
View File

@ -0,0 +1,185 @@
package main
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http/httptest"
"testing"
"time"
"github.com/allegro/bigcache"
)
const (
testBaseString = "http://bigcache.org"
)
func testCacheSetup() {
cache, _ = bigcache.NewBigCache(bigcache.Config{
Shards: 1024,
LifeWindow: 10 * time.Minute,
MaxEntriesInWindow: 1000 * 10 * 60,
MaxEntrySize: 500,
Verbose: true,
HardMaxCacheSize: 8192,
OnRemove: nil,
})
}
func TestMain(m *testing.M) {
testCacheSetup()
m.Run()
}
func TestGetWithNoKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/", nil)
rr := httptest.NewRecorder()
getCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 400 {
t.Errorf("want: 400; got: %d", resp.StatusCode)
}
}
func TestGetWithMissingKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/doesNotExist", nil)
rr := httptest.NewRecorder()
getCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 404 {
t.Errorf("want: 404; got: %d", resp.StatusCode)
}
}
func TestGetKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/getKey", nil)
rr := httptest.NewRecorder()
// set something.
cache.Set("getKey", []byte("123"))
getCacheHandler(rr, req)
resp := rr.Result()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("cannot deserialise test response: %s", err)
}
if string(body) != "123" {
t.Errorf("want: 123; got: %s.\n\tcan't get existing key getKey.", string(body))
}
}
func TestPutKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", bytes.NewBuffer([]byte("123")))
rr := httptest.NewRecorder()
putCacheHandler(rr, req)
testPutKeyResult, err := cache.Get("putKey")
if err != nil {
t.Errorf("error returning cache entry: %s", err)
}
if string(testPutKeyResult) != "123" {
t.Errorf("want: 123; got: %s.\n\tcan't get PUT key putKey.", string(testPutKeyResult))
}
}
func TestPutEmptyKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/", bytes.NewBuffer([]byte("123")))
rr := httptest.NewRecorder()
putCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 400 {
t.Errorf("want: 400; got: %d.\n\tempty key insertion should return with 400", resp.StatusCode)
}
}
func TestDeleteEmptyKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/", bytes.NewBuffer([]byte("123")))
rr := httptest.NewRecorder()
deleteCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 404 {
t.Errorf("want: 404; got: %d.\n\tapparently we're trying to delete empty keys.", resp.StatusCode)
}
}
func TestDeleteInvalidKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/invalidDeleteKey", bytes.NewBuffer([]byte("123")))
rr := httptest.NewRecorder()
deleteCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 404 {
t.Errorf("want: 404; got: %d.\n\tapparently we're trying to delete invalid keys.", resp.StatusCode)
}
}
func TestDeleteKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/testDeleteKey", bytes.NewBuffer([]byte("123")))
rr := httptest.NewRecorder()
if err := cache.Set("testDeleteKey", []byte("123")); err != nil {
t.Errorf("can't set key for testing. %s", err)
}
deleteCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 200 {
t.Errorf("want: 200; got: %d.\n\tcan't delete keys.", resp.StatusCode)
}
}
func TestGetStats(t *testing.T) {
t.Parallel()
var testStats bigcache.Stats
req := httptest.NewRequest("GET", testBaseString+"/api/v1/stats", nil)
rr := httptest.NewRecorder()
// manually enter a key so there are some stats. get it so there's at least 1 hit.
if err := cache.Set("incrementStats", []byte("123")); err != nil {
t.Errorf("error setting cache value. error %s", err)
}
// it's okay if this fails, since we'll catch it downstream.
if _, err := cache.Get("incrementStats"); err != nil {
t.Errorf("can't find incrementStats. error: %s", err)
}
getCacheStatsHandler(rr, req)
resp := rr.Result()
if err := json.NewDecoder(resp.Body).Decode(&testStats); err != nil {
t.Errorf("error decoding cache stats. error: %s", err)
}
if testStats.Hits == 0 {
t.Errorf("want: > 0; got: 0.\n\thandler not properly returning stats info.")
}
}

vendor/github.com/allegro/bigcache/server/stats_handler.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
package main
import (
"encoding/json"
"log"
"net/http"
)
// index handler for the stats endpoint
func statsIndexHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet:
getCacheStatsHandler(w, r)
default:
w.WriteHeader(http.StatusMethodNotAllowed)
}
})
}
// returns the cache's statistics.
func getCacheStatsHandler(w http.ResponseWriter, r *http.Request) {
target, err := json.Marshal(cache.Stats())
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
log.Printf("cannot marshal cache stats. error: %s", err)
return
}
// since we're sending a struct, make it easy for consumers to interface with it.
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Write(target)
return
}

229
vendor/github.com/allegro/bigcache/shard.go generated vendored Normal file
View File

@ -0,0 +1,229 @@
package bigcache
import (
"fmt"
"sync"
"sync/atomic"
"github.com/allegro/bigcache/queue"
)
type cacheShard struct {
hashmap map[uint64]uint32
entries queue.BytesQueue
lock sync.RWMutex
entryBuffer []byte
onRemove func(wrappedEntry []byte)
isVerbose bool
logger Logger
clock clock
lifeWindow uint64
stats Stats
}
type onRemoveCallback func(wrappedEntry []byte)
func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
s.lock.RLock()
itemIndex := s.hashmap[hashedKey]
if itemIndex == 0 {
s.lock.RUnlock()
s.miss()
return nil, notFound(key)
}
wrappedEntry, err := s.entries.Get(int(itemIndex))
if err != nil {
s.lock.RUnlock()
s.miss()
return nil, err
}
if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
if s.isVerbose {
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
}
s.lock.RUnlock()
s.collision()
return nil, notFound(key)
}
s.lock.RUnlock()
s.hit()
return readEntry(wrappedEntry), nil
}
func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
currentTimestamp := uint64(s.clock.epoch())
s.lock.Lock()
if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
resetKeyFromEntry(previousEntry)
}
}
if oldestEntry, err := s.entries.Peek(); err == nil {
s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
}
w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)
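// Evict oldest entries until the new entry fits; if eviction itself fails
// (the queue is already empty), the entry is bigger than the shard can hold.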
for {
if index, err := s.entries.Push(w); err == nil {
s.hashmap[hashedKey] = uint32(index)
s.lock.Unlock()
return nil
}
if s.removeOldestEntry() != nil {
s.lock.Unlock()
return fmt.Errorf("entry is bigger than max shard size")
}
}
}
func (s *cacheShard) del(key string, hashedKey uint64) error {
s.lock.RLock()
itemIndex := s.hashmap[hashedKey]
if itemIndex == 0 {
s.lock.RUnlock()
s.delmiss()
return notFound(key)
}
wrappedEntry, err := s.entries.Get(int(itemIndex))
if err != nil {
s.lock.RUnlock()
s.delmiss()
return err
}
s.lock.RUnlock()
s.lock.Lock()
{
delete(s.hashmap, hashedKey)
s.onRemove(wrappedEntry)
resetKeyFromEntry(wrappedEntry)
}
s.lock.Unlock()
s.delhit()
return nil
}
func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
oldestTimestamp := readTimestampFromEntry(oldestEntry)
if currentTimestamp-oldestTimestamp > s.lifeWindow {
evict()
return true
}
return false
}
func (s *cacheShard) cleanUp(currentTimestamp uint64) {
s.lock.Lock()
for {
if oldestEntry, err := s.entries.Peek(); err != nil {
break
} else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted {
break
}
}
s.lock.Unlock()
}
func (s *cacheShard) getOldestEntry() ([]byte, error) {
return s.entries.Peek()
}
func (s *cacheShard) getEntry(index int) ([]byte, error) {
return s.entries.Get(index)
}
func (s *cacheShard) copyKeys() (keys []uint32, next int) {
keys = make([]uint32, len(s.hashmap))
s.lock.RLock()
for _, index := range s.hashmap {
keys[next] = index
next++
}
s.lock.RUnlock()
return keys, next
}
func (s *cacheShard) removeOldestEntry() error {
oldest, err := s.entries.Pop()
if err == nil {
hash := readHashFromEntry(oldest)
delete(s.hashmap, hash)
s.onRemove(oldest)
return nil
}
return err
}
func (s *cacheShard) reset(config Config) {
s.lock.Lock()
s.hashmap = make(map[uint64]uint32, config.initialShardSize())
s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
s.entries.Reset()
s.lock.Unlock()
}
func (s *cacheShard) len() int {
s.lock.RLock()
res := len(s.hashmap)
s.lock.RUnlock()
return res
}
func (s *cacheShard) getStats() Stats {
var stats = Stats{
Hits: atomic.LoadInt64(&s.stats.Hits),
Misses: atomic.LoadInt64(&s.stats.Misses),
DelHits: atomic.LoadInt64(&s.stats.DelHits),
DelMisses: atomic.LoadInt64(&s.stats.DelMisses),
Collisions: atomic.LoadInt64(&s.stats.Collisions),
}
return stats
}
func (s *cacheShard) hit() {
atomic.AddInt64(&s.stats.Hits, 1)
}
func (s *cacheShard) miss() {
atomic.AddInt64(&s.stats.Misses, 1)
}
func (s *cacheShard) delhit() {
atomic.AddInt64(&s.stats.DelHits, 1)
}
func (s *cacheShard) delmiss() {
atomic.AddInt64(&s.stats.DelMisses, 1)
}
func (s *cacheShard) collision() {
atomic.AddInt64(&s.stats.Collisions, 1)
}
func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
return &cacheShard{
hashmap: make(map[uint64]uint32, config.initialShardSize()),
entries: *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose),
entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
onRemove: callback,
isVerbose: config.Verbose,
logger: newLogger(config.Logger),
clock: clock,
lifeWindow: uint64(config.LifeWindow.Seconds()),
}
}

15
vendor/github.com/allegro/bigcache/stats.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
package bigcache
// Stats stores cache statistics
type Stats struct {
// Hits is the number of successfully found keys
Hits int64 `json:"hits"`
// Misses is the number of keys that were not found
Misses int64 `json:"misses"`
// DelHits is the number of successfully deleted keys
DelHits int64 `json:"delete_hits"`
// DelMisses is the number of keys that could not be deleted
DelMisses int64 `json:"delete_misses"`
// Collisions is the number of key collisions that occurred
Collisions int64 `json:"collisions"`
}
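// A Stats value thus serializes to JSON along the lines of:
//
//	{"hits":10,"misses":2,"delete_hits":1,"delete_misses":0,"collisions":0}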

16
vendor/github.com/allegro/bigcache/utils.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
package bigcache
func max(a, b int) int {
if a > b {
return a
}
return b
}
func convertMBToBytes(value int) int {
return value * 1024 * 1024
}
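// isPowerOfTwo relies on the identity that a power of two has a single bit
// set, so n & (n-1) clears it to zero: 8 (0b1000) & 7 (0b0111) == 0, while
// 6 (0b0110) & 5 (0b0101) == 4. Note that 0 also passes the check.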
func isPowerOfTwo(number int) bool {
return (number & (number - 1)) == 0
}

vendor/github.com/ethereum/go-ethereum/.travis.yml generated vendored
View File

@ -29,6 +29,14 @@ matrix:
- os: osx
go: 1.11.x
script:
- echo "Increase the maximum number of open file descriptors on macOS"
- NOFILE=20480
- sudo sysctl -w kern.maxfiles=$NOFILE
- sudo sysctl -w kern.maxfilesperproc=$NOFILE
- sudo launchctl limit maxfiles $NOFILE $NOFILE
- sudo launchctl limit maxfiles
- ulimit -S -n $NOFILE
- ulimit -n
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES

vendor/github.com/ethereum/go-ethereum/README.md generated vendored
View File

@ -18,7 +18,7 @@ For prerequisites and detailed build instructions please read the
[Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum)
on the wiki.
Building geth requires both a Go (version 1.7 or later) and a C compiler.
Building geth requires both a Go (version 1.9 or later) and a C compiler.
You can install them using your favourite package manager.
Once the dependencies are installed, run
@ -168,7 +168,7 @@ HTTP based JSON-RPC API options:
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to connect
via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](http://www.jsonrpc.org/specification)
via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](https://www.jsonrpc.org/specification)
on all transports. You can reuse the same connection for multiple requests!
**Note: Please understand the security implications of opening up an HTTP/WS based transport before

vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go generated vendored
View File

@ -243,11 +243,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// input offset is the bytes offset for packed output
inputOffset := 0
for _, abiArg := range abiArgs {
if abiArg.Type.T == ArrayTy {
inputOffset += 32 * abiArg.Type.Size
} else {
inputOffset += 32
}
inputOffset += getDynamicTypeOffset(abiArg.Type)
}
var ret []byte
for i, a := range args {
@ -257,14 +253,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
if err != nil {
return nil, err
}
// check for a slice type (string, bytes, slice)
if input.Type.requiresLengthPrefix() {
// calculate the offset
offset := inputOffset + len(variableInput)
// check for dynamic types
if isDynamicType(input.Type) {
// set the offset
ret = append(ret, packNum(reflect.ValueOf(offset))...)
// Append the packed output to the variable input. The variable input
// will be appended at the end of the input.
ret = append(ret, packNum(reflect.ValueOf(inputOffset))...)
// calculate next offset
inputOffset += len(packed)
// append to variable input
variableInput = append(variableInput, packed...)
} else {
// append the packed value to the input

vendor/github.com/ethereum/go-ethereum/accounts/abi/pack_test.go generated vendored
View File

@ -324,6 +324,66 @@ func TestPack(t *testing.T) {
"foobar",
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
},
{
"string[]",
[]string{"hello", "foobar"},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
"0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
"666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
},
{
"string[2]",
[]string{"hello", "foobar"},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
"0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
"666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
},
{
"bytes32[][]",
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
},
{
"bytes32[][2]",
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
},
{
"bytes32[3][2]",
[][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
"0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
},
} {
typ, err := NewType(test.typ)
if err != nil {
@ -336,7 +396,7 @@ func TestPack(t *testing.T) {
}
if !bytes.Equal(output, test.output) {
t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
t.Errorf("input %d for typ: %v failed. Expected bytes: '%x' Got: '%x'", i, typ.String(), test.output, output)
}
}
}

vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go generated vendored
View File

@ -183,23 +183,39 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
return nil, err
}
if t.T == SliceTy || t.T == ArrayTy {
var packed []byte
switch t.T {
case SliceTy, ArrayTy:
var ret []byte
if t.requiresLengthPrefix() {
// append length
ret = append(ret, packNum(reflect.ValueOf(v.Len()))...)
}
// calculate offset if any
offset := 0
offsetReq := isDynamicType(*t.Elem)
if offsetReq {
offset = getDynamicTypeOffset(*t.Elem) * v.Len()
}
var tail []byte
for i := 0; i < v.Len(); i++ {
val, err := t.Elem.pack(v.Index(i))
if err != nil {
return nil, err
}
packed = append(packed, val...)
}
if t.T == SliceTy {
return packBytesSlice(packed, v.Len()), nil
} else if t.T == ArrayTy {
return packed, nil
if !offsetReq {
ret = append(ret, val...)
continue
}
ret = append(ret, packNum(reflect.ValueOf(offset))...)
offset += len(val)
tail = append(tail, val...)
}
return append(ret, tail...), nil
default:
return packElement(t, v), nil
}
return packElement(t, v), nil
}
// requireLengthPrefix returns whether the type requires any sort of length
@ -207,3 +223,27 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
func (t Type) requiresLengthPrefix() bool {
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
}
// isDynamicType returns true if the type is dynamic.
// StringTy, BytesTy, and SliceTy(irrespective of slice element type) are dynamic types
// ArrayTy is considered dynamic if and only if the Array element is a dynamic type.
// This function recursively checks the type for slice and array elements.
func isDynamicType(t Type) bool {
// dynamic types
// array is also a dynamic type if the array type is dynamic
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem))
}
// getDynamicTypeOffset returns the offset for the type.
// See `isDynamicType` to know which types are considered dynamic.
// If the type t is an array and element type is not a dynamic type, then we consider it a static type and
// return 32 * size of array since length prefix is not required.
// If t is a dynamic type or element type(for slices and arrays) is dynamic, then we simply return 32 as offset.
func getDynamicTypeOffset(t Type) int {
// if it is an array and there are no dynamic types
// then the array is static type
if t.T == ArrayTy && !isDynamicType(*t.Elem) {
return 32 * t.Size
}
return 32
}
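// For example, bytes32[3] contains only static elements, so it is packed
// inline and contributes 3*32 = 96 bytes to the head section, whereas a
// string or bytes32[][3] is dynamic and reserves just one 32-byte offset slot.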

vendor/github.com/ethereum/go-ethereum/accounts/keystore/account_cache.go generated vendored
View File

@ -265,7 +265,10 @@ func (ac *accountCache) scanAccounts() error {
case (addr == common.Address{}):
log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
default:
return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}
return &accounts.Account{
Address: addr,
URL: accounts.URL{Scheme: KeyStoreScheme, Path: path},
}
}
return nil
}

vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go generated vendored
View File

@ -171,7 +171,10 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
if err != nil {
return nil, accounts.Account{}, err
}
a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))}}
a := accounts.Account{
Address: key.Address,
URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))},
}
if err := ks.StoreKey(a.URL.Path, key, auth); err != nil {
zeroKey(key.PrivateKey)
return nil, a, err
@ -224,5 +227,6 @@ func toISO8601(t time.Time) string {
} else {
tz = fmt.Sprintf("%03d00", offset/3600)
}
return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s",
t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
}

vendor/github.com/ethereum/go-ethereum/accounts/keystore/passphrase.go generated vendored
View File

@ -233,6 +233,7 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
PrivateKey: key,
}, nil
}
func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
if cryptoJson.Cipher != "aes-128-ctr" {
return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher)

vendor/github.com/ethereum/go-ethereum/accounts/keystore/presale.go generated vendored
View File

@ -38,7 +38,13 @@ func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accou
return accounts.Account{}, nil, err
}
key.Id = uuid.NewRandom()
a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: keyStore.JoinPath(keyFileName(key.Address))}}
a := accounts.Account{
Address: key.Address,
URL: accounts.URL{
Scheme: KeyStoreScheme,
Path: keyStore.JoinPath(keyFileName(key.Address)),
},
}
err = keyStore.StoreKey(a.URL.Path, key, password)
return a, key, err
}

vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go generated vendored
View File

@ -89,7 +89,7 @@ func runCmd(ctx *cli.Context) error {
genesisConfig *core.Genesis
)
if ctx.GlobalBool(MachineFlag.Name) {
tracer = NewJSONLogger(logconfig, os.Stdout)
tracer = vm.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.GlobalBool(DebugFlag.Name) {
debugLogger = vm.NewStructLogger(logconfig)
tracer = debugLogger
@ -206,6 +206,7 @@ func runCmd(ctx *cli.Context) error {
execTime := time.Since(tstart)
if ctx.GlobalBool(DumpFlag.Name) {
statedb.Commit(true)
statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump()))
}

vendor/github.com/ethereum/go-ethereum/cmd/evm/staterunner.go generated vendored
View File

@ -68,7 +68,7 @@ func stateTestCmd(ctx *cli.Context) error {
)
switch {
case ctx.GlobalBool(MachineFlag.Name):
tracer = NewJSONLogger(config, os.Stderr)
tracer = vm.NewJSONLogger(config, os.Stderr)
case ctx.GlobalBool(DebugFlag.Name):
debugger = vm.NewStructLogger(config)

vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go generated vendored
View File

@ -256,7 +256,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
}
for _, boot := range enodes {
old, err := enode.ParseV4(boot.String())
if err != nil {
if err == nil {
stack.Server().AddPeer(old)
}
}

vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go generated vendored
View File

@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"io"
"math/big"
"os"
"reflect"
"unicode"
@ -152,7 +153,9 @@ func enableWhisper(ctx *cli.Context) bool {
func makeFullNode(ctx *cli.Context) *node.Node {
stack, cfg := makeConfigNode(ctx)
if ctx.GlobalIsSet(utils.ConstantinopleOverrideFlag.Name) {
cfg.Eth.ConstantinopleOverride = new(big.Int).SetUint64(ctx.GlobalUint64(utils.ConstantinopleOverrideFlag.Name))
}
utils.RegisterEthService(stack, &cfg.Eth)
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {

vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go generated vendored
View File

@ -87,8 +87,10 @@ var (
utils.LightServFlag,
utils.LightPeersFlag,
utils.LightKDFFlag,
utils.WhitelistFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
utils.CacheTrieFlag,
utils.CacheGCFlag,
utils.TrieCacheGenFlag,
utils.ListenPortFlag,
@ -121,6 +123,7 @@ var (
utils.RinkebyFlag,
utils.VMEnableDebugFlag,
utils.NetworkIdFlag,
utils.ConstantinopleOverrideFlag,
utils.RPCCORSDomainFlag,
utils.RPCVirtualHostsFlag,
utils.EthStatsURLFlag,

vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go generated vendored
View File

@ -81,6 +81,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.LightServFlag,
utils.LightPeersFlag,
utils.LightKDFFlag,
utils.WhitelistFlag,
},
},
{
@ -132,6 +133,7 @@ var AppHelpFlagGroups = []flagGroup{
Flags: []cli.Flag{
utils.CacheFlag,
utils.CacheDatabaseFlag,
utils.CacheTrieFlag,
utils.CacheGCFlag,
utils.TrieCacheGenFlag,
},

vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go generated vendored
View File

@ -20,35 +20,41 @@ import (
"encoding/binary"
"errors"
"math"
"math/big"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
math2 "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
)
// cppEthereumGenesisSpec represents the genesis specification format used by the
// alethGenesisSpec represents the genesis specification format used by the
// C++ Ethereum implementation.
type cppEthereumGenesisSpec struct {
type alethGenesisSpec struct {
SealEngine string `json:"sealEngine"`
Params struct {
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
DurationLimit *hexutil.Big `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"`
EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
TieBreakingGas bool `json:"tieBreakingGas"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
AllowFutureBlocks bool `json:"allowFutureBlocks"`
} `json:"params"`
Genesis struct {
@ -62,57 +68,68 @@ type cppEthereumGenesisSpec struct {
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
Accounts map[common.Address]*cppEthereumGenesisSpecAccount `json:"accounts"`
Accounts map[common.UnprefixedAddress]*alethGenesisSpecAccount `json:"accounts"`
}
// cppEthereumGenesisSpecAccount is the prefunded genesis account and/or precompiled
// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type cppEthereumGenesisSpecAccount struct {
Balance *hexutil.Big `json:"balance"`
Nonce uint64 `json:"nonce,omitempty"`
Precompiled *cppEthereumGenesisSpecBuiltin `json:"precompiled,omitempty"`
type alethGenesisSpecAccount struct {
Balance *math2.HexOrDecimal256 `json:"balance"`
Nonce uint64 `json:"nonce,omitempty"`
Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"`
}
// cppEthereumGenesisSpecBuiltin is the precompiled contract definition.
type cppEthereumGenesisSpecBuiltin struct {
Name string `json:"name,omitempty"`
StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
Linear *cppEthereumGenesisSpecLinearPricing `json:"linear,omitempty"`
// alethGenesisSpecBuiltin is the precompiled contract definition.
type alethGenesisSpecBuiltin struct {
Name string `json:"name,omitempty"`
StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
Linear *alethGenesisSpecLinearPricing `json:"linear,omitempty"`
}
type cppEthereumGenesisSpecLinearPricing struct {
type alethGenesisSpecLinearPricing struct {
Base uint64 `json:"base"`
Word uint64 `json:"word"`
}
// newCppEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific
// newAlethGenesisSpec converts a go-ethereum genesis block into a Aleth-specific
// chain specification format.
func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEthereumGenesisSpec, error) {
// Only ethash is currently supported between go-ethereum and cpp-ethereum
func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSpec, error) {
// Only ethash is currently supported between go-ethereum and aleth
if genesis.Config.Ethash == nil {
return nil, errors.New("unsupported consensus engine")
}
// Reconstruct the chain spec in Parity's format
spec := &cppEthereumGenesisSpec{
// Reconstruct the chain spec in Aleth format
spec := &alethGenesisSpec{
SealEngine: "Ethash",
}
// Some defaults
spec.Params.AccountStartNonce = 0
spec.Params.TieBreakingGas = false
spec.Params.AllowFutureBlocks = false
spec.Params.DaoHardforkBlock = 0
spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())
spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)
// Byzantium
if num := genesis.Config.ByzantiumBlock; num != nil {
spec.setByzantium(num)
}
// Constantinople
if num := genesis.Config.ConstantinopleBlock; num != nil {
spec.setConstantinople(num)
}
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxUint64)
spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxInt64)
spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
spec.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
spec.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
spec.Params.DifficultyBoundDivisor = (*math2.HexOrDecimal256)(params.DifficultyBoundDivisor)
spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
spec.Params.DurationLimit = (*math2.HexOrDecimal256)(params.DurationLimit)
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8))
@ -126,77 +143,104 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
spec.Accounts = make(map[common.Address]*cppEthereumGenesisSpecAccount)
for address, account := range genesis.Alloc {
spec.Accounts[address] = &cppEthereumGenesisSpecAccount{
Balance: (*hexutil.Big)(account.Balance),
Nonce: account.Nonce,
}
}
spec.Accounts[common.BytesToAddress([]byte{1})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "ecrecover", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 3000},
}
spec.Accounts[common.BytesToAddress([]byte{2})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "sha256", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 60, Word: 12},
}
spec.Accounts[common.BytesToAddress([]byte{3})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "ripemd160", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 600, Word: 120},
}
spec.Accounts[common.BytesToAddress([]byte{4})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "identity", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 15, Word: 3},
spec.setAccount(address, account)
}
spec.setPrecompile(1, &alethGenesisSpecBuiltin{Name: "ecrecover",
Linear: &alethGenesisSpecLinearPricing{Base: 3000}})
spec.setPrecompile(2, &alethGenesisSpecBuiltin{Name: "sha256",
Linear: &alethGenesisSpecLinearPricing{Base: 60, Word: 12}})
spec.setPrecompile(3, &alethGenesisSpecBuiltin{Name: "ripemd160",
Linear: &alethGenesisSpecLinearPricing{Base: 600, Word: 120}})
spec.setPrecompile(4, &alethGenesisSpecBuiltin{Name: "identity",
Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}})
if genesis.Config.ByzantiumBlock != nil {
spec.Accounts[common.BytesToAddress([]byte{5})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "modexp", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
}
spec.Accounts[common.BytesToAddress([]byte{6})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "alt_bn128_G1_add", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 500},
}
spec.Accounts[common.BytesToAddress([]byte{7})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "alt_bn128_G1_mul", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 40000},
}
spec.Accounts[common.BytesToAddress([]byte{8})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "alt_bn128_pairing_product", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
}
spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
Linear: &alethGenesisSpecLinearPricing{Base: 500}})
spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
Linear: &alethGenesisSpecLinearPricing{Base: 40000}})
spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
}
return spec, nil
}
func (spec *alethGenesisSpec) setPrecompile(address byte, data *alethGenesisSpecBuiltin) {
if spec.Accounts == nil {
spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
}
addr := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
// Create the account on first use; assigning through a missing map entry would panic on a nil pointer.
if _, exist := spec.Accounts[addr]; !exist {
spec.Accounts[addr] = &alethGenesisSpecAccount{}
}
spec.Accounts[addr].Precompiled = data
}
func (spec *alethGenesisSpec) setAccount(address common.Address, account core.GenesisAccount) {
if spec.Accounts == nil {
spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
}
a, exist := spec.Accounts[common.UnprefixedAddress(address)]
if !exist {
a = &alethGenesisSpecAccount{}
spec.Accounts[common.UnprefixedAddress(address)] = a
}
a.Balance = (*math2.HexOrDecimal256)(account.Balance)
a.Nonce = account.Nonce
}
func (spec *alethGenesisSpec) setByzantium(num *big.Int) {
spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64())
}
func (spec *alethGenesisSpec) setConstantinople(num *big.Int) {
spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64())
}
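For orientation, a minimal sketch of how the two helpers above cooperate (a hypothetical call site inside cmd/puppeth; the encoding/json and fmt imports are assumptions, and the literal values are taken from the stureby testdata further down):

spec := new(alethGenesisSpec)
spec.setAccount(common.BytesToAddress([]byte{1}), core.GenesisAccount{Balance: big.NewInt(1)})
spec.setPrecompile(1, &alethGenesisSpecBuiltin{
	Name:   "ecrecover",
	Linear: &alethGenesisSpecLinearPricing{Base: 3000},
})
// Marshalling the accounts map now yields the same shape as the
// "0000000000000000000000000000000000000001" entry in stureby_aleth.json.
out, _ := json.MarshalIndent(spec.Accounts, "", "  ")
fmt.Println(string(out))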
// parityChainSpec is the chain specification format used by Parity.
type parityChainSpec struct {
Name string `json:"name"`
Engine struct {
Name string `json:"name"`
Datadir string `json:"dataDir"`
Engine struct {
Ethash struct {
Params struct {
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
DurationLimit *hexutil.Big `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
HomesteadTransition uint64 `json:"homesteadTransition"`
EIP150Transition uint64 `json:"eip150Transition"`
EIP160Transition uint64 `json:"eip160Transition"`
EIP161abcTransition uint64 `json:"eip161abcTransition"`
EIP161dTransition uint64 `json:"eip161dTransition"`
EIP649Reward *hexutil.Big `json:"eip649Reward"`
EIP100bTransition uint64 `json:"eip100bTransition"`
EIP649Transition uint64 `json:"eip649Transition"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
DurationLimit *hexutil.Big `json:"durationLimit"`
BlockReward map[string]string `json:"blockReward"`
DifficultyBombDelays map[string]string `json:"difficultyBombDelays"`
HomesteadTransition hexutil.Uint64 `json:"homesteadTransition"`
EIP100bTransition hexutil.Uint64 `json:"eip100bTransition"`
} `json:"params"`
} `json:"Ethash"`
} `json:"engine"`
Params struct {
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
NetworkID hexutil.Uint64 `json:"networkID"`
MaxCodeSize uint64 `json:"maxCodeSize"`
EIP155Transition uint64 `json:"eip155Transition"`
EIP98Transition uint64 `json:"eip98Transition"`
EIP86Transition uint64 `json:"eip86Transition"`
EIP140Transition uint64 `json:"eip140Transition"`
EIP211Transition uint64 `json:"eip211Transition"`
EIP214Transition uint64 `json:"eip214Transition"`
EIP658Transition uint64 `json:"eip658Transition"`
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
} `json:"params"`
Genesis struct {
@ -215,22 +259,22 @@ type parityChainSpec struct {
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
Nodes []string `json:"nodes"`
Accounts map[common.Address]*parityChainSpecAccount `json:"accounts"`
Nodes []string `json:"nodes"`
Accounts map[common.UnprefixedAddress]*parityChainSpecAccount `json:"accounts"`
}
// parityChainSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type parityChainSpecAccount struct {
Balance *hexutil.Big `json:"balance"`
Nonce uint64 `json:"nonce,omitempty"`
Balance math2.HexOrDecimal256 `json:"balance"`
Nonce math2.HexOrDecimal64 `json:"nonce,omitempty"`
Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"`
}
// parityChainSpecBuiltin is the precompiled contract definition.
type parityChainSpecBuiltin struct {
Name string `json:"name,omitempty"`
ActivateAt uint64 `json:"activate_at,omitempty"`
ActivateAt math2.HexOrDecimal64 `json:"activate_at,omitempty"`
Pricing *parityChainSpecPricing `json:"pricing,omitempty"`
}
@ -265,34 +309,51 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
}
// Reconstruct the chain spec in Parity's format
spec := &parityChainSpec{
Name: network,
Nodes: bootnodes,
Name: network,
Nodes: bootnodes,
Datadir: strings.ToLower(network),
}
spec.Engine.Ethash.Params.BlockReward = make(map[string]string)
spec.Engine.Ethash.Params.DifficultyBombDelays = make(map[string]string)
// Frontier
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
spec.Engine.Ethash.Params.EIP150Transition = genesis.Config.EIP150Block.Uint64()
spec.Engine.Ethash.Params.EIP160Transition = genesis.Config.EIP155Block.Uint64()
spec.Engine.Ethash.Params.EIP161abcTransition = genesis.Config.EIP158Block.Uint64()
spec.Engine.Ethash.Params.EIP161dTransition = genesis.Config.EIP158Block.Uint64()
spec.Engine.Ethash.Params.EIP649Reward = (*hexutil.Big)(ethash.ByzantiumBlockReward)
spec.Engine.Ethash.Params.EIP100bTransition = genesis.Config.ByzantiumBlock.Uint64()
spec.Engine.Ethash.Params.EIP649Transition = genesis.Config.ByzantiumBlock.Uint64()
spec.Engine.Ethash.Params.BlockReward["0x0"] = hexutil.EncodeBig(ethash.FrontierBlockReward)
// Homestead
spec.Engine.Ethash.Params.HomesteadTransition = hexutil.Uint64(genesis.Config.HomesteadBlock.Uint64())
// Tangerine Whistle : 150
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-608.md
spec.Params.EIP150Transition = hexutil.Uint64(genesis.Config.EIP150Block.Uint64())
// Spurious Dragon: 155, 160, 161, 170
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-607.md
spec.Params.EIP155Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
spec.Params.EIP160Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
spec.Params.EIP161abcTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
spec.Params.EIP161dTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
// Byzantium
if num := genesis.Config.ByzantiumBlock; num != nil {
spec.setByzantium(num)
}
// Constantinople
if num := genesis.Config.ConstantinopleBlock; num != nil {
spec.setConstantinople(num)
}
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaxCodeSize = params.MaxCodeSize
spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
spec.Params.EIP98Transition = math.MaxUint64
spec.Params.EIP86Transition = math.MaxUint64
spec.Params.EIP140Transition = genesis.Config.ByzantiumBlock.Uint64()
spec.Params.EIP211Transition = genesis.Config.ByzantiumBlock.Uint64()
spec.Params.EIP214Transition = genesis.Config.ByzantiumBlock.Uint64()
spec.Params.EIP658Transition = genesis.Config.ByzantiumBlock.Uint64()
// geth enforces the max code size limit from block zero
spec.Params.MaxCodeSizeTransition = 0
// EIP98 is unused; disable it by scheduling the transition out of reach
spec.Params.EIP98Transition = math.MaxInt64
spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8))
binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce)
@ -305,42 +366,77 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
spec.Accounts = make(map[common.Address]*parityChainSpecAccount)
spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
for address, account := range genesis.Alloc {
spec.Accounts[address] = &parityChainSpecAccount{
Balance: (*hexutil.Big)(account.Balance),
Nonce: account.Nonce,
bal := math2.HexOrDecimal256(*account.Balance)
spec.Accounts[common.UnprefixedAddress(address)] = &parityChainSpecAccount{
Balance: bal,
Nonce: math2.HexOrDecimal64(account.Nonce),
}
}
spec.Accounts[common.BytesToAddress([]byte{1})].Builtin = &parityChainSpecBuiltin{
Name: "ecrecover", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}},
}
spec.Accounts[common.BytesToAddress([]byte{2})].Builtin = &parityChainSpecBuiltin{
spec.setPrecompile(1, &parityChainSpecBuiltin{Name: "ecrecover",
Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}}})
spec.setPrecompile(2, &parityChainSpecBuiltin{
Name: "sha256", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 60, Word: 12}},
}
spec.Accounts[common.BytesToAddress([]byte{3})].Builtin = &parityChainSpecBuiltin{
})
spec.setPrecompile(3, &parityChainSpecBuiltin{
Name: "ripemd160", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 600, Word: 120}},
}
spec.Accounts[common.BytesToAddress([]byte{4})].Builtin = &parityChainSpecBuiltin{
})
spec.setPrecompile(4, &parityChainSpecBuiltin{
Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
}
})
if genesis.Config.ByzantiumBlock != nil {
spec.Accounts[common.BytesToAddress([]byte{5})].Builtin = &parityChainSpecBuiltin{
Name: "modexp", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
}
spec.Accounts[common.BytesToAddress([]byte{6})].Builtin = &parityChainSpecBuiltin{
Name: "alt_bn128_add", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
}
spec.Accounts[common.BytesToAddress([]byte{7})].Builtin = &parityChainSpecBuiltin{
Name: "alt_bn128_mul", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
}
spec.Accounts[common.BytesToAddress([]byte{8})].Builtin = &parityChainSpecBuiltin{
Name: "alt_bn128_pairing", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
}
blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64())
spec.setPrecompile(5, &parityChainSpecBuiltin{
Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
})
spec.setPrecompile(6, &parityChainSpecBuiltin{
Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
})
spec.setPrecompile(7, &parityChainSpecBuiltin{
Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
})
spec.setPrecompile(8, &parityChainSpecBuiltin{
Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
})
}
return spec, nil
}
func (spec *parityChainSpec) setPrecompile(address byte, data *parityChainSpecBuiltin) {
if spec.Accounts == nil {
spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
}
a := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
if _, exist := spec.Accounts[a]; !exist {
spec.Accounts[a] = &parityChainSpecAccount{}
}
spec.Accounts[a].Builtin = data
}
func (spec *parityChainSpec) setByzantium(num *big.Int) {
spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ByzantiumBlockReward)
spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(3000000)
n := hexutil.Uint64(num.Uint64())
spec.Engine.Ethash.Params.EIP100bTransition = n
spec.Params.EIP140Transition = n
spec.Params.EIP211Transition = n
spec.Params.EIP214Transition = n
spec.Params.EIP658Transition = n
}
func (spec *parityChainSpec) setConstantinople(num *big.Int) {
spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ConstantinopleBlockReward)
spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(2000000)
n := hexutil.Uint64(num.Uint64())
spec.Params.EIP145Transition = n
spec.Params.EIP1014Transition = n
spec.Params.EIP1052Transition = n
spec.Params.EIP1283Transition = n
}
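As a sanity check on the constants, a hedged standalone sketch (not part of the diff; assumes it runs inside cmd/puppeth where the types and imports are available) of what setConstantinople writes for the stureby Constantinople block of 40000:

spec := new(parityChainSpec)
spec.Engine.Ethash.Params.BlockReward = map[string]string{}
spec.Engine.Ethash.Params.DifficultyBombDelays = map[string]string{}
spec.setConstantinople(big.NewInt(40000))
// spec.Engine.Ethash.Params.BlockReward["0x9c40"] == "0x1bc16d674ec80000" (2 ETH)
// spec.Engine.Ethash.Params.DifficultyBombDelays["0x9c40"] == "0x1e8480" (2,000,000 blocks)
// The EIP145/1014/1052/1283 transitions are all pinned to the same block.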
// pyEthereumGenesisSpec represents the genesis specification format used by the
// Python Ethereum implementation.
type pyEthereumGenesisSpec struct {

View File

@ -0,0 +1,109 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"reflect"
"strings"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/core"
)
// Tests the go-ethereum to Aleth chainspec conversion for the Stureby testnet.
func TestAlethSturebyConverter(t *testing.T) {
blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
if err != nil {
t.Fatalf("could not read file: %v", err)
}
var genesis core.Genesis
if err := json.Unmarshal(blob, &genesis); err != nil {
t.Fatalf("failed parsing genesis: %v", err)
}
spec, err := newAlethGenesisSpec("stureby", &genesis)
if err != nil {
t.Fatalf("failed creating chainspec: %v", err)
}
expBlob, err := ioutil.ReadFile("testdata/stureby_aleth.json")
if err != nil {
t.Fatalf("could not read file: %v", err)
}
expspec := &alethGenesisSpec{}
if err := json.Unmarshal(expBlob, expspec); err != nil {
t.Fatalf("failed parsing genesis: %v", err)
}
if !reflect.DeepEqual(expspec, spec) {
t.Errorf("chainspec mismatch")
c := spew.ConfigState{
DisablePointerAddresses: true,
SortKeys: true,
}
exp := strings.Split(c.Sdump(expspec), "\n")
got := strings.Split(c.Sdump(spec), "\n")
for i := 0; i < len(exp) && i < len(got); i++ {
if exp[i] != got[i] {
fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
}
}
}
}
// Tests the go-ethereum to Parity chainspec conversion for the Stureby testnet.
func TestParitySturebyConverter(t *testing.T) {
blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
if err != nil {
t.Fatalf("could not read file: %v", err)
}
var genesis core.Genesis
if err := json.Unmarshal(blob, &genesis); err != nil {
t.Fatalf("failed parsing genesis: %v", err)
}
spec, err := newParityChainSpec("Stureby", &genesis, []string{})
if err != nil {
t.Fatalf("failed creating chainspec: %v", err)
}
expBlob, err := ioutil.ReadFile("testdata/stureby_parity.json")
if err != nil {
t.Fatalf("could not read file: %v", err)
}
expspec := &parityChainSpec{}
if err := json.Unmarshal(expBlob, expspec); err != nil {
t.Fatalf("failed parsing genesis: %v", err)
}
expspec.Nodes = []string{}
if !reflect.DeepEqual(expspec, spec) {
t.Errorf("chainspec mismatch")
c := spew.ConfigState{
DisablePointerAddresses: true,
SortKeys: true,
}
exp := strings.Split(c.Sdump(expspec), "\n")
got := strings.Split(c.Sdump(spec), "\n")
for i := 0; i < len(exp) && i < len(got); i++ {
if exp[i] != got[i] {
fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
}
}
}
}

View File

@ -640,7 +640,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
files[filepath.Join(workdir, network+".json")] = genesis
if conf.Genesis.Config.Ethash != nil {
cppSpec, err := newCppEthereumGenesisSpec(network, conf.Genesis)
cppSpec, err := newAlethGenesisSpec(network, conf.Genesis)
if err != nil {
return nil, err
}

View File

@ -43,7 +43,8 @@ version: '2'
services:
ethstats:
build: .
image: {{.Network}}/ethstats{{if not .VHost}}
image: {{.Network}}/ethstats
container_name: {{.Network}}_ethstats_1{{if not .VHost}}
ports:
- "{{.Port}}:3000"{{end}}
environment:

View File

@ -77,6 +77,7 @@ services:
explorer:
build: .
image: {{.Network}}/explorer
container_name: {{.Network}}_explorer_1
ports:
- "{{.NodePort}}:{{.NodePort}}"
- "{{.NodePort}}:{{.NodePort}}/udp"{{if not .VHost}}

View File

@ -56,8 +56,10 @@ services:
faucet:
build: .
image: {{.Network}}/faucet
container_name: {{.Network}}_faucet_1
ports:
- "{{.EthPort}}:{{.EthPort}}"{{if not .VHost}}
- "{{.EthPort}}:{{.EthPort}}"
- "{{.EthPort}}:{{.EthPort}}/udp"{{if not .VHost}}
- "{{.ApiPort}}:8080"{{end}}
volumes:
- {{.Datadir}}:/root/.faucet

View File

@ -40,6 +40,7 @@ services:
nginx:
build: .
image: {{.Network}}/nginx
container_name: {{.Network}}_nginx_1
ports:
- "{{.Port}}:80"
volumes:

View File

@ -55,6 +55,7 @@ services:
{{.Type}}:
build: .
image: {{.Network}}/{{.Type}}
container_name: {{.Network}}_{{.Type}}_1
ports:
- "{{.Port}}:{{.Port}}"
- "{{.Port}}:{{.Port}}/udp"

View File

@ -57,6 +57,7 @@ services:
wallet:
build: .
image: {{.Network}}/wallet
container_name: {{.Network}}_wallet_1
ports:
- "{{.NodePort}}:{{.NodePort}}"
- "{{.NodePort}}:{{.NodePort}}/udp"

View File

@ -43,18 +43,23 @@ func main() {
Usage: "log level to emit to the screen",
},
}
app.Action = func(c *cli.Context) error {
app.Before = func(c *cli.Context) error {
// Set up the logger to print everything, and seed the random generator
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int("loglevel")), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
rand.Seed(time.Now().UnixNano())
network := c.String("network")
if strings.Contains(network, " ") || strings.Contains(network, "-") {
log.Crit("No spaces or hyphens allowed in network name")
}
// Start the wizard and relinquish control
makeWizard(c.String("network")).run()
return nil
}
app.Action = runWizard
app.Run(os.Args)
}
// runWizard starts the wizard and relinquishes control to it.
func runWizard(c *cli.Context) error {
network := c.String("network")
if strings.Contains(network, " ") || strings.Contains(network, "-") || strings.ToLower(network) != network {
log.Crit("No spaces, hyphens or capital letters allowed in network name")
}
makeWizard(c.String("network")).run()
return nil
}
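To make the tightened rule concrete, a small self-contained sketch (hypothetical helper; it merely restates the predicate used above):

valid := func(name string) bool {
	return !strings.Contains(name, " ") &&
		!strings.Contains(name, "-") &&
		strings.ToLower(name) == name
}
fmt.Println(valid("stureby")) // true
fmt.Println(valid("My-Net"))  // false: hyphen and capital letters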

View File

@ -0,0 +1,112 @@
{
"sealEngine":"Ethash",
"params":{
"accountStartNonce":"0x00",
"maximumExtraDataSize":"0x20",
"homesteadForkBlock":"0x2710",
"daoHardforkBlock":"0x00",
"EIP150ForkBlock":"0x3a98",
"EIP158ForkBlock":"0x59d8",
"byzantiumForkBlock":"0x7530",
"constantinopleForkBlock":"0x9c40",
"minGasLimit":"0x1388",
"maxGasLimit":"0x7fffffffffffffff",
"tieBreakingGas":false,
"gasLimitBoundDivisor":"0x0400",
"minimumDifficulty":"0x20000",
"difficultyBoundDivisor":"0x0800",
"durationLimit":"0x0d",
"blockReward":"0x4563918244F40000",
"networkID":"0x4cb2e",
"chainID":"0x4cb2e",
"allowFutureBlocks":false
},
"genesis":{
"nonce":"0x0000000000000000",
"difficulty":"0x20000",
"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"author":"0x0000000000000000000000000000000000000000",
"timestamp":"0x59a4e76d",
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit":"0x47b760"
},
"accounts":{
"0000000000000000000000000000000000000001":{
"balance":"1",
"precompiled":{
"name":"ecrecover",
"linear":{
"base":3000,
"word":0
}
}
},
"0000000000000000000000000000000000000002":{
"balance":"1",
"precompiled":{
"name":"sha256",
"linear":{
"base":60,
"word":12
}
}
},
"0000000000000000000000000000000000000003":{
"balance":"1",
"precompiled":{
"name":"ripemd160",
"linear":{
"base":600,
"word":120
}
}
},
"0000000000000000000000000000000000000004":{
"balance":"1",
"precompiled":{
"name":"identity",
"linear":{
"base":15,
"word":3
}
}
},
"0000000000000000000000000000000000000005":{
"balance":"1",
"precompiled":{
"name":"modexp",
"startingBlock":"0x7530"
}
},
"0000000000000000000000000000000000000006":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_G1_add",
"startingBlock":"0x7530",
"linear":{
"base":500,
"word":0
}
}
},
"0000000000000000000000000000000000000007":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_G1_mul",
"startingBlock":"0x7530",
"linear":{
"base":40000,
"word":0
}
}
},
"0000000000000000000000000000000000000008":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_pairing_product",
"startingBlock":"0x7530"
}
}
}
}

View File

@ -0,0 +1,47 @@
{
"config": {
"ethash":{},
"chainId": 314158,
"homesteadBlock": 10000,
"eip150Block": 15000,
"eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"eip155Block": 23000,
"eip158Block": 23000,
"byzantiumBlock": 30000,
"constantinopleBlock": 40000
},
"nonce": "0x0",
"timestamp": "0x59a4e76d",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit": "0x47b760",
"difficulty": "0x20000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"0000000000000000000000000000000000000001": {
"balance": "0x01"
},
"0000000000000000000000000000000000000002": {
"balance": "0x01"
},
"0000000000000000000000000000000000000003": {
"balance": "0x01"
},
"0000000000000000000000000000000000000004": {
"balance": "0x01"
},
"0000000000000000000000000000000000000005": {
"balance": "0x01"
},
"0000000000000000000000000000000000000006": {
"balance": "0x01"
},
"0000000000000000000000000000000000000007": {
"balance": "0x01"
},
"0000000000000000000000000000000000000008": {
"balance": "0x01"
}
}
}

View File

@ -0,0 +1,181 @@
{
"name":"Stureby",
"dataDir":"stureby",
"engine":{
"Ethash":{
"params":{
"minimumDifficulty":"0x20000",
"difficultyBoundDivisor":"0x800",
"durationLimit":"0xd",
"blockReward":{
"0x0":"0x4563918244f40000",
"0x7530":"0x29a2241af62c0000",
"0x9c40":"0x1bc16d674ec80000"
},
"homesteadTransition":"0x2710",
"eip100bTransition":"0x7530",
"difficultyBombDelays":{
"0x7530":"0x2dc6c0",
"0x9c40":"0x1e8480"
}
}
}
},
"params":{
"accountStartNonce":"0x0",
"maximumExtraDataSize":"0x20",
"gasLimitBoundDivisor":"0x400",
"minGasLimit":"0x1388",
"networkID":"0x4cb2e",
"chainID":"0x4cb2e",
"maxCodeSize":"0x6000",
"maxCodeSizeTransition":"0x0",
"eip98Transition": "0x7fffffffffffffff",
"eip150Transition":"0x3a98",
"eip160Transition":"0x59d8",
"eip161abcTransition":"0x59d8",
"eip161dTransition":"0x59d8",
"eip155Transition":"0x59d8",
"eip140Transition":"0x7530",
"eip211Transition":"0x7530",
"eip214Transition":"0x7530",
"eip658Transition":"0x7530",
"eip145Transition":"0x9c40",
"eip1014Transition":"0x9c40",
"eip1052Transition":"0x9c40",
"eip1283Transition":"0x9c40"
},
"genesis":{
"seal":{
"ethereum":{
"nonce":"0x0000000000000000",
"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty":"0x20000",
"author":"0x0000000000000000000000000000000000000000",
"timestamp":"0x59a4e76d",
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit":"0x47b760"
},
"nodes":[
"enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303",
"enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303",
"enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303",
"enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303",
"enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303",
"enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303",
"enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404",
"enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414"
],
"accounts":{
"0000000000000000000000000000000000000001":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"ecrecover",
"pricing":{
"linear":{
"base":3000,
"word":0
}
}
}
},
"0000000000000000000000000000000000000002":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"sha256",
"pricing":{
"linear":{
"base":60,
"word":12
}
}
}
},
"0000000000000000000000000000000000000003":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"ripemd160",
"pricing":{
"linear":{
"base":600,
"word":120
}
}
}
},
"0000000000000000000000000000000000000004":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"identity",
"pricing":{
"linear":{
"base":15,
"word":3
}
}
}
},
"0000000000000000000000000000000000000005":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"modexp",
"activate_at":"0x7530",
"pricing":{
"modexp":{
"divisor":20
}
}
}
},
"0000000000000000000000000000000000000006":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_add",
"activate_at":"0x7530",
"pricing":{
"linear":{
"base":500,
"word":0
}
}
}
},
"0000000000000000000000000000000000000007":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_mul",
"activate_at":"0x7530",
"pricing":{
"linear":{
"base":40000,
"word":0
}
}
}
},
"0000000000000000000000000000000000000008":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_pairing",
"activate_at":"0x7530",
"pricing":{
"alt_bn128_pairing":{
"base":100000,
"pair":80000
}
}
}
}
}
}

View File

@ -23,6 +23,7 @@ import (
"io/ioutil"
"math/big"
"net"
"net/url"
"os"
"path/filepath"
"sort"
@ -118,6 +119,47 @@ func (w *wizard) readDefaultString(def string) string {
return def
}
// readDefaultYesNo reads a single line from stdin, trimming surrounding spaces and
// interpreting it as a 'yes' or a 'no'. If an empty line is entered, the default
// value is returned.
func (w *wizard) readDefaultYesNo(def bool) bool {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
if text = strings.ToLower(strings.TrimSpace(text)); text == "" {
return def
}
if text == "y" || text == "yes" {
return true
}
if text == "n" || text == "no" {
return false
}
log.Error("Invalid input, expected 'y', 'yes', 'n', 'no' or empty")
}
}
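The call-site migration this helper enables is mechanical; a before/after sketch (both forms appear verbatim in the deploy wizards below):

// Before: raw string comparison, with the default encoded only in the prompt.
nocache = w.readDefaultString("n") != "n"
// After: the default is the argument, and unexpected input is re-prompted
// instead of being silently interpreted.
nocache = w.readDefaultYesNo(false)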
// readURL reads a single line from stdin, trimming surrounding spaces and trying to
// interpret it as a URL (http, https or file).
func (w *wizard) readURL() *url.URL {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
uri, err := url.Parse(strings.TrimSpace(text))
if err != nil {
log.Error("Invalid input, expected URL", "err", err)
continue
}
return uri
}
}
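A short sketch (assumed inputs) of how the scheme switch in importGenesis below interprets what readURL returns:

u, _ := url.Parse("stureby.json")                    // u.Scheme == "" -> opened as a local file
u, _ = url.Parse("https://example.org/stureby.json") // u.Scheme == "https" -> fetched over HTTP
_ = u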
// readInt reads a single line from stdin, trimming surrounding spaces and enforcing it
// to parse into an integer.
func (w *wizard) readInt() int {

View File

@ -137,14 +137,14 @@ func (w *wizard) deployDashboard() {
if w.conf.ethstats != "" {
fmt.Println()
fmt.Println("Include ethstats secret on dashboard (y/n)? (default = yes)")
infos.trusted = w.readDefaultString("y") == "y"
infos.trusted = w.readDefaultYesNo(true)
}
// Try to deploy the dashboard container on the host
nocache := false
if existed {
fmt.Println()
fmt.Printf("Should the dashboard be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployDashboard(client, w.network, &w.conf, infos, nocache); err != nil {
log.Error("Failed to deploy dashboard container", "err", err)

View File

@ -67,11 +67,11 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
// The user might want to clear the entire list, although generally probably not
fmt.Println()
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
if w.readDefaultString("n") != "n" {
if w.readDefaultYesNo(false) {
infos.banned = nil
}
// Offer the user to explicitly add/remove certain IP addresses
@ -106,7 +106,7 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Should the ethstats be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
trusted := make([]string, 0, len(w.servers))
for _, client := range w.servers {

View File

@ -100,7 +100,7 @@ func (w *wizard) deployExplorer() {
if existed {
fmt.Println()
fmt.Printf("Should the explorer be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployExplorer(client, w.network, chain, infos, nocache); err != nil {
log.Error("Failed to deploy explorer container", "err", err)

View File

@ -81,7 +81,7 @@ func (w *wizard) deployFaucet() {
if infos.captchaToken != "" {
fmt.Println()
fmt.Println("Reuse previous reCaptcha API authorization (y/n)? (default = yes)")
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
infos.captchaToken, infos.captchaSecret = "", ""
}
}
@ -89,7 +89,7 @@ func (w *wizard) deployFaucet() {
// No previous authorization (or old one discarded)
fmt.Println()
fmt.Println("Enable reCaptcha protection against robots (y/n)? (default = no)")
if w.readDefaultString("n") == "n" {
if !w.readDefaultYesNo(false) {
log.Warn("Users will be able to request funds via automated scripts")
} else {
// Captcha protection explicitly requested, read the site and secret keys
@ -132,7 +132,7 @@ func (w *wizard) deployFaucet() {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) funding account (y/n)? (default = yes)\n", key.Address.Hex())
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
infos.node.keyJSON, infos.node.keyPass = "", ""
}
}
@ -166,7 +166,7 @@ func (w *wizard) deployFaucet() {
if existed {
fmt.Println()
fmt.Printf("Should the faucet be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployFaucet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy faucet container", "err", err)

View File

@ -20,9 +20,13 @@ import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/big"
"math/rand"
"net/http"
"os"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
@ -40,11 +44,12 @@ func (w *wizard) makeGenesis() {
Difficulty: big.NewInt(524288),
Alloc: make(core.GenesisAlloc),
Config: &params.ChainConfig{
HomesteadBlock: big.NewInt(1),
EIP150Block: big.NewInt(2),
EIP155Block: big.NewInt(3),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(4),
HomesteadBlock: big.NewInt(1),
EIP150Block: big.NewInt(2),
EIP155Block: big.NewInt(3),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(4),
ConstantinopleBlock: big.NewInt(5),
},
}
// Figure out which consensus engine to choose
@ -114,9 +119,13 @@ func (w *wizard) makeGenesis() {
}
break
}
// Add a batch of precompile balances to avoid them getting deleted
for i := int64(0); i < 256; i++ {
genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
fmt.Println()
fmt.Println("Should the precompile-addresses (0x1 .. 0xff) be pre-funded with 1 wei? (advisable yes)")
if w.readDefaultYesNo(true) {
// Add a batch of precompile balances to avoid them getting deleted
for i := int64(0); i < 256; i++ {
genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
}
}
// Query the user for some custom extras
fmt.Println()
@ -130,53 +139,130 @@ func (w *wizard) makeGenesis() {
w.conf.flush()
}
// importGenesis imports a Geth genesis spec into puppeth.
func (w *wizard) importGenesis() {
// Request the genesis JSON spec URL from the user
fmt.Println()
fmt.Println("Where's the genesis file? (local file or http/https url)")
url := w.readURL()
// Convert the various allowed URLs to a reader stream
var reader io.Reader
switch url.Scheme {
case "http", "https":
// Remote web URL, retrieve it via an HTTP client
res, err := http.Get(url.String())
if err != nil {
log.Error("Failed to retrieve remote genesis", "err", err)
return
}
defer res.Body.Close()
reader = res.Body
case "":
// Schemaless URL, interpret as a local file
file, err := os.Open(url.String())
if err != nil {
log.Error("Failed to open local genesis", "err", err)
return
}
defer file.Close()
reader = file
default:
log.Error("Unsupported genesis URL scheme", "scheme", url.Scheme)
return
}
// Parse the genesis file and inject it if successful
var genesis core.Genesis
if err := json.NewDecoder(reader).Decode(&genesis); err != nil {
log.Error("Invalid genesis spec: %v", err)
return
}
log.Info("Imported genesis block")
w.conf.Genesis = &genesis
w.conf.flush()
}
// manageGenesis permits the modification of chain configuration parameters in
// a genesis config and the export of the entire genesis spec.
func (w *wizard) manageGenesis() {
// Figure out whether to modify or export the genesis
fmt.Println()
fmt.Println(" 1. Modify existing fork rules")
fmt.Println(" 2. Export genesis configuration")
fmt.Println(" 2. Export genesis configurations")
fmt.Println(" 3. Remove genesis configuration")
choice := w.read()
switch {
case choice == "1":
switch choice {
case "1":
// Fork rule updating requested, iterate over each fork
fmt.Println()
fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.Genesis.Config.HomesteadBlock)
w.conf.Genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.Genesis.Config.HomesteadBlock)
fmt.Println()
fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
fmt.Printf("Which block should EIP150 (Tangerine Whistle) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
w.conf.Genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP150Block)
fmt.Println()
fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
fmt.Printf("Which block should EIP155 (Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
w.conf.Genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP155Block)
fmt.Println()
fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
fmt.Printf("Which block should EIP158/161 (also Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
w.conf.Genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP158Block)
fmt.Println()
fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.Genesis.Config.ByzantiumBlock)
w.conf.Genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ByzantiumBlock)
fmt.Println()
fmt.Printf("Which block should Constantinople come into effect? (default = %v)\n", w.conf.Genesis.Config.ConstantinopleBlock)
w.conf.Genesis.Config.ConstantinopleBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ConstantinopleBlock)
out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ")
fmt.Printf("Chain configuration updated:\n\n%s\n", out)
case choice == "2":
case "2":
// Save whatever genesis configuration we currently have
fmt.Println()
fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")
if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
log.Error("Failed to save genesis file", "err", err)
}
log.Info("Exported existing genesis block")
fmt.Printf("Which folder to save the genesis specs into? (default = current)\n")
fmt.Printf(" Will create %s.json, %s-aleth.json, %s-harmony.json, %s-parity.json\n", w.network, w.network, w.network, w.network)
case choice == "3":
folder := w.readDefaultString(".")
if err := os.MkdirAll(folder, 0755); err != nil {
log.Error("Failed to create spec folder", "folder", folder, "err", err)
return
}
out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")
// Export the native genesis spec used by puppeth and Geth
gethJson := filepath.Join(folder, fmt.Sprintf("%s.json", w.network))
if err := ioutil.WriteFile(gethJson, out, 0644); err != nil {
log.Error("Failed to save genesis file", "err", err)
return
}
log.Info("Saved native genesis chain spec", "path", gethJson)
// Export the genesis spec used by Aleth (formerly C++ Ethereum)
if spec, err := newAlethGenesisSpec(w.network, w.conf.Genesis); err != nil {
log.Error("Failed to create Aleth chain spec", "err", err)
} else {
saveGenesis(folder, w.network, "aleth", spec)
}
// Export the genesis spec used by Parity
if spec, err := newParityChainSpec(w.network, w.conf.Genesis, []string{}); err != nil {
log.Error("Failed to create Parity chain spec", "err", err)
} else {
saveGenesis(folder, w.network, "parity", spec)
}
// Export the genesis spec used by Harmony (formerly EthereumJ)
saveGenesis(folder, w.network, "harmony", w.conf.Genesis)
case "3":
// Make sure we don't have any services running
if len(w.conf.servers()) > 0 {
log.Error("Genesis reset requires all services and servers torn down")
@ -186,8 +272,20 @@ func (w *wizard) manageGenesis() {
w.conf.Genesis = nil
w.conf.flush()
default:
log.Error("That's not something I can do")
return
}
}
// saveGenesis JSON encodes an arbitrary genesis spec into a pre-defined file.
func saveGenesis(folder, network, client string, spec interface{}) {
path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))
out, _ := json.Marshal(spec)
if err := ioutil.WriteFile(path, out, 0644); err != nil {
log.Error("Failed to save genesis file", "client", client, "err", err)
return
}
log.Info("Saved genesis chain spec", "client", client, "path", path)
}
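A hedged usage sketch (network name assumed): this is how manageGenesis above drives the helper, producing one spec file per client next to the native one:

if spec, err := newParityChainSpec("stureby", w.conf.Genesis, []string{}); err == nil {
	saveGenesis(".", "stureby", "parity", spec) // writes ./stureby-parity.json
}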

View File

@ -61,14 +61,14 @@ func (w *wizard) run() {
// Make sure we have a good network name to work with
fmt.Println()
// Docker accepts hyphens in image names, but doesn't accept them in container names
if w.network == "" {
fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)")
fmt.Println("Please specify a network name to administer (no spaces, hyphens or capital letters please)")
for {
w.network = w.readString()
if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") {
if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") && strings.ToLower(w.network) == w.network {
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
break
}
log.Error("I also like to live dangerously, still no spaces or hyphens")
log.Error("I also like to live dangerously, still no spaces, hyphens or capital letters")
}
}
log.Info("Administering Ethereum network", "name", w.network)
@ -131,7 +131,20 @@ func (w *wizard) run() {
case choice == "2":
if w.conf.Genesis == nil {
w.makeGenesis()
fmt.Println()
fmt.Println("What would you like to do? (default = create)")
fmt.Println(" 1. Create new genesis from scratch")
fmt.Println(" 2. Import already existing genesis")
choice := w.read()
switch {
case choice == "" || choice == "1":
w.makeGenesis()
case choice == "2":
w.importGenesis()
default:
log.Error("That's not something I can do")
}
} else {
w.manageGenesis()
}
@ -149,7 +162,6 @@ func (w *wizard) run() {
} else {
w.manageComponents()
}
default:
log.Error("That's not something I can do")
}

View File

@ -41,12 +41,12 @@ func (w *wizard) ensureVirtualHost(client *sshClient, port int, def string) (str
// Reverse proxy is not running, offer to deploy a new one
fmt.Println()
fmt.Println("Allow sharing the port with other services (y/n)? (default = yes)")
if w.readDefaultString("y") == "y" {
if w.readDefaultYesNo(true) {
nocache := false
if proxy != nil {
fmt.Println()
fmt.Printf("Should the reverse-proxy be rebuilt from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployNginx(client, w.network, port, nocache); err != nil {
log.Error("Failed to deploy reverse-proxy", "err", err)

View File

@ -126,7 +126,7 @@ func (w *wizard) deployNode(boot bool) {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) signing account (y/n)? (default = yes)\n", key.Address.Hex())
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
infos.keyJSON, infos.keyPass = "", ""
}
}
@ -165,7 +165,7 @@ func (w *wizard) deployNode(boot bool) {
if existed {
fmt.Println()
fmt.Printf("Should the node be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployNode(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy Ethereum node container", "err", err)

View File

@ -96,7 +96,7 @@ func (w *wizard) deployWallet() {
if existed {
fmt.Println()
fmt.Printf("Should the wallet be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployWallet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy wallet container", "err", err)

View File

@ -14,8 +14,6 @@
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// +build !windows
package main
import (
@ -28,6 +26,7 @@ import (
gorand "math/rand"
"net/http"
"os"
"runtime"
"strings"
"testing"
"time"
@ -37,8 +36,7 @@ import (
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
swarmapi "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
@ -49,22 +47,41 @@ const (
var DefaultCurve = crypto.S256()
// TestAccessPassword tests for the correct creation of an ACT manifest protected by a password.
func TestACT(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
initCluster(t)
cases := []struct {
name string
f func(t *testing.T)
}{
{"Password", testPassword},
{"PK", testPK},
{"ACTWithoutBogus", testACTWithoutBogus},
{"ACTWithBogus", testACTWithBogus},
}
for _, tc := range cases {
t.Run(tc.name, tc.f)
}
}
// testPassword tests for the correct creation of an ACT manifest protected by a password.
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry
// The parties participating: a node (publisher) uploads to a second node, then disappears. The content that was uploaded
// is then fetched through the 2nd node. Since the tested code is not key-aware, we can just
// fetch from the 2nd node using HTTP BasicAuth
func TestAccessPassword(t *testing.T) {
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
func testPassword(t *testing.T) {
dataFilename := testutil.TempFileWithContent(t, data)
defer os.RemoveAll(dataFilename)
// upload the file with 'swarm up' and expect a hash
up := runSwarm(t,
"--bzzapi",
srv.URL, //it doesn't matter through which node we upload content
cluster.Nodes[0].URL,
"up",
"--encrypt",
dataFilename)
@ -138,16 +155,17 @@ func TestAccessPassword(t *testing.T) {
if a.Publisher != "" {
t.Fatal("should be empty")
}
client := swarm.NewClient(srv.URL)
client := swarmapi.NewClient(cluster.Nodes[0].URL)
hash, err := client.UploadManifest(&m, false)
if err != nil {
t.Fatal(err)
}
httpClient := &http.Client{}
url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash
url := srv.URL + "/" + "bzz:/" + hash
httpClient := &http.Client{}
response, err := httpClient.Get(url)
if err != nil {
t.Fatal(err)
@ -189,7 +207,7 @@ func TestAccessPassword(t *testing.T) {
//download file with 'swarm down' with wrong password
up = runSwarm(t,
"--bzzapi",
srv.URL,
cluster.Nodes[0].URL,
"down",
"bzz:/"+hash,
tmp,
@ -203,16 +221,12 @@ func TestAccessPassword(t *testing.T) {
up.ExpectExit()
}
// TestAccessPK tests for the correct creation of an ACT manifest between two parties (publisher and grantee).
// testPK tests for the correct creation of an ACT manifest between two parties (publisher and grantee).
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry
// The parties participating: a node (publisher) uploads to a second node (which is also the grantee), then disappears.
// Content which was uploaded is then fetched through the grantee's http proxy. Since the tested code is private-key aware,
// the test will fail if the proxy's given private key is not granted on the ACT.
func TestAccessPK(t *testing.T) {
// Setup Swarm and upload a test file to it
cluster := newTestCluster(t, 2)
defer cluster.Shutdown()
func testPK(t *testing.T) {
dataFilename := testutil.TempFileWithContent(t, data)
defer os.RemoveAll(dataFilename)
@ -318,7 +332,7 @@ func TestAccessPK(t *testing.T) {
if a.Publisher != pkComp {
t.Fatal("publisher key did not match")
}
client := swarm.NewClient(cluster.Nodes[0].URL)
client := swarmapi.NewClient(cluster.Nodes[0].URL)
hash, err := client.UploadManifest(&m, false)
if err != nil {
@ -344,29 +358,24 @@ func TestAccessPK(t *testing.T) {
}
}
// TestAccessACT tests the creation of the ACT manifest end-to-end, without any bogus entries (i.e. default scenario = 3 nodes 1 unauthorized)
func TestAccessACT(t *testing.T) {
testAccessACT(t, 0)
// testACTWithoutBogus tests the creation of the ACT manifest end-to-end, without any bogus entries (i.e. default scenario = 3 nodes 1 unauthorized)
func testACTWithoutBogus(t *testing.T) {
testACT(t, 0)
}
// TestAccessACTScale tests the creation of the ACT manifest end-to-end, with 1000 bogus entries (i.e. 1000 EC keys + default scenario = 3 nodes 1 unauthorized = 1003 keys in the ACT manifest)
func TestAccessACTScale(t *testing.T) {
testAccessACT(t, 1000)
// testACTWithBogus tests the creation of the ACT manifest end-to-end, with 100 bogus entries (i.e. 100 EC keys + default scenario = 3 nodes 1 unauthorized = 103 keys in the ACT manifest)
func testACTWithBogus(t *testing.T) {
testACT(t, 100)
}
// TestAccessACT tests the e2e creation, uploading and downloading of an ACT access control with both EC keys AND password protection
// testACT tests the e2e creation, uploading and downloading of an ACT access control with both EC keys AND password protection
// the test fires up a 3 node cluster, then randomly picks 2 nodes which will be acting as grantees to the data
// set and also protects the ACT with a password. The third node should fail decoding the reference as it will not be granted access.
// The third node then tries to download using a correct password (and succeeds), then uses a wrong password and fails.
// the publisher uploads through one of the nodes then disappears.
func testAccessACT(t *testing.T, bogusEntries int) {
// Setup Swarm and upload a test file to it
const clusterSize = 3
cluster := newTestCluster(t, clusterSize)
defer cluster.Shutdown()
func testACT(t *testing.T, bogusEntries int) {
var uploadThroughNode = cluster.Nodes[0]
client := swarm.NewClient(uploadThroughNode.URL)
client := swarmapi.NewClient(uploadThroughNode.URL)
r1 := gorand.New(gorand.NewSource(time.Now().UnixNano()))
nodeToSkip := r1.Intn(clusterSize) // a number between 0 and 2 (node indices in `cluster`)

View File

@ -26,14 +26,14 @@ import (
"testing"
"time"
"github.com/docker/docker/pkg/reexec"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/docker/docker/pkg/reexec"
)
func TestDumpConfig(t *testing.T) {
func TestConfigDump(t *testing.T) {
swarm := runSwarm(t, "dumpconfig")
defaultConf := api.NewConfig()
out, err := tomlSettings.Marshal(&defaultConf)
@ -91,8 +91,8 @@ func TestConfigCmdLineOverrides(t *testing.T) {
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
"--datadir", dir,
"--ipcpath", conf.IPCPath,
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@ -189,9 +189,9 @@ func TestConfigFileOverrides(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
"--ens-api", "",
"--ipcpath", conf.IPCPath,
"--datadir", dir,
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@ -407,9 +407,9 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
"--ens-api", "",
"--datadir", dir,
"--ipcpath", conf.IPCPath,
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@ -466,7 +466,7 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
node.Shutdown()
}
func TestValidateConfig(t *testing.T) {
func TestConfigValidate(t *testing.T) {
for _, c := range []struct {
cfg *api.Config
err string

View File

@ -43,8 +43,8 @@ func TestCLISwarmExportImport(t *testing.T) {
}
cluster := newTestCluster(t, 1)
// generate random 10mb file
content := testutil.RandomBytes(1, 10000000)
// generate random 1mb file
content := testutil.RandomBytes(1, 1000000)
fileName := testutil.TempFileWithContent(t, string(content))
defer os.Remove(fileName)

View File

@ -169,7 +169,6 @@ func feedUpdate(ctx *cli.Context) {
query = new(feed.Query)
query.User = signer.Address()
query.Topic = getTopic(ctx)
}
// Retrieve a feed update request
@ -178,6 +177,11 @@ func feedUpdate(ctx *cli.Context) {
utils.Fatalf("Error retrieving feed status: %s", err.Error())
}
// Check that the provided signer matches the request to sign
if updateRequest.User != signer.Address() {
utils.Fatalf("Signer address does not match the update request")
}
// set the new data
updateRequest.SetData(data)

View File

@ -19,7 +19,6 @@ package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"testing"
@ -36,7 +35,6 @@ import (
)
func TestCLIFeedUpdate(t *testing.T) {
srv := swarmhttp.NewTestSwarmServer(t, func(api *api.API) swarmhttp.TestServer {
return swarmhttp.NewServer(api, "")
}, nil)
@ -44,7 +42,6 @@ func TestCLIFeedUpdate(t *testing.T) {
defer srv.Close()
// create a private key file for signing
privkeyHex := "0000000000000000000000000000000000000000000000000000000000001979"
privKey, _ := crypto.HexToECDSA(privkeyHex)
address := crypto.PubkeyToAddress(privKey.PublicKey)
@ -71,7 +68,7 @@ func TestCLIFeedUpdate(t *testing.T) {
hexData}
// create an update and expect an exit without errors
log.Info(fmt.Sprintf("updating a feed with 'swarm feed update'"))
log.Info("updating a feed with 'swarm feed update'")
cmd := runSwarm(t, flags...)
cmd.ExpectExit()
@ -118,7 +115,7 @@ func TestCLIFeedUpdate(t *testing.T) {
"--user", address.Hex(),
}
log.Info(fmt.Sprintf("getting feed info with 'swarm feed info'"))
log.Info("getting feed info with 'swarm feed info'")
cmd = runSwarm(t, flags...)
_, matches := cmd.ExpectRegexp(`.*`) // regex hack to extract stdout
cmd.ExpectExit()
@ -143,9 +140,9 @@ func TestCLIFeedUpdate(t *testing.T) {
"--topic", topic.Hex(),
}
log.Info(fmt.Sprintf("Publishing manifest with 'swarm feed create'"))
log.Info("Publishing manifest with 'swarm feed create'")
cmd = runSwarm(t, flags...)
_, matches = cmd.ExpectRegexp(`[a-f\d]{64}`) // regex hack to extract stdout
_, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
cmd.ExpectExit()
manifestAddress := matches[0] // read the received feed manifest
@ -164,4 +161,36 @@ func TestCLIFeedUpdate(t *testing.T) {
if !bytes.Equal(data, retrieved) {
t.Fatalf("Received %s, expected %s", retrieved, data)
}
// test publishing a manifest for a different user
flags = []string{
"--bzzapi", srv.URL,
"feed", "create",
"--topic", topic.Hex(),
"--user", "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // different user
}
log.Info("Publishing manifest with 'swarm feed create' for a different user")
cmd = runSwarm(t, flags...)
_, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
cmd.ExpectExit()
manifestAddress = matches[0] // read the received feed manifest
// now let's try to update that user's manifest which we don't have the private key for
flags = []string{
"--bzzapi", srv.URL,
"--bzzaccount", pkFileName,
"feed", "update",
"--manifest", manifestAddress,
hexData}
// create an update and expect an error given there is a user mismatch
log.Info("updating a feed with 'swarm feed update'")
cmd = runSwarm(t, flags...)
cmd.ExpectRegexp("Fatal:.*") // best way so far to detect a failure.
cmd.ExpectExit()
if cmd.ExitStatus() == 0 {
t.Fatal("Expected nonzero exit code when updating a manifest with the wrong user. Got 0.")
}
}

View File

@ -24,7 +24,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/fuse"
"gopkg.in/urfave/cli.v1"
@ -41,27 +41,24 @@ var fsCommand = cli.Command{
Action: mount,
CustomHelpTemplate: helpTemplate,
Name: "mount",
Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "mount a swarm hash to a mount point",
ArgsUsage: "swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>",
ArgsUsage: "swarm fs mount <manifest hash> <mount point>",
Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: unmount,
CustomHelpTemplate: helpTemplate,
Name: "unmount",
Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "unmount a swarmfs mount",
ArgsUsage: "swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>",
ArgsUsage: "swarm fs unmount <mount point>",
Description: "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: listMounts,
CustomHelpTemplate: helpTemplate,
Name: "list",
Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "list swarmfs mounts",
ArgsUsage: "swarm fs list --ipcpath <path to bzzd.ipc>",
ArgsUsage: "swarm fs list",
Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
},
@ -70,7 +67,7 @@ var fsCommand = cli.Command{
func mount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 2 {
utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
utils.Fatalf("Usage: swarm fs mount <manifestHash> <file name>")
}
client, err := dialRPC(cliContext)
@ -97,7 +94,7 @@ func unmount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 1 {
utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
utils.Fatalf("Usage: swarm fs unmount <mount path>")
}
client, err := dialRPC(cliContext)
if err != nil {
@ -145,20 +142,21 @@ func listMounts(cliContext *cli.Context) {
}
func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
var endpoint string
endpoint := getIPCEndpoint(ctx)
log.Info("IPC endpoint", "path", endpoint)
return rpc.Dial(endpoint)
}
if ctx.IsSet(utils.IPCPathFlag.Name) {
endpoint = ctx.String(utils.IPCPathFlag.Name)
} else {
utils.Fatalf("swarm ipc endpoint not specified")
}
func getIPCEndpoint(ctx *cli.Context) string {
cfg := defaultNodeConfig
utils.SetNodeConfig(ctx, &cfg)
if endpoint == "" {
endpoint = node.DefaultIPCEndpoint(clientIdentifier)
} else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
endpoint := cfg.IPCEndpoint()
if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
// Backwards compatibility with geth < 1.5 which required
// these prefixes.
endpoint = endpoint[4:]
}
return rpc.Dial(endpoint)
return endpoint
}
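A minimal standalone sketch of the prefix handling above (the endpoint value is a made-up example, not taken from this change):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical legacy value: geth < 1.5 stored endpoints with a scheme prefix.
	endpoint := "ipc:/home/user/.ethereum/bzzd.ipc"
	if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
		endpoint = endpoint[4:] // strip the 4-byte prefix, as getIPCEndpoint does
	}
	fmt.Println(endpoint) // prints /home/user/.ethereum/bzzd.ipc
}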

View File

@ -20,6 +20,7 @@ package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
@ -28,20 +29,35 @@ import (
"testing"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
colorable "github.com/mattn/go-colorable"
)
func init() {
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
type testFile struct {
filePath string
content string
}
// TestCLISwarmFsDefaultIPCPath tests if the most basic fs command, i.e., list,
// can find and correctly connect to a running Swarm node on the default
// IPCPath.
func TestCLISwarmFsDefaultIPCPath(t *testing.T) {
cluster := newTestCluster(t, 1)
defer cluster.Shutdown()
handlingNode := cluster.Nodes[0]
list := runSwarm(t, []string{
"--datadir", handlingNode.Dir,
"fs",
"list",
}...)
list.WaitExit()
if list.Err != nil {
t.Fatal(list.Err)
}
}
// TestCLISwarmFs is a high-level test of swarmfs
//
// This test fails on travis for macOS as this executable exits with code 1
@ -65,9 +81,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
mount := runSwarm(t, []string{
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"mount",
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
mhash,
mountPoint,
}...)
@ -107,9 +123,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
unmount := runSwarm(t, []string{
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"unmount",
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
mountPoint,
}...)
_, matches := unmount.ExpectRegexp(hashRegexp)
@ -142,9 +158,9 @@ func TestCLISwarmFs(t *testing.T) {
//remount, check files
newMount := runSwarm(t, []string{
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"mount",
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
hash, // the latest hash
secondMountPoint,
}...)
@ -178,9 +194,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
unmountSec := runSwarm(t, []string{
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"unmount",
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
secondMountPoint,
}...)

View File

@ -57,6 +57,17 @@ func init() {
})
}
const clusterSize = 3
var clusteronce sync.Once
var cluster *testCluster
func initCluster(t *testing.T) {
clusteronce.Do(func() {
cluster = newTestCluster(t, clusterSize)
})
}
func serverFunc(api *api.API) swarmhttp.TestServer {
return swarmhttp.NewServer(api, "")
}

View File

@ -2,11 +2,13 @@ package main
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptrace"
"os"
"os/exec"
"strings"
@ -16,9 +18,13 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/ethereum/go-ethereum/swarm/testutil"
colorable "github.com/mattn/go-colorable"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
@ -27,16 +33,34 @@ const (
feedRandomDataLength = 8
)
// TODO: retrieve with manifest + extract repeating code
func cliFeedUploadAndSync(c *cli.Context) error {
metrics.GetOrRegisterCounter("feed-and-sync", nil).Inc(1)
log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))))
errc := make(chan error)
go func() {
errc <- feedUploadAndSync(c)
}()
select {
case err := <-errc:
if err != nil {
metrics.GetOrRegisterCounter("feed-and-sync.fail", nil).Inc(1)
}
return err
case <-time.After(time.Duration(timeout) * time.Second):
metrics.GetOrRegisterCounter("feed-and-sync.timeout", nil).Inc(1)
return fmt.Errorf("timeout after %v sec", timeout)
}
}
// TODO: retrieve with manifest + extract repeating code
func feedUploadAndSync(c *cli.Context) error {
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
generateEndpoints(scheme, cluster, from, to)
generateEndpoints(scheme, cluster, appName, from, to)
log.Info("generating and uploading MRUs to " + endpoints[0] + " and syncing")
log.Info("generating and uploading feeds to " + endpoints[0] + " and syncing")
// create a random private key to sign updates with and derive the address
pkFile, err := ioutil.TempFile("", "swarm-feed-smoke-test")
@ -205,12 +229,12 @@ func cliFeedUploadAndSync(c *cli.Context) error {
log.Info("all endpoints synced random data successfully")
// upload test file
log.Info("uploading to " + endpoints[0] + " and syncing")
seed := int(time.Now().UnixNano() / 1e6)
log.Info("feed uploading to "+endpoints[0]+" and syncing", "seed", seed)
f, cleanup := generateRandomFile(filesize * 1000)
defer cleanup()
randomBytes := testutil.RandomBytes(seed, filesize*1000)
hash, err := upload(f, endpoints[0])
hash, err := upload(&randomBytes, endpoints[0])
if err != nil {
return err
}
@ -218,9 +242,8 @@ func cliFeedUploadAndSync(c *cli.Context) error {
if err != nil {
return err
}
multihashHex := hexutil.Encode(multihash.ToMultihash(hashBytes))
fileHash, err := digest(f)
multihashHex := hexutil.Encode(hashBytes)
fileHash, err := digest(bytes.NewReader(randomBytes))
if err != nil {
return err
}
@ -286,14 +309,37 @@ func cliFeedUploadAndSync(c *cli.Context) error {
}
func fetchFeed(topic string, user string, endpoint string, original []byte, ruid string) error {
ctx, sp := spancontext.StartSpan(context.Background(), "feed-and-sync.fetch")
defer sp.Finish()
log.Trace("sleeping", "ruid", ruid)
time.Sleep(3 * time.Second)
log.Trace("http get request (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user)
res, err := http.Get(endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user)
var tn time.Time
reqUri := endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user
req, _ := http.NewRequest("GET", reqUri, nil)
opentracing.GlobalTracer().Inject(
sp.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header))
trace := client.GetClientTrace("feed-and-sync - http get", "feed-and-sync", ruid, &tn)
req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
transport := http.DefaultTransport
//transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
tn = time.Now()
res, err := transport.RoundTrip(req)
if err != nil {
log.Error(err.Error(), "ruid", ruid)
return err
}
log.Trace("http get response (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user, "code", res.StatusCode, "len", res.ContentLength)
if res.StatusCode != 200 {

View File

@ -17,23 +17,38 @@
package main
import (
"fmt"
"os"
"sort"
"github.com/ethereum/go-ethereum/cmd/utils"
gethmetrics "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/influxdb"
swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
"github.com/ethereum/go-ethereum/swarm/tracing"
"github.com/ethereum/go-ethereum/log"
cli "gopkg.in/urfave/cli.v1"
)
var (
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
)
var (
endpoints []string
includeLocalhost bool
cluster string
appName string
scheme string
filesize int
syncDelay int
from int
to int
verbosity int
timeout int
single bool
)
func main() {
@ -45,10 +60,16 @@ func main() {
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "cluster-endpoint",
Value: "testing",
Usage: "cluster to point to (local, open or testing)",
Value: "prod",
Usage: "cluster to point to (prod or a given namespace)",
Destination: &cluster,
},
cli.StringFlag{
Name: "app",
Value: "swarm",
Usage: "application to point to (swarm or swarm-private)",
Destination: &appName,
},
cli.IntFlag{
Name: "cluster-from",
Value: 8501,
@ -78,14 +99,42 @@ func main() {
Usage: "file size for generated random file in KB",
Destination: &filesize,
},
cli.IntFlag{
Name: "sync-delay",
Value: 5,
Usage: "duration of delay in seconds to wait for content to be synced",
Destination: &syncDelay,
},
cli.IntFlag{
Name: "verbosity",
Value: 1,
Usage: "verbosity",
Destination: &verbosity,
},
cli.IntFlag{
Name: "timeout",
Value: 120,
Usage: "timeout in seconds after which kill the process",
Destination: &timeout,
},
cli.BoolFlag{
Name: "single",
Usage: "whether to fetch content from a single node or from all nodes",
Destination: &single,
},
}
app.Flags = append(app.Flags, []cli.Flag{
utils.MetricsEnabledFlag,
swarmmetrics.MetricsInfluxDBEndpointFlag,
swarmmetrics.MetricsInfluxDBDatabaseFlag,
swarmmetrics.MetricsInfluxDBUsernameFlag,
swarmmetrics.MetricsInfluxDBPasswordFlag,
swarmmetrics.MetricsInfluxDBHostTagFlag,
}...)
app.Flags = append(app.Flags, tracing.Flags...)
app.Commands = []cli.Command{
{
Name: "upload_and_sync",
@ -104,8 +153,38 @@ func main() {
sort.Sort(cli.FlagsByName(app.Flags))
sort.Sort(cli.CommandsByName(app.Commands))
app.Before = func(ctx *cli.Context) error {
tracing.Setup(ctx)
return nil
}
app.After = func(ctx *cli.Context) error {
return emitMetrics(ctx)
}
err := app.Run(os.Args)
if err != nil {
log.Error(err.Error())
os.Exit(1)
}
}
func emitMetrics(ctx *cli.Context) error {
if gethmetrics.Enabled {
var (
endpoint = ctx.GlobalString(swarmmetrics.MetricsInfluxDBEndpointFlag.Name)
database = ctx.GlobalString(swarmmetrics.MetricsInfluxDBDatabaseFlag.Name)
username = ctx.GlobalString(swarmmetrics.MetricsInfluxDBUsernameFlag.Name)
password = ctx.GlobalString(swarmmetrics.MetricsInfluxDBPasswordFlag.Name)
hosttag = ctx.GlobalString(swarmmetrics.MetricsInfluxDBHostTagFlag.Name)
)
return influxdb.InfluxDBWithTagsOnce(gethmetrics.DefaultRegistry, endpoint, database, username, password, "swarm-smoke.", map[string]string{
"host": hosttag,
"version": gitCommit,
"filesize": fmt.Sprintf("%v", filesize),
})
}
return nil
}

View File

@ -18,39 +18,41 @@ package main
import (
"bytes"
"context"
"crypto/md5"
crand "crypto/rand"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/http/httptrace"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/testutil"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
func generateEndpoints(scheme string, cluster string, from int, to int) {
func generateEndpoints(scheme string, cluster string, app string, from int, to int) {
if cluster == "prod" {
cluster = ""
} else if cluster == "local" {
for port := from; port <= to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://localhost:%v", scheme, port))
for port := from; port < to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://%v.swarm-gateways.net", scheme, port))
}
return
} else {
cluster = cluster + "."
}
for port := from; port <= to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://%v.%sswarm-gateways.net", scheme, port, cluster))
for port := from; port < to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://%s-%v-%s.stg.swarm-gateways.net", scheme, app, port, cluster))
}
}
if includeLocalhost {
@ -59,22 +61,51 @@ func generateEndpoints(scheme string, cluster string, from int, to int) {
}
func cliUploadAndSync(c *cli.Context) error {
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
generateEndpoints(scheme, cluster, from, to)
metrics.GetOrRegisterCounter("upload-and-sync", nil).Inc(1)
log.Info("uploading to " + endpoints[0] + " and syncing")
errc := make(chan error)
go func() {
errc <- uploadAndSync(c)
}()
f, cleanup := generateRandomFile(filesize * 1000)
defer cleanup()
select {
case err := <-errc:
if err != nil {
metrics.GetOrRegisterCounter("upload-and-sync.fail", nil).Inc(1)
}
return err
case <-time.After(time.Duration(timeout) * time.Second):
metrics.GetOrRegisterCounter("upload-and-sync.timeout", nil).Inc(1)
return fmt.Errorf("timeout after %v sec", timeout)
}
}
hash, err := upload(f, endpoints[0])
func uploadAndSync(c *cli.Context) error {
defer func(now time.Time) {
totalTime := time.Since(now)
log.Info("total time", "time", totalTime, "kb", filesize)
metrics.GetOrRegisterCounter("upload-and-sync.total-time", nil).Inc(int64(totalTime))
}(time.Now())
generateEndpoints(scheme, cluster, appName, from, to)
seed := int(time.Now().UnixNano() / 1e6)
log.Info("uploading to "+endpoints[0]+" and syncing", "seed", seed)
randomBytes := testutil.RandomBytes(seed, filesize*1000)
t1 := time.Now()
hash, err := upload(&randomBytes, endpoints[0])
if err != nil {
log.Error(err.Error())
return err
}
metrics.GetOrRegisterCounter("upload-and-sync.upload-time", nil).Inc(int64(time.Since(t1)))
fhash, err := digest(f)
fhash, err := digest(bytes.NewReader(randomBytes))
if err != nil {
log.Error(err.Error())
return err
@ -82,23 +113,47 @@ func cliUploadAndSync(c *cli.Context) error {
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))
time.Sleep(3 * time.Second)
time.Sleep(time.Duration(syncDelay) * time.Second)
wg := sync.WaitGroup{}
for _, endpoint := range endpoints {
if single {
rand.Seed(time.Now().UTC().UnixNano())
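// Pick a random endpoint other than endpoints[0], which served the upload.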
randIndex := 1 + rand.Intn(len(endpoints)-1)
ruid := uuid.New()[:8]
wg.Add(1)
go func(endpoint string, ruid string) {
for {
start := time.Now()
err := fetch(hash, endpoint, fhash, ruid)
fetchTime := time.Since(start)
if err != nil {
continue
}
metrics.GetOrRegisterMeter("upload-and-sync.single.fetch-time", nil).Mark(int64(fetchTime))
wg.Done()
return
}
}(endpoint, ruid)
}(endpoints[randIndex], ruid)
} else {
for _, endpoint := range endpoints {
ruid := uuid.New()[:8]
wg.Add(1)
go func(endpoint string, ruid string) {
for {
start := time.Now()
err := fetch(hash, endpoint, fhash, ruid)
fetchTime := time.Since(start)
if err != nil {
continue
}
metrics.GetOrRegisterMeter("upload-and-sync.each.fetch-time", nil).Mark(int64(fetchTime))
wg.Done()
return
}
}(endpoint, ruid)
}
}
wg.Wait()
log.Info("all endpoints synced random file successfully")
@ -108,13 +163,33 @@ func cliUploadAndSync(c *cli.Context) error {
// fetch gets the requested `hash` from `endpoint` and compares it with the `original` content
func fetch(hash string, endpoint string, original []byte, ruid string) error {
ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch")
defer sp.Finish()
log.Trace("sleeping", "ruid", ruid)
time.Sleep(3 * time.Second)
log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
var tn time.Time
reqUri := endpoint + "/bzz:/" + hash + "/"
req, _ := http.NewRequest("GET", reqUri, nil)
opentracing.GlobalTracer().Inject(
sp.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header))
trace := client.GetClientTrace("upload-and-sync - http get", "upload-and-sync", ruid, &tn)
req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
transport := http.DefaultTransport
//transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
tn = time.Now()
res, err := transport.RoundTrip(req)
if err != nil {
log.Warn(err.Error(), "ruid", ruid)
log.Error(err.Error(), "ruid", ruid)
return err
}
log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
@ -145,16 +220,19 @@ func fetch(hash string, endpoint string, original []byte, ruid string) error {
}
// upload sends the given data to `endpoint` via the swarm HTTP client and returns the manifest hash
func upload(f *os.File, endpoint string) (string, error) {
var out bytes.Buffer
cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name())
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return "", err
func upload(dataBytes *[]byte, endpoint string) (string, error) {
swarm := client.NewClient(endpoint)
f := &client.File{
ReadCloser: ioutil.NopCloser(bytes.NewReader(*dataBytes)),
ManifestEntry: api.ManifestEntry{
ContentType: "text/plain",
Mode: 0660,
Size: int64(len(*dataBytes)),
},
}
hash := strings.TrimRight(out.String(), "\r\n")
return hash, nil
// upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
return swarm.Upload(f, "", false)
}
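A hedged usage sketch for the reworked helper, reusing the file's own imports (the gateway URL is a placeholder, not from this change):

// Illustrative only: upload a small deterministic payload and log the hash.
data := testutil.RandomBytes(1, 4096)               // seed 1, 4 KB payload
hash, err := upload(&data, "http://localhost:8500") // placeholder endpoint
if err != nil {
	log.Error(err.Error())
} else {
	log.Info("uploaded", "hash", hash)
}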
func digest(r io.Reader) ([]byte, error) {
@ -177,27 +255,3 @@ func generateRandomData(datasize int) ([]byte, error) {
}
return b, nil
}
// generateRandomFile is creating a temporary file with the requested byte size
func generateRandomFile(size int) (f *os.File, teardown func()) {
// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
if err != nil {
panic(err)
}
// callback for tmp file cleanup
teardown = func() {
tmp.Close()
os.Remove(tmp.Name())
}
buf := make([]byte, size)
_, err = crand.Read(buf)
if err != nil {
panic(err)
}
ioutil.WriteFile(tmp.Name(), buf, 0755)
return tmp, teardown
}

View File

@ -31,8 +31,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/log"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
swarmapi "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/testutil"
"github.com/mattn/go-colorable"
)
@ -42,42 +41,50 @@ func init() {
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
func TestSwarmUp(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
initCluster(t)
cases := []struct {
name string
f func(t *testing.T)
}{
{"NoEncryption", testNoEncryption},
{"Encrypted", testEncrypted},
{"RecursiveNoEncryption", testRecursiveNoEncryption},
{"RecursiveEncrypted", testRecursiveEncrypted},
{"DefaultPathAll", testDefaultPathAll},
}
for _, tc := range cases {
t.Run(tc.name, tc.f)
}
}
// testNoEncryption tests that running 'swarm up' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUp(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
testCLISwarmUp(false, t)
}
func TestCLISwarmUpRecursive(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
testCLISwarmUpRecursive(false, t)
func testNoEncryption(t *testing.T) {
testDefault(false, t)
}
// TestCLISwarmUpEncrypted tests that running 'swarm encrypted-up' makes the resulting file
// testEncrypted tests that running 'swarm up --encrypted' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUpEncrypted(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
testCLISwarmUp(true, t)
}
func TestCLISwarmUpEncryptedRecursive(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
testCLISwarmUpRecursive(true, t)
func testEncrypted(t *testing.T) {
testDefault(true, t)
}
func testCLISwarmUp(toEncrypt bool, t *testing.T) {
log.Info("starting 3 node cluster")
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()
func testRecursiveNoEncryption(t *testing.T) {
testRecursive(false, t)
}
func testRecursiveEncrypted(t *testing.T) {
testRecursive(true, t)
}
func testDefault(toEncrypt bool, t *testing.T) {
tmpFileName := testutil.TempFileWithContent(t, data)
defer os.Remove(tmpFileName)
@ -182,11 +189,7 @@ func testCLISwarmUp(toEncrypt bool, t *testing.T) {
}
}
func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
fmt.Println("starting 3 node cluster")
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()
func testRecursive(toEncrypt bool, t *testing.T) {
tmpUploadDir, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
@ -253,7 +256,7 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
switch mode := fi.Mode(); {
case mode.IsRegular():
if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil {
if file, err := swarmapi.Open(path.Join(tmpDownload, v.Name())); err != nil {
t.Fatalf("encountered an error opening the file returned from the CLI: %v", err)
} else {
ff := make([]byte, len(data))
@ -274,22 +277,16 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
}
}
// TestCLISwarmUpDefaultPath tests swarm recursive upload with relative and absolute
// testDefaultPathAll tests swarm recursive upload with relative and absolute
// default paths and with encryption.
func TestCLISwarmUpDefaultPath(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
testCLISwarmUpDefaultPath(false, false, t)
testCLISwarmUpDefaultPath(false, true, t)
testCLISwarmUpDefaultPath(true, false, t)
testCLISwarmUpDefaultPath(true, true, t)
func testDefaultPathAll(t *testing.T) {
testDefaultPath(false, false, t)
testDefaultPath(false, true, t)
testDefaultPath(true, false, t)
testDefaultPath(true, true, t)
}
func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) {
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
func testDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) {
tmp, err := ioutil.TempDir("", "swarm-defaultpath-test")
if err != nil {
t.Fatal(err)
@ -312,7 +309,7 @@ func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T
args := []string{
"--bzzapi",
srv.URL,
cluster.Nodes[0].URL,
"--recursive",
"--defaultpath",
defaultPath,
@ -329,7 +326,7 @@ func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T
up.ExpectExit()
hash := matches[0]
client := swarm.NewClient(srv.URL)
client := swarmapi.NewClient(cluster.Nodes[0].URL)
m, isEncrypted, err := client.DownloadManifest(hash)
if err != nil {

View File

@ -140,6 +140,10 @@ var (
Name: "rinkeby",
Usage: "Rinkeby network: pre-configured proof-of-authority test network",
}
ConstantinopleOverrideFlag = cli.Uint64Flag{
Name: "override.constantinople",
Usage: "Manually specify constantinople fork-block, overriding the bundled setting",
}
DeveloperFlag = cli.BoolFlag{
Name: "dev",
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
@ -182,6 +186,10 @@ var (
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
}
WhitelistFlag = cli.StringFlag{
Name: "whitelist",
Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>)",
}
// Dashboard settings
DashboardEnabledFlag = cli.BoolFlag{
Name: metrics.DashboardEnabledFlag,
@ -295,7 +303,12 @@ var (
CacheDatabaseFlag = cli.IntFlag{
Name: "cache.database",
Usage: "Percentage of cache memory allowance to use for database io",
Value: 75,
Value: 50,
}
CacheTrieFlag = cli.IntFlag{
Name: "cache.trie",
Usage: "Percentage of cache memory allowance to use for trie caching",
Value: 25,
}
CacheGCFlag = cli.IntFlag{
Name: "cache.gc",
@ -819,17 +832,12 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
// makeDatabaseHandles raises the number of allowed file handles per process
// for Geth to the OS hard limit and returns half of the allowance to assign to the database.
func makeDatabaseHandles() int {
limit, err := fdlimit.Current()
limit, err := fdlimit.Maximum()
if err != nil {
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
}
if limit < 2048 {
if err := fdlimit.Raise(2048); err != nil {
Fatalf("Failed to raise file descriptor allowance: %v", err)
}
}
if limit > 2048 { // cap database file descriptors even if more is available
limit = 2048
if err := fdlimit.Raise(uint64(limit)); err != nil {
Fatalf("Failed to raise file descriptor allowance: %v", err)
}
return limit / 2 // Leave half for networking and other stuff
}
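A worked reading of the new allowance logic, with an assumed hard limit:

// Suppose fdlimit.Maximum() reports 8192 (illustrative value). The process
// limit is raised to 8192 and makeDatabaseHandles returns 8192 / 2 = 4096
// descriptors for the database, leaving the other half for networking etc.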
@ -973,16 +981,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
setWS(ctx, cfg)
setNodeUserIdent(ctx, cfg)
switch {
case ctx.GlobalIsSet(DataDirFlag.Name):
cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
case ctx.GlobalBool(DeveloperFlag.Name):
cfg.DataDir = "" // unless explicitly requested, use memory databases
case ctx.GlobalBool(TestnetFlag.Name):
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
case ctx.GlobalBool(RinkebyFlag.Name):
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
}
setDataDir(ctx, cfg)
if ctx.GlobalIsSet(KeyStoreDirFlag.Name) {
cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name)
@ -995,6 +994,19 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
}
}
func setDataDir(ctx *cli.Context, cfg *node.Config) {
switch {
case ctx.GlobalIsSet(DataDirFlag.Name):
cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
case ctx.GlobalBool(DeveloperFlag.Name):
cfg.DataDir = "" // unless explicitly requested, use memory databases
case ctx.GlobalBool(TestnetFlag.Name):
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
case ctx.GlobalBool(RinkebyFlag.Name):
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
}
}
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
if ctx.GlobalIsSet(GpoBlocksFlag.Name) {
cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name)
@ -1068,6 +1080,29 @@ func setEthash(ctx *cli.Context, cfg *eth.Config) {
}
}
func setWhitelist(ctx *cli.Context, cfg *eth.Config) {
whitelist := ctx.GlobalString(WhitelistFlag.Name)
if whitelist == "" {
return
}
cfg.Whitelist = make(map[uint64]common.Hash)
for _, entry := range strings.Split(whitelist, ",") {
parts := strings.Split(entry, "=")
if len(parts) != 2 {
Fatalf("Invalid whitelist entry: %s", entry)
}
number, err := strconv.ParseUint(parts[0], 0, 64)
if err != nil {
Fatalf("Invalid whitelist block number %s: %v", parts[0], err)
}
var hash common.Hash
if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
Fatalf("Invalid whitelist hash %s: %v", parts[1], err)
}
cfg.Whitelist[number] = hash
}
}
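For reference, a sketch of how the new flag maps into the config (block number and hash are made-up placeholders):

// Hypothetical invocation:
//   geth --whitelist 7280000=0x1111111111111111111111111111111111111111111111111111111111111111
// After setWhitelist runs, the config contains:
//   cfg.Whitelist[7280000] == common.HexToHash("0x1111...1111")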
// checkExclusive verifies that only a single instance of the provided flags was
// set by the user. Each flag might optionally be followed by a string type to
// specialize it further.
@ -1133,6 +1168,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
setGPO(ctx, &cfg.GPO)
setTxPool(ctx, &cfg.TxPool)
setEthash(ctx, cfg)
setWhitelist(ctx, cfg)
if ctx.GlobalIsSet(SyncModeFlag.Name) {
cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
@ -1146,7 +1182,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
}
@ -1157,8 +1192,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
}
cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
cfg.TrieDirtyCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
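A quick arithmetic sketch of the resulting cache split (the --cache value is illustrative; cache.database and cache.trie defaults are per the flags above, and the cache.gc default of 25 is an assumption, as its value is not shown in this hunk):

cache := 1024                 // --cache 1024 MB, illustrative
dbCache := cache * 50 / 100   // 512 MB for the database (cache.database)
trieClean := cache * 25 / 100 // 256 MB for clean trie nodes (cache.trie)
trieDirty := cache * 25 / 100 // 256 MB for dirty trie nodes (cache.gc)
_, _, _ = dbCache, trieClean, trieDirty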
if ctx.GlobalIsSet(MinerNotifyFlag.Name) {
cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",")
@ -1368,7 +1406,6 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) {
var err error
chainDb = MakeChainDatabase(ctx, stack)
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
@ -1393,12 +1430,16 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
}
cache := &core.CacheConfig{
Disabled: ctx.GlobalString(GCModeFlag.Name) == "archive",
TrieNodeLimit: eth.DefaultConfig.TrieCache,
TrieTimeLimit: eth.DefaultConfig.TrieTimeout,
Disabled: ctx.GlobalString(GCModeFlag.Name) == "archive",
TrieCleanLimit: eth.DefaultConfig.TrieCleanCache,
TrieDirtyLimit: eth.DefaultConfig.TrieDirtyCache,
TrieTimeLimit: eth.DefaultConfig.TrieTimeout,
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
cache.TrieCleanLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
cache.TrieNodeLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
cache.TrieDirtyLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil)

View File

@ -696,7 +696,7 @@ func (c *Clique) SealHash(header *types.Header) common.Hash {
return sigHash(header)
}
// Close implements consensus.Engine. It's a noop for clique as there is are no background threads.
// Close implements consensus.Engine. It's a noop for clique as there are no background threads.
func (c *Clique) Close() error {
return nil
}

View File

@ -47,7 +47,10 @@ import (
)
var (
blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
ErrNoGenesis = errors.New("Genesis not found in chain")
)
@ -68,9 +71,10 @@ const (
// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
Disabled bool // Whether to disable trie write caching (archive node)
TrieNodeLimit int // Memory limit (MB) at which to flush the current in-memory trie to disk
TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
Disabled bool // Whether to disable trie write caching (archive node)
TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
}
// BlockChain represents the canonical chain given a database with a genesis
@ -140,8 +144,9 @@ type BlockChain struct {
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = &CacheConfig{
TrieNodeLimit: 256,
TrieTimeLimit: 5 * time.Minute,
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
}
}
bodyCache, _ := lru.New(bodyCacheLimit)
@ -156,7 +161,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
stateCache: state.NewDatabase(db),
stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
quit: make(chan struct{}),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
@ -205,6 +210,11 @@ func (bc *BlockChain) getProcInterrupt() bool {
return atomic.LoadInt32(&bc.procInterrupt) == 1
}
// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
return &bc.vmConfig
}
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
@ -393,6 +403,11 @@ func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
return state.New(root, bc.stateCache)
}
// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
return bc.stateCache
}
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
return bc.ResetWithGenesisBlock(bc.genesisBlock)
@ -438,7 +453,11 @@ func (bc *BlockChain) repair(head **types.Block) error {
return nil
}
// Otherwise rewind one block and recheck state availability there
(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
if block == nil {
return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
}
(*head) = block
}
}
@ -554,6 +573,17 @@ func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
return rawdb.HasBody(bc.db, hash, number)
}
// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
if !bc.HasBlock(hash, number) {
return false
}
if bc.receiptsCache.Contains(hash) {
return true
}
return rawdb.HasReceipts(bc.db, hash, number)
}
// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
_, err := bc.stateCache.OpenTrie(hash)
@ -611,12 +641,10 @@ func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
if receipts, ok := bc.receiptsCache.Get(hash); ok {
return receipts.(types.Receipts)
}
number := rawdb.ReadHeaderNumber(bc.db, hash)
if number == nil {
return nil
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number)
bc.receiptsCache.Add(hash, receipts)
return receipts
@ -938,7 +966,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
// If we exceeded our memory allowance, flush matured singleton nodes to disk
var (
nodes, imgs = triedb.Size()
limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
)
if nodes > limit || imgs > 4*1024*1024 {
triedb.Cap(limit - ethdb.IdealBatchSize)
@ -1020,6 +1048,18 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
return status, nil
}
// addFutureBlock checks if the block is within the max allowed window to get
// accepted for future processing, and returns an error if the block is too far
// ahead and was not added.
func (bc *BlockChain) addFutureBlock(block *types.Block) error {
max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
if block.Time().Cmp(max) > 0 {
return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
}
bc.futureBlocks.Add(block.Hash(), block)
return nil
}
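Read concretely (assuming the upstream maxTimeFutureBlocks of 30 seconds, which is not shown in this hunk):

// With maxTimeFutureBlocks == 30 (assumption), a block whose header time is
// more than 30s ahead of the local clock fails the Cmp check above and is
// rejected with an error instead of being queued in futureBlocks.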
// InsertChain attempts to insert the given batch of blocks in to the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
@ -1027,18 +1067,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
n, events, logs, err := bc.insertChain(chain)
bc.PostChainEvents(events, logs)
return n, err
}
// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
// Sanity check that we have something meaningful to import
if len(chain) == 0 {
return 0, nil, nil, nil
return 0, nil
}
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(chain); i++ {
@ -1047,16 +1078,36 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
}
}
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
defer bc.wg.Done()
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
n, events, logs, err := bc.insertChain(chain, true)
bc.chainmu.Unlock()
bc.wg.Done()
bc.PostChainEvents(events, logs)
return n, err
}
// insertChain is the internal implementation of InsertChain, which assumes that
// 1) chains are contiguous, and 2) the chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racy behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then a new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again.
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
// If the chain is terminating, don't even bother starting up
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
return 0, nil, nil, nil
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
@ -1073,16 +1124,56 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
for i, block := range chain {
headers[i] = block.Header()
seals[i] = true
seals[i] = verifySeals
}
abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
defer close(abort)
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
// Peek the error for the first block to decide on the import logic
it := newInsertIterator(chain, results, bc.Validator())
// Iterate over the blocks and insert when the verifier permits
for i, block := range chain {
block, err := it.next()
switch {
// First block is pruned, insert as sidechain and reorg only if TD grows enough
case err == consensus.ErrPrunedAncestor:
return bc.insertSidechain(it)
// First block is future, shove it (and all children) to the future queue (unknown ancestor)
case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
}
block, err = it.next()
}
stats.queued += it.processed()
stats.ignored += it.remaining()
// If there are any still remaining, mark as ignored
return it.index, events, coalescedLogs, err
// First block (and state) is known
// 1. We did a roll-back, and should now do a re-import
// 2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
// from the canonical chain, which has not been verified.
case err == ErrKnownBlock:
// Skip all known blocks that are behind us
current := bc.CurrentBlock().NumberU64()
for block != nil && err == ErrKnownBlock && current >= block.NumberU64() {
stats.ignored++
block, err = it.next()
}
// Falls through to the block import
// Some other error occurred, abort
case err != nil:
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
return it.index, events, coalescedLogs, err
}
// No validation errors for the first block (or chain prefix skipped)
for ; block != nil && err == nil; block, err = it.next() {
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
@ -1091,115 +1182,53 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
return i, events, coalescedLogs, ErrBlacklistedHash
return it.index, events, coalescedLogs, ErrBlacklistedHash
}
// Wait for the block's verification to complete
bstart := time.Now()
// Retrieve the parent block and it's state to execute on top
start := time.Now()
err := <-results
if err == nil {
err = bc.Validator().ValidateBody(block)
}
switch {
case err == ErrKnownBlock:
// Block and state both already known. However if the current block is below
// this number we did a rollback and we should reimport it nonetheless.
if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
stats.ignored++
continue
}
case err == consensus.ErrFutureBlock:
// Allow up to MaxFuture second in the future blocks. If this limit is exceeded
// the chain is discarded and processed at a later time if given.
max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
if block.Time().Cmp(max) > 0 {
return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
}
bc.futureBlocks.Add(block.Hash(), block)
stats.queued++
continue
case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
bc.futureBlocks.Add(block.Hash(), block)
stats.queued++
continue
case err == consensus.ErrPrunedAncestor:
// Block competing with the canonical chain, store in the db, but don't process
// until the competitor TD goes above the canonical TD
currentBlock := bc.CurrentBlock()
localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
if localTd.Cmp(externTd) > 0 {
if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
return i, events, coalescedLogs, err
}
continue
}
// Competitor chain beat canonical, gather all blocks from the common ancestor
var winner []*types.Block
parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
for !bc.HasState(parent.Root()) {
winner = append(winner, parent)
parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
}
for j := 0; j < len(winner)/2; j++ {
winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
}
// Import all the pruned blocks to make the state available
bc.chainmu.Unlock()
_, evs, logs, err := bc.insertChain(winner)
bc.chainmu.Lock()
events, coalescedLogs = evs, logs
if err != nil {
return i, events, coalescedLogs, err
}
case err != nil:
bc.reportBlock(block, nil, err)
return i, events, coalescedLogs, err
}
// Create a new statedb using the parent block and report an
// error if it fails.
var parent *types.Block
if i == 0 {
parent := it.previous()
if parent == nil {
parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
} else {
parent = chain[i-1]
}
state, err := state.New(parent.Root(), bc.stateCache)
if err != nil {
return i, events, coalescedLogs, err
return it.index, events, coalescedLogs, err
}
// Process block using the parent state as reference point.
t0 := time.Now()
receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
t1 := time.Now()
if err != nil {
bc.reportBlock(block, receipts, err)
return i, events, coalescedLogs, err
return it.index, events, coalescedLogs, err
}
// Validate the state using the default validator
err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
if err != nil {
if err := bc.Validator().ValidateState(block, parent, state, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
return i, events, coalescedLogs, err
return it.index, events, coalescedLogs, err
}
proctime := time.Since(bstart)
t2 := time.Now()
proctime := time.Since(start)
// Write the block to the chain and get the status.
status, err := bc.WriteBlockWithState(block, receipts, state)
t3 := time.Now()
if err != nil {
return i, events, coalescedLogs, err
return it.index, events, coalescedLogs, err
}
blockInsertTimer.UpdateSince(start)
blockExecutionTimer.Update(t1.Sub(t0))
blockValidationTimer.Update(t2.Sub(t1))
blockWriteTimer.Update(t3.Sub(t2))
switch status {
case CanonStatTy:
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
"elapsed", common.PrettyDuration(time.Since(start)),
"root", block.Root())
coalescedLogs = append(coalescedLogs, logs...)
blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block
@ -1207,78 +1236,153 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
bc.gcproc += proctime
case SideStatTy:
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
blockInsertTimer.UpdateSince(bstart)
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
events = append(events, ChainSideEvent{block})
}
blockInsertTimer.UpdateSince(start)
stats.processed++
stats.usedGas += usedGas
cache, _ := bc.stateCache.TrieDB().Size()
stats.report(chain, i, cache)
stats.report(chain, it.index, cache)
}
// Any blocks remaining here? The only ones we care about are the future ones
if block != nil && err == consensus.ErrFutureBlock {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
}
block, err = it.next()
for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
}
stats.queued++
}
}
stats.ignored += it.remaining()
// Append a single chain head event if we've progressed the chain
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
events = append(events, ChainHeadEvent{lastCanon})
}
return 0, events, coalescedLogs, nil
return it.index, events, coalescedLogs, err
}
// insertStats tracks and reports on block insertion.
type insertStats struct {
queued, processed, ignored int
usedGas uint64
lastIndex int
startTime mclock.AbsTime
}
// statsReportLimit is the time limit during import and export after which we
// always print out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second
// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
// Fetch the timings for the batch
// insertSidechain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeds that of the current chain.
func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, []*types.Log, error) {
var (
now = mclock.Now()
elapsed = time.Duration(now) - time.Duration(st.startTime)
externTd *big.Int
current = bc.CurrentBlock().NumberU64()
)
// If we're at the last block of the batch or report period reached, log
if index == len(chain)-1 || elapsed >= statsReportLimit {
var (
end = chain[index]
txs = countTransactions(chain[st.lastIndex : index+1])
)
context := []interface{}{
"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
"number", end.Number(), "hash", end.Hash(),
}
if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
context = append(context, []interface{}{"cache", cache}...)
// The first sidechain block error is already verified to be ErrPrunedAncestor.
// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
// ones. Any other error means that the block is invalid, and should not be written
// to disk.
block, err := it.current(), consensus.ErrPrunedAncestor
for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
// Check the canonical state root for that number
if number := block.NumberU64(); current >= number {
if canonical := bc.GetBlockByNumber(number); canonical != nil && canonical.Root() == block.Root() {
// This is most likely a shadow-state attack. When a fork is imported into the
// database, and it eventually reaches a block height which is not pruned, we
// just found that the state already exists! This means that the sidechain block
// refers to a state which already exists in our canon chain.
//
// If left unchecked, we would now proceed importing the blocks, without actually
// having verified the state of the previous blocks.
log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
if st.queued > 0 {
context = append(context, []interface{}{"queued", st.queued}...)
// If someone legitimately side-mines blocks, they would still be imported as usual. However,
// we cannot risk writing unverified blocks to disk when they obviously target the pruning
// mechanism.
return it.index, nil, nil, errors.New("sidechain ghost-state attack")
}
}
if st.ignored > 0 {
context = append(context, []interface{}{"ignored", st.ignored}...)
if externTd == nil {
externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
}
log.Info("Imported new chain segment", context...)
externTd = new(big.Int).Add(externTd, block.Difficulty())
*st = insertStats{startTime: now, lastIndex: index + 1}
if !bc.HasBlock(block.Hash(), block.NumberU64()) {
start := time.Now()
if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
return it.index, nil, nil, err
}
log.Debug("Inserted sidechain block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
}
}
}
func countTransactions(chain []*types.Block) (c int) {
for _, b := range chain {
c += len(b.Transactions())
// At this point, we've written all sidechain blocks to the database. The loop
// ended either on some other error or because all blocks were processed. If
// there was some other error, we can ignore the rest of those blocks.
//
// If the externTd was larger than our local TD, we now need to reimport the previous
// blocks to regenerate the required state.
localTd := bc.GetTd(bc.CurrentBlock().Hash(), current)
if localTd.Cmp(externTd) > 0 {
log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().NumberU64(), "sidetd", externTd, "localtd", localTd)
return it.index, nil, nil, err
}
return c
// Gather all the sidechain hashes (full blocks may be memory heavy)
var (
hashes []common.Hash
numbers []uint64
)
parent := bc.GetHeader(it.previous().Hash(), it.previous().NumberU64())
for parent != nil && !bc.HasState(parent.Root) {
hashes = append(hashes, parent.Hash())
numbers = append(numbers, parent.Number.Uint64())
parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
}
if parent == nil {
return it.index, nil, nil, errors.New("missing parent")
}
// Import all the pruned blocks to make the state available
var (
blocks []*types.Block
memory common.StorageSize
)
for i := len(hashes) - 1; i >= 0; i-- {
// Append the next block to our batch
block := bc.GetBlock(hashes[i], numbers[i])
blocks = append(blocks, block)
memory += block.Size()
// If memory use grew too large, import and continue. Sadly we need to discard
// all raised events and logs from notifications since we're too heavy on the
// memory here.
if len(blocks) >= 2048 || memory > 64*1024*1024 {
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
if _, _, _, err := bc.insertChain(blocks, false); err != nil {
return 0, nil, nil, err
}
blocks, memory = blocks[:0], 0
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
return 0, nil, nil, nil
}
}
}
if len(blocks) > 0 {
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
return bc.insertChain(blocks, false)
}
return 0, nil, nil, nil
}
// reorg takes two blocks, an old chain and a new chain, and will reconstruct the blocks and insert them
@ -1453,8 +1557,10 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e
bc.addBadBlock(block)
var receiptString string
for _, receipt := range receipts {
receiptString += fmt.Sprintf("\t%v\n", receipt)
for i, receipt := range receipts {
receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
}
log.Error(fmt.Sprintf(`
########## BAD BLOCK #########

View File

@ -0,0 +1,143 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// insertStats tracks and reports on block insertion.
type insertStats struct {
queued, processed, ignored int
usedGas uint64
lastIndex int
startTime mclock.AbsTime
}
// statsReportLimit is the time limit during import and export after which we
// always print out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second
// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
// Fetch the timings for the batch
var (
now = mclock.Now()
elapsed = time.Duration(now) - time.Duration(st.startTime)
)
// If we're at the last block of the batch or report period reached, log
if index == len(chain)-1 || elapsed >= statsReportLimit {
// Count the number of transactions in this segment
var txs int
for _, block := range chain[st.lastIndex : index+1] {
txs += len(block.Transactions())
}
end := chain[index]
// Assemble the log context and send it to the logger
context := []interface{}{
"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
"number", end.Number(), "hash", end.Hash(),
}
if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
context = append(context, []interface{}{"cache", cache}...)
if st.queued > 0 {
context = append(context, []interface{}{"queued", st.queued}...)
}
if st.ignored > 0 {
context = append(context, []interface{}{"ignored", st.ignored}...)
}
log.Info("Imported new chain segment", context...)
// Bump the stats reported to the next section
*st = insertStats{startTime: now, lastIndex: index + 1}
}
}
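// Illustration, not part of this file: how an import loop might drive these
// stats. The loop body and the cache figure are placeholders; the real call
// sites live in blockchain.go's insertChain.
//
//	stats := insertStats{startTime: mclock.Now()}
//	for i, block := range chain {
//		// ... execute and commit the block ...
//		stats.processed++
//		stats.usedGas += block.GasUsed()
//		stats.report(chain, i, cache) // logs at batch end, or every statsReportLimit
//	}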
// insertIterator is a helper to assist during chain import.
type insertIterator struct {
chain types.Blocks
results <-chan error
index int
validator Validator
}
// newInsertIterator creates a new iterator based on the given blocks, which are
// assumed to be a contiguous chain.
func newInsertIterator(chain types.Blocks, results <-chan error, validator Validator) *insertIterator {
return &insertIterator{
chain: chain,
results: results,
index: -1,
validator: validator,
}
}
// next returns the next block in the iterator, along with any potential validation
// error for that block. When the end is reached, it will return (nil, nil).
func (it *insertIterator) next() (*types.Block, error) {
if it.index+1 >= len(it.chain) {
it.index = len(it.chain)
return nil, nil
}
it.index++
if err := <-it.results; err != nil {
return it.chain[it.index], err
}
return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index])
}
// current returns the current block that's being processed.
func (it *insertIterator) current() *types.Block {
if it.index < 0 || it.index+1 >= len(it.chain) {
return nil
}
return it.chain[it.index]
}
// previous returns the previous block that was being processed, or nil
func (it *insertIterator) previous() *types.Block {
if it.index < 1 {
return nil
}
return it.chain[it.index-1]
}
// first returns the first block in the iterator.
func (it *insertIterator) first() *types.Block {
return it.chain[0]
}
// remaining returns the number of remaining blocks.
func (it *insertIterator) remaining() int {
return len(it.chain) - it.index
}
// processed returns the number of processed blocks.
func (it *insertIterator) processed() int {
return it.index + 1
}
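
Taken together, the iterator hides the bookkeeping of pairing each block with its asynchronous verification result. A hedged consumption sketch: importAll is hypothetical, assumes it compiles inside package core, and takes the results channel produced by the consensus engine's header verification.

func importAll(chain types.Blocks, results <-chan error, v Validator) error {
	it := newInsertIterator(chain, results, v)
	for block, err := it.next(); block != nil; block, err = it.next() {
		if err != nil {
			return err // header verification or body validation failed
		}
		// ... process the verified block ...
	}
	return nil
}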

View File

@ -579,11 +579,11 @@ func testInsertNonceError(t *testing.T, full bool) {
blockchain.hc.engine = blockchain.engine
failRes, err = blockchain.InsertHeaderChain(headers, 1)
}
// Check that the returned error indicates the failure.
// Check that the returned error indicates the failure
if failRes != failAt {
t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
t.Errorf("test %d: failure (%v) index mismatch: have %d, want %d", i, err, failRes, failAt)
}
// Check that all no blocks after the failing block have been inserted.
// Check that all blocks after the failing block have been inserted
for j := 0; j < i-failAt; j++ {
if full {
if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
@ -1345,7 +1345,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
t.Fatalf("failed to insert shared chain: %v", err)
}
if _, err := chain.InsertChain(original); err != nil {
t.Fatalf("failed to insert shared chain: %v", err)
t.Fatalf("failed to insert original chain: %v", err)
}
// Ensure that the state associated with the forking point is pruned away
if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil {

View File

@ -151,6 +151,9 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
return SetupGenesisBlockWithOverride(db, genesis, nil)
}
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constantinopleOverride *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@ -178,6 +181,9 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
// Get the existing chain configuration.
newcfg := genesis.configOrDefault(stored)
if constantinopleOverride != nil {
newcfg.ConstantinopleBlock = constantinopleOverride
}
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg == nil {
log.Warn("Found genesis block without chain config")
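
This override hook is the Constantinople-relevant piece of the geth bump: callers can force a different activation block without editing the stored chain config, while plain SetupGenesisBlock simply passes nil through. A hedged usage sketch against geth 1.8.20's in-memory database; the activation height is illustrative only, not mainnet's actual value.

package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db := ethdb.NewMemDatabase()          // throwaway in-memory backend
	genesis := core.DefaultGenesisBlock() // mainnet genesis spec
	override := big.NewInt(7280000)       // illustrative activation height
	config, hash, err := core.SetupGenesisBlockWithOverride(db, genesis, override)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("genesis:", hash.Hex(), "constantinople:", config.ConstantinopleBlock)
}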

View File

@ -271,6 +271,15 @@ func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
}
}
// HasReceipts verifies the existence of all the transaction receipts belonging
// to a block.
func HasReceipts(db DatabaseReader, hash common.Hash, number uint64) bool {
if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
return false
}
return true
}
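// Illustration, not part of the upstream diff: HasReceipts as a cheap guard
// before paying for a full ReadReceipts decode. db, hash and number are
// assumed to be in scope.
//
//	if HasReceipts(db, hash, number) {
//		receipts := ReadReceipts(db, hash, number)
//		// ... process receipts ...
//	}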
// ReadReceipts retrieves all the transaction receipts belonging to a block.
func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
// Retrieve the flattened receipt slice

View File

@ -72,13 +72,19 @@ type Trie interface {
}
// NewDatabase creates a backing store for state. The returned database is safe for
// concurrent use and retains cached trie nodes in memory. The pool is an optional
// intermediate trie-node memory pool between the low level storage layer and the
// high level trie abstraction.
// concurrent use and retains a few recent expanded trie nodes in memory. To keep
// more historical state in memory, use the NewDatabaseWithCache constructor.
func NewDatabase(db ethdb.Database) Database {
return NewDatabaseWithCache(db, 0)
}
// NewDatabaseWithCache creates a backing store for state. The returned database is safe for
// concurrent use and retains both a few recent expanded trie nodes in memory, as
// well as a lot of collapsed RLP trie nodes in a large memory cache.
func NewDatabaseWithCache(db ethdb.Database, cache int) Database {
csc, _ := lru.New(codeSizeCacheSize)
return &cachingDB{
db: trie.NewDatabase(db),
db: trie.NewDatabaseWithCache(db, cache),
codeSizeCache: csc,
}
}
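
A hedged sketch of opting into the larger cache: the 512 figure is an arbitrary size handed down to trie.NewDatabaseWithCache, and the in-memory backend plus empty state root are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	diskdb := ethdb.NewMemDatabase() // stand-in for the real chain database
	sdb := state.NewDatabaseWithCache(diskdb, 512)
	statedb, err := state.New(common.Hash{}, sdb) // empty root starts a fresh state
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("state opened with cached trie db:", statedb != nil)
}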

View File

@ -825,7 +825,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) []error {
// addTxsLocked attempts to queue a batch of transactions if they are valid,
// whilst assuming the transaction pool lock is already held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error {
// Add the batch of transaction, tracking the accepted ones
// Add the batch of transactions, tracking the accepted ones
dirty := make(map[common.Address]struct{})
errs := make([]error, len(txs))

View File

@ -81,8 +81,8 @@ type Header struct {
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Time *big.Int `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash" gencodec:"required"`
Nonce BlockNonce `json:"nonce" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
}
// field type overrides for gencodec

View File

@ -13,6 +13,7 @@ import (
var _ = (*headerMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (h Header) MarshalJSON() ([]byte, error) {
type Header struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
@ -28,8 +29,8 @@ func (h Header) MarshalJSON() ([]byte, error) {
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Big `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash" gencodec:"required"`
Nonce BlockNonce `json:"nonce" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
Hash common.Hash `json:"hash"`
}
var enc Header
@ -52,6 +53,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (h *Header) UnmarshalJSON(input []byte) error {
type Header struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
@ -67,8 +69,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Big `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest *common.Hash `json:"mixHash" gencodec:"required"`
Nonce *BlockNonce `json:"nonce" gencodec:"required"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *BlockNonce `json:"nonce"`
}
var dec Header
if err := json.Unmarshal(input, &dec); err != nil {
@ -126,13 +128,11 @@ func (h *Header) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'extraData' for Header")
}
h.Extra = *dec.Extra
if dec.MixDigest == nil {
return errors.New("missing required field 'mixHash' for Header")
if dec.MixDigest != nil {
h.MixDigest = *dec.MixDigest
}
h.MixDigest = *dec.MixDigest
if dec.Nonce == nil {
return errors.New("missing required field 'nonce' for Header")
if dec.Nonce != nil {
h.Nonce = *dec.Nonce
}
h.Nonce = *dec.Nonce
return nil
}
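
The generated change above follows the usual gencodec pattern: pointer fields in the shadow struct distinguish an absent JSON key from a zero value, so dropping the `gencodec:"required"` tag turns a nil-check-and-error into a conditional assignment. A minimal hand-written toy (not the real Header type) showing the same pattern:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// toy mirrors the generated shape: Number stays required, Nonce becomes optional.
type toy struct {
	Number uint64
	Nonce  uint64
}

func (t *toy) UnmarshalJSON(input []byte) error {
	type shadow struct {
		Number *uint64 `json:"number"`
		Nonce  *uint64 `json:"nonce"`
	}
	var dec shadow
	if err := json.Unmarshal(input, &dec); err != nil {
		return err
	}
	if dec.Number == nil {
		return errors.New("missing required field 'number'")
	}
	t.Number = *dec.Number
	if dec.Nonce != nil { // optional: assign only when present
		t.Nonce = *dec.Nonce
	}
	return nil
}

func main() {
	var t toy
	if err := json.Unmarshal([]byte(`{"number": 7}`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Number, t.Nonce) // prints 7 0; a missing nonce defaults to zero
}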

Some files were not shown because too many files have changed in this diff.