forked from cerc-io/plugeth
all: gofmt -w -s (#15419)
This commit is contained in:
parent bfdc0fa362
commit 9619a61024
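For context on the commit title: `gofmt -w` rewrites each source file in place, and `-s` additionally applies gofmt's simplification rules. Two of those rules account for nearly every hunk below: eliding the repeated element type inside composite literals, and dropping the redundant blank identifier from range clauses; the remaining hunks are ordinary formatting fixes such as missing spaces and realigned columns. A minimal runnable sketch of the two rewrites (identifiers are illustrative, not taken from the commit):

package main

import "fmt"

func main() {
	// Composite literals: gofmt -s elides the repeated element type, so
	// []common.Address{common.Address{}} becomes []common.Address{{}}.
	matrix := [][]byte{
		{0x01}, // was: []byte{0x01},
		{0x02}, // was: []byte{0x02},
	}

	// Range clauses: gofmt -s drops the blank identifier, so
	// `for i, _ := range x` becomes `for i := range x`.
	for i := range matrix { // was: for i, _ := range matrix {
		fmt.Println(i, matrix[i])
	}
}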
@@ -472,7 +472,7 @@ func TestBindings(t *testing.T) {
 		t.Fatalf("failed to create temporary workspace: %v", err)
 	}
 	defer os.RemoveAll(ws)
 
 	pkg := filepath.Join(ws, "bindtest")
 	if err = os.MkdirAll(pkg, 0700); err != nil {
 		t.Fatalf("failed to create package: %v", err)
@@ -365,7 +365,7 @@ func TestUnmarshal(t *testing.T) {
 	buff.Write(common.Hex2Bytes("0102000000000000000000000000000000000000000000000000000000000000"))
 
 	err = abi.Unpack(&mixedBytes, "mixedBytes", buff.Bytes())
-	if err !=nil {
+	if err != nil {
 		t.Error(err)
 	} else {
 		if bytes.Compare(p0, p0Exp) != 0 {
@@ -182,8 +182,9 @@ type bintree struct {
 	Func     func() (*asset, error)
 	Children map[string]*bintree
 }
 
 var _bintree = &bintree{nil, map[string]*bintree{
-	"faucet.html": &bintree{faucetHtml, map[string]*bintree{}},
+	"faucet.html": {faucetHtml, map[string]*bintree{}},
 }}
 
 // RestoreAsset restores an asset under the given directory
@@ -232,4 +233,3 @@ func _filePath(dir, name string) string {
 	cannonicalName := strings.Replace(name, "\\", "/", -1)
 	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
 }
-
@@ -31,14 +31,14 @@ const testSectionSize = 4096
 // Tests that wildcard filter rules (nil) can be specified and are handled well.
 func TestMatcherWildcards(t *testing.T) {
 	matcher := NewMatcher(testSectionSize, [][][]byte{
-		[][]byte{common.Address{}.Bytes(), common.Address{0x01}.Bytes()}, // Default address is not a wildcard
-		[][]byte{common.Hash{}.Bytes(), common.Hash{0x01}.Bytes()}, // Default hash is not a wildcard
-		[][]byte{common.Hash{0x01}.Bytes()}, // Plain rule, sanity check
-		[][]byte{common.Hash{0x01}.Bytes(), nil}, // Wildcard suffix, drop rule
-		[][]byte{nil, common.Hash{0x01}.Bytes()}, // Wildcard prefix, drop rule
-		[][]byte{nil, nil}, // Wildcard combo, drop rule
-		[][]byte{}, // Inited wildcard rule, drop rule
+		{common.Address{}.Bytes(), common.Address{0x01}.Bytes()}, // Default address is not a wildcard
+		{common.Hash{}.Bytes(), common.Hash{0x01}.Bytes()}, // Default hash is not a wildcard
+		{common.Hash{0x01}.Bytes()}, // Plain rule, sanity check
+		{common.Hash{0x01}.Bytes(), nil}, // Wildcard suffix, drop rule
+		{nil, common.Hash{0x01}.Bytes()}, // Wildcard prefix, drop rule
+		{nil, nil}, // Wildcard combo, drop rule
+		{}, // Inited wildcard rule, drop rule
 		nil, // Proper wildcard rule, drop rule
 	})
 	if len(matcher.filters) != 3 {
 		t.Fatalf("filter system size mismatch: have %d, want %d", len(matcher.filters), 3)
@@ -60,7 +60,7 @@ func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
 				req.section, // Requested data
 				req.section, // Duplicated data (ensure it doesn't double close anything)
 			}, [][]byte{
-				[]byte{},
+				{},
 				new(big.Int).SetUint64(req.section).Bytes(),
 				new(big.Int).SetUint64(req.section).Bytes(),
 			})
@@ -356,15 +356,15 @@ func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
 		GasLimit:   6283185,
 		Difficulty: big.NewInt(1),
 		Alloc: map[common.Address]GenesisAccount{
-			common.BytesToAddress([]byte{1}): GenesisAccount{Balance: big.NewInt(1)}, // ECRecover
-			common.BytesToAddress([]byte{2}): GenesisAccount{Balance: big.NewInt(1)}, // SHA256
-			common.BytesToAddress([]byte{3}): GenesisAccount{Balance: big.NewInt(1)}, // RIPEMD
-			common.BytesToAddress([]byte{4}): GenesisAccount{Balance: big.NewInt(1)}, // Identity
-			common.BytesToAddress([]byte{5}): GenesisAccount{Balance: big.NewInt(1)}, // ModExp
-			common.BytesToAddress([]byte{6}): GenesisAccount{Balance: big.NewInt(1)}, // ECAdd
-			common.BytesToAddress([]byte{7}): GenesisAccount{Balance: big.NewInt(1)}, // ECScalarMul
-			common.BytesToAddress([]byte{8}): GenesisAccount{Balance: big.NewInt(1)}, // ECPairing
-			faucet: GenesisAccount{Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))},
+			common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover
+			common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256
+			common.BytesToAddress([]byte{3}): {Balance: big.NewInt(1)}, // RIPEMD
+			common.BytesToAddress([]byte{4}): {Balance: big.NewInt(1)}, // Identity
+			common.BytesToAddress([]byte{5}): {Balance: big.NewInt(1)}, // ModExp
+			common.BytesToAddress([]byte{6}): {Balance: big.NewInt(1)}, // ECAdd
+			common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul
+			common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing
+			faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))},
 		},
 	}
 }
@@ -820,7 +820,7 @@ func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error {
 	// Only reprocess the internal state if something was actually added
 	if len(dirty) > 0 {
 		addrs := make([]common.Address, 0, len(dirty))
-		for addr, _ := range dirty {
+		for addr := range dirty {
 			addrs = append(addrs, addr)
 		}
 		pool.promoteExecutables(addrs)
@@ -907,7 +907,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 	// Gather all the accounts potentially needing updates
 	if accounts == nil {
 		accounts = make([]common.Address, 0, len(pool.queue))
-		for addr, _ := range pool.queue {
+		for addr := range pool.queue {
 			accounts = append(accounts, addr)
 		}
 	}
@@ -105,7 +105,7 @@ func validateTxPoolInternals(pool *TxPool) error {
 	for addr, txs := range pool.pending {
 		// Find the last transaction
 		var last uint64
-		for nonce, _ := range txs.txs.items {
+		for nonce := range txs.txs.items {
 			if last < nonce {
 				last = nonce
 			}
@@ -192,7 +192,7 @@ func BenchmarkNoBloomBits(b *testing.B) {
 	start := time.Now()
 	mux := new(event.TypeMux)
 	backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
-	filter := New(backend, 0, int64(headNum), []common.Address{common.Address{}}, nil)
+	filter := New(backend, 0, int64(headNum), []common.Address{{}}, nil)
 	filter.Logs(context.Background())
 	d := time.Since(start)
 	fmt.Println("Finished running filter benchmarks")
@@ -56,7 +56,7 @@ func (eth *LightEthereum) startBloomHandlers() {
 				task.Bitsets = make([][]byte, len(task.Sections))
 				compVectors, err := light.GetBloomBits(task.Context, eth.odr, task.Bit, task.Sections)
 				if err == nil {
-					for i, _ := range task.Sections {
+					for i := range task.Sections {
 						if blob, err := bitutil.DecompressBytes(compVectors[i], int(light.BloomTrieFrequency/8)); err == nil {
 							task.Bitsets[i] = blob
 						} else {
@@ -191,7 +191,7 @@ func (d *requestDistributor) nextRequest() (distPeer, *distReq, time.Duration) {
 	for (len(d.peers) > 0 || elem == d.reqQueue.Front()) && elem != nil {
 		req := elem.Value.(*distReq)
 		canSend := false
-		for peer, _ := range d.peers {
+		for peer := range d.peers {
 			if _, ok := checkedPeers[peer]; !ok && peer.canQueue() && req.canSend(peer) {
 				canSend = true
 				cost := req.getCost(peer)
@@ -124,7 +124,7 @@ func testRequestDistributor(t *testing.T, resend bool) {
 
 	dist := newRequestDistributor(nil, stop)
 	var peers [testDistPeerCount]*testDistPeer
-	for i, _ := range peers {
+	for i := range peers {
 		peers[i] = &testDistPeer{}
 		go peers[i].worker(t, !resend, stop)
 		dist.registerTestPeer(peers[i])
les/odr.go (18 changed lines)
@@ -27,20 +27,20 @@ import (
 
 // LesOdr implements light.OdrBackend
 type LesOdr struct {
 	db                                         ethdb.Database
 	chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer
 	retriever                                  *retrieveManager
 	stop                                       chan struct{}
 }
 
 func NewLesOdr(db ethdb.Database, chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer, retriever *retrieveManager) *LesOdr {
 	return &LesOdr{
 		db:               db,
 		chtIndexer:       chtIndexer,
 		bloomTrieIndexer: bloomTrieIndexer,
 		bloomIndexer:     bloomIndexer,
 		retriever:        retriever,
 		stop:             make(chan struct{}),
 	}
 }
 
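The 18 changed lines above appear to be whitespace-only: gofmt aligns the value column of consecutive struct fields and keyed literal entries, so a long field such as `chtIndexer, bloomTrieIndexer, bloomIndexer` sets the indentation for its neighbours. A small sketch of the rule (hypothetical struct, not the plugeth code):

package main

// gofmt pads each field name so the types line up in one column; the
// widest name in a run ("bloomTrieIndexer" here) sets the column width.
type odrSketch struct {
	db               string
	bloomTrieIndexer int
	stop             chan struct{}
}

func main() { _ = odrSketch{db: "x"} }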
@@ -290,7 +290,7 @@ func TestSubscriptionMultipleNamespaces(t *testing.T) {
 
 	for {
 		done := true
-		for id, _ := range count {
+		for id := range count {
 			if count, found := count[id]; !found || count < (2*n) {
 				done = false
 			}
@@ -244,25 +244,25 @@ func TestClientFileList(t *testing.T) {
 	}
 
 	tests := map[string][]string{
-		"": []string{"dir1/", "dir2/", "file1.txt", "file2.txt"},
-		"file": []string{"file1.txt", "file2.txt"},
-		"file1": []string{"file1.txt"},
-		"file2.txt": []string{"file2.txt"},
-		"file12": []string{},
-		"dir": []string{"dir1/", "dir2/"},
-		"dir1": []string{"dir1/"},
-		"dir1/": []string{"dir1/file3.txt", "dir1/file4.txt"},
-		"dir1/file": []string{"dir1/file3.txt", "dir1/file4.txt"},
-		"dir1/file3.txt": []string{"dir1/file3.txt"},
-		"dir1/file34": []string{},
-		"dir2/": []string{"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"},
-		"dir2/file": []string{"dir2/file5.txt"},
-		"dir2/dir": []string{"dir2/dir3/", "dir2/dir4/"},
-		"dir2/dir3/": []string{"dir2/dir3/file6.txt"},
-		"dir2/dir4/": []string{"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
-		"dir2/dir4/file": []string{"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
-		"dir2/dir4/file7.txt": []string{"dir2/dir4/file7.txt"},
-		"dir2/dir4/file78": []string{},
+		"":                    {"dir1/", "dir2/", "file1.txt", "file2.txt"},
+		"file":                {"file1.txt", "file2.txt"},
+		"file1":               {"file1.txt"},
+		"file2.txt":           {"file2.txt"},
+		"file12":              {},
+		"dir":                 {"dir1/", "dir2/"},
+		"dir1":                {"dir1/"},
+		"dir1/":               {"dir1/file3.txt", "dir1/file4.txt"},
+		"dir1/file":           {"dir1/file3.txt", "dir1/file4.txt"},
+		"dir1/file3.txt":      {"dir1/file3.txt"},
+		"dir1/file34":         {},
+		"dir2/":               {"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"},
+		"dir2/file":           {"dir2/file5.txt"},
+		"dir2/dir":            {"dir2/dir3/", "dir2/dir4/"},
+		"dir2/dir3/":          {"dir2/dir3/file6.txt"},
+		"dir2/dir4/":          {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
+		"dir2/dir4/file":      {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
+		"dir2/dir4/file7.txt": {"dir2/dir4/file7.txt"},
+		"dir2/dir4/file78":    {},
 	}
 	for prefix, expected := range tests {
 		actual := ls(prefix)
@@ -50,7 +50,6 @@ data_{i} := size(subtree_{i}) || key_{j} || key_{j+1} .... || key_{j+n-1}
 The underlying hash function is configurable
 */
 
-
 /*
 Tree chunker is a concrete implementation of data chunking.
 This chunker works in a simple way, it builds a tree out of the document so that each node either represents a chunk of real data or a chunk of data representing an branching non-leaf node of the tree. In particular each such non-leaf chunk will represent is a concatenation of the hash of its respective children. This scheme simultaneously guarantees data integrity as well as self addressing. Abstract nodes are transparent since their represented size component is strictly greater than their maximum data size, since they encode a subtree.
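The doc comments above specify the chunk format (`data_{i} := size(subtree_{i}) || key_{j} || ... || key_{j+n-1}`) and sketch the tree chunker's algorithm. A minimal illustration of that shape, assuming SHA-256 as the configurable hash and ignoring the size prefix and worker concurrency of the real chunker (all names here are hypothetical):

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// buildTree hashes fixed-size leaf chunks, then repeatedly hashes
// concatenations of up to `branches` child keys until one root key
// remains -- the self-addressing tree described above.
// Assumes chunkSize >= 1 and branches >= 2.
func buildTree(data []byte, chunkSize, branches int) []byte {
	var keys [][]byte
	for i := 0; i < len(data); i += chunkSize {
		end := i + chunkSize
		if end > len(data) {
			end = len(data)
		}
		h := sha256.Sum256(data[i:end])
		keys = append(keys, h[:])
	}
	if len(keys) == 0 { // empty document: key of the empty chunk
		h := sha256.Sum256(nil)
		return h[:]
	}
	for len(keys) > 1 { // collapse one tree level per iteration
		var next [][]byte
		for i := 0; i < len(keys); i += branches {
			end := i + branches
			if end > len(keys) {
				end = len(keys)
			}
			h := sha256.Sum256(bytes.Join(keys[i:end], nil))
			next = append(next, h[:])
		}
		keys = next
	}
	return keys[0]
}

func main() {
	root := buildTree([]byte("some document body"), 4, 2)
	fmt.Printf("root key: %x\n", root)
}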
@@ -61,17 +60,17 @@ The hashing itself does use extra copies and allocation though, since it does ne
 
 var (
 	errAppendOppNotSuported = errors.New("Append operation not supported")
 	errOperationTimedOut    = errors.New("operation timed out")
 )
 
 type TreeChunker struct {
 	branches int64
 	hashFunc SwarmHasher
 	// calculated
 	hashSize    int64 // self.hashFunc.New().Size()
 	chunkSize   int64 // hashSize* branches
 	workerCount int64 // the number of worker routines used
 	workerLock  sync.RWMutex // lock for the worker count
 }
 
 func NewTreeChunker(params *ChunkerParams) (self *TreeChunker) {
@@ -124,7 +123,6 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
 		panic("chunker must be initialised")
 	}
 
-
 	jobC := make(chan *hashJob, 2*ChunkProcessors)
 	wg := &sync.WaitGroup{}
 	errC := make(chan error)
@@ -164,7 +162,6 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
 		close(errC)
 	}()
 
-
 	defer close(quitC)
 	select {
 	case err := <-errC:
@@ -172,7 +169,7 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
 			return nil, err
 		}
 	case <-time.NewTimer(splitTimeout).C:
-		return nil,errOperationTimedOut
+		return nil, errOperationTimedOut
 	}
 
 	return key, nil
@@ -123,7 +123,7 @@ type PyramidChunker struct {
 	hashSize    int64
 	branches    int64
 	workerCount int64
 	workerLock  sync.RWMutex
 }
 
 func NewPyramidChunker(params *ChunkerParams) (self *PyramidChunker) {
@@ -634,4 +634,4 @@ func (self *PyramidChunker) enqueueDataChunk(chunkData []byte, size uint64, pare
 
 	return pkey
 
 }
@@ -25,26 +25,26 @@ import (
 
 // This table defines supported forks and their chain config.
 var Forks = map[string]*params.ChainConfig{
-	"Frontier": &params.ChainConfig{
+	"Frontier": {
 		ChainId: big.NewInt(1),
 	},
-	"Homestead": &params.ChainConfig{
+	"Homestead": {
 		ChainId:        big.NewInt(1),
 		HomesteadBlock: big.NewInt(0),
 	},
-	"EIP150": &params.ChainConfig{
+	"EIP150": {
 		ChainId:        big.NewInt(1),
 		HomesteadBlock: big.NewInt(0),
 		EIP150Block:    big.NewInt(0),
 	},
-	"EIP158": &params.ChainConfig{
+	"EIP158": {
 		ChainId:        big.NewInt(1),
 		HomesteadBlock: big.NewInt(0),
 		EIP150Block:    big.NewInt(0),
 		EIP155Block:    big.NewInt(0),
 		EIP158Block:    big.NewInt(0),
 	},
-	"Byzantium": &params.ChainConfig{
+	"Byzantium": {
 		ChainId:        big.NewInt(1),
 		HomesteadBlock: big.NewInt(0),
 		EIP150Block:    big.NewInt(0),
@@ -53,22 +53,22 @@ var Forks = map[string]*params.ChainConfig{
 		DAOForkBlock:   big.NewInt(0),
 		ByzantiumBlock: big.NewInt(0),
 	},
-	"FrontierToHomesteadAt5": &params.ChainConfig{
+	"FrontierToHomesteadAt5": {
 		ChainId:        big.NewInt(1),
 		HomesteadBlock: big.NewInt(5),
 	},
-	"HomesteadToEIP150At5": &params.ChainConfig{
+	"HomesteadToEIP150At5": {
 		ChainId:        big.NewInt(1),
 		HomesteadBlock: big.NewInt(0),
 		EIP150Block:    big.NewInt(5),
 	},
-	"HomesteadToDaoAt5": &params.ChainConfig{
+	"HomesteadToDaoAt5": {
 		ChainId:        big.NewInt(1),
 		HomesteadBlock: big.NewInt(0),
 		DAOForkBlock:   big.NewInt(5),
 		DAOForkSupport: true,
 	},
-	"EIP158ToByzantiumAt5": &params.ChainConfig{
+	"EIP158ToByzantiumAt5": {
 		ChainId:        big.NewInt(1),
 		HomesteadBlock: big.NewInt(0),
 		EIP150Block:    big.NewInt(0),
@@ -112,7 +112,7 @@ type stTransactionMarshaling struct {
 func (t *StateTest) Subtests() []StateSubtest {
 	var sub []StateSubtest
 	for fork, pss := range t.json.Post {
-		for i, _ := range pss {
+		for i := range pss {
 			sub = append(sub, StateSubtest{fork, i})
 		}
 	}