Merge pull request #1357 from obscuren/core-optimisations-2
core: optimisations
commit 7c4ed8055c
7 Godeps/Godeps.json generated
@@ -1,6 +1,6 @@
{
    "ImportPath": "github.com/ethereum/go-ethereum",
    "GoVersion": "go1.4.2",
    "GoVersion": "go1.4",
    "Packages": [
        "./..."
    ],
@@ -28,6 +28,11 @@
        {
            "ImportPath": "github.com/gizak/termui",
            "Rev": "bab8dce01c193d82bc04888a0a9a7814d505f532"
        },
        {
            "ImportPath": "github.com/howeyc/fsnotify",
            "Comment": "v0.9.0-11-g6b1ef89",
            "Rev": "6b1ef893dc11e0447abda6da20a5203481878dda"
        },
        {
            "ImportPath": "github.com/huin/goupnp",
            "Rev": "5cff77a69fb22f5f1774c4451ea2aab63d4d2f20"
23 Godeps/_workspace/src/github.com/hashicorp/golang-lru/.gitignore generated vendored Normal file
@@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
362 Godeps/_workspace/src/github.com/hashicorp/golang-lru/LICENSE generated vendored Normal file
@@ -0,0 +1,362 @@
Mozilla Public License, version 2.0

1. Definitions

1.1. "Contributor"

means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.

1.2. "Contributor Version"

means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.

1.3. "Contribution"

means Covered Software of a particular Contributor.

1.4. "Covered Software"

means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.

1.5. "Incompatible With Secondary Licenses"
means

a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or

b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.

1.6. "Executable Form"

means any form of the work other than Source Code Form.

1.7. "Larger Work"

means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.

1.8. "License"

means this document.

1.9. "Licensable"

means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.

1.10. "Modifications"

means any of the following:

a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or

b. any new file in Source Code Form that contains any Covered Software.

1.11. "Patent Claims" of a Contributor

means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.

1.12. "Secondary License"

means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.

1.13. "Source Code Form"

means the form of the work preferred for making modifications.

1.14. "You" (or "Your")

means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.


2. License Grants and Conditions

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:

a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and

b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:

a. for any code that a Contributor has removed from Covered Software; or

b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or

c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.

This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.


3. Responsibilities

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and

b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).

3.4. Notices

You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation

If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.

6. Disclaimer of Warranty

Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.

7. Limitation of Liability

Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.

8. Litigation

Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.

9. Miscellaneous

This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.


10. Versions of the License

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.

Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice

This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
25 Godeps/_workspace/src/github.com/hashicorp/golang-lru/README.md generated vendored Normal file
@@ -0,0 +1,25 @@
golang-lru
==========

This provides the `lru` package which implements a fixed-size
thread safe LRU cache. It is based on the cache in Groupcache.

Documentation
=============

Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)

Example
=======

Using the LRU is very simple:

```go
l, _ := New(128)
for i := 0; i < 256; i++ {
    l.Add(i, nil)
}
if l.Len() != 128 {
    panic(fmt.Sprintf("bad len: %v", l.Len()))
}
```
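The vendored lru.go below also exposes NewWithEvict, which takes a callback that fires on every eviction. A minimal sketch of using it, assuming the Godeps workspace resolves the package at the upstream import path github.com/hashicorp/golang-lru:

```go
package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    // Evicted entries are reported through the callback.
    l, err := lru.NewWithEvict(2, func(key, value interface{}) {
        fmt.Printf("evicted %v=%v\n", key, value)
    })
    if err != nil {
        panic(err)
    }
    l.Add("a", 1)
    l.Add("b", 2)
    l.Add("c", 3)         // evicts "a", the least recently used entry
    fmt.Println(l.Keys()) // [b c], oldest to newest
}
```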
175 Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru.go generated vendored Normal file
@@ -0,0 +1,175 @@
// This package provides a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
package lru

import (
    "container/list"
    "errors"
    "sync"
)

// Cache is a thread-safe fixed size LRU cache.
type Cache struct {
    size      int
    evictList *list.List
    items     map[interface{}]*list.Element
    lock      sync.RWMutex
    onEvicted func(key interface{}, value interface{})
}

// entry is used to hold a value in the evictList
type entry struct {
    key   interface{}
    value interface{}
}

// New creates an LRU of the given size
func New(size int) (*Cache, error) {
    return NewWithEvict(size, nil)
}

func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
    if size <= 0 {
        return nil, errors.New("Must provide a positive size")
    }
    c := &Cache{
        size:      size,
        evictList: list.New(),
        items:     make(map[interface{}]*list.Element, size),
        onEvicted: onEvicted,
    }
    return c, nil
}

// Purge is used to completely clear the cache
func (c *Cache) Purge() {
    c.lock.Lock()
    defer c.lock.Unlock()

    if c.onEvicted != nil {
        for k, v := range c.items {
            c.onEvicted(k, v.Value.(*entry).value)
        }
    }

    c.evictList = list.New()
    c.items = make(map[interface{}]*list.Element, c.size)
}

// Add adds a value to the cache. Returns true if an eviction occured.
func (c *Cache) Add(key, value interface{}) bool {
    c.lock.Lock()
    defer c.lock.Unlock()

    // Check for existing item
    if ent, ok := c.items[key]; ok {
        c.evictList.MoveToFront(ent)
        ent.Value.(*entry).value = value
        return false
    }

    // Add new item
    ent := &entry{key, value}
    entry := c.evictList.PushFront(ent)
    c.items[key] = entry

    evict := c.evictList.Len() > c.size
    // Verify size not exceeded
    if evict {
        c.removeOldest()
    }
    return evict
}

// Get looks up a key's value from the cache.
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
    c.lock.Lock()
    defer c.lock.Unlock()

    if ent, ok := c.items[key]; ok {
        c.evictList.MoveToFront(ent)
        return ent.Value.(*entry).value, true
    }
    return
}

// Check if a key is in the cache, without updating the recent-ness or deleting it for being stale.
func (c *Cache) Contains(key interface{}) (ok bool) {
    c.lock.RLock()
    defer c.lock.RUnlock()

    _, ok = c.items[key]
    return ok
}

// Returns the key value (or undefined if not found) without updating the "recently used"-ness of the key.
// (If you find yourself using this a lot, you might be using the wrong sort of data structure, but there are some use cases where it's handy.)
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
    c.lock.RLock()
    defer c.lock.RUnlock()

    if ent, ok := c.items[key]; ok {
        return ent.Value.(*entry).value, true
    }
    return nil, ok
}

// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) {
    c.lock.Lock()
    defer c.lock.Unlock()

    if ent, ok := c.items[key]; ok {
        c.removeElement(ent)
    }
}

// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.removeOldest()
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache) Keys() []interface{} {
    c.lock.RLock()
    defer c.lock.RUnlock()

    keys := make([]interface{}, len(c.items))
    ent := c.evictList.Back()
    i := 0
    for ent != nil {
        keys[i] = ent.Value.(*entry).key
        ent = ent.Prev()
        i++
    }

    return keys
}

// Len returns the number of items in the cache.
func (c *Cache) Len() int {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.evictList.Len()
}

// removeOldest removes the oldest item from the cache.
func (c *Cache) removeOldest() {
    ent := c.evictList.Back()
    if ent != nil {
        c.removeElement(ent)
    }
}

// removeElement is used to remove a given list element from the cache
func (c *Cache) removeElement(e *list.Element) {
    c.evictList.Remove(e)
    kv := e.Value.(*entry)
    delete(c.items, kv.key)
    if c.onEvicted != nil {
        c.onEvicted(kv.key, kv.value)
    }
}
127 Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru_test.go generated vendored Normal file
@@ -0,0 +1,127 @@
package lru

import "testing"

func TestLRU(t *testing.T) {
    evictCounter := 0
    onEvicted := func(k interface{}, v interface{}) {
        if k != v {
            t.Fatalf("Evict values not equal (%v!=%v)", k, v)
        }
        evictCounter += 1
    }
    l, err := NewWithEvict(128, onEvicted)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    for i := 0; i < 256; i++ {
        l.Add(i, i)
    }
    if l.Len() != 128 {
        t.Fatalf("bad len: %v", l.Len())
    }

    if evictCounter != 128 {
        t.Fatalf("bad evict count: %v", evictCounter)
    }

    for i, k := range l.Keys() {
        if v, ok := l.Get(k); !ok || v != k || v != i+128 {
            t.Fatalf("bad key: %v", k)
        }
    }
    for i := 0; i < 128; i++ {
        _, ok := l.Get(i)
        if ok {
            t.Fatalf("should be evicted")
        }
    }
    for i := 128; i < 256; i++ {
        _, ok := l.Get(i)
        if !ok {
            t.Fatalf("should not be evicted")
        }
    }
    for i := 128; i < 192; i++ {
        l.Remove(i)
        _, ok := l.Get(i)
        if ok {
            t.Fatalf("should be deleted")
        }
    }

    l.Get(192) // expect 192 to be last key in l.Keys()

    for i, k := range l.Keys() {
        if (i < 63 && k != i+193) || (i == 63 && k != 192) {
            t.Fatalf("out of order key: %v", k)
        }
    }

    l.Purge()
    if l.Len() != 0 {
        t.Fatalf("bad len: %v", l.Len())
    }
    if _, ok := l.Get(200); ok {
        t.Fatalf("should contain nothing")
    }
}

// test that Add returns true/false if an eviction occured
func TestLRUAdd(t *testing.T) {
    evictCounter := 0
    onEvicted := func(k interface{}, v interface{}) {
        evictCounter += 1
    }

    l, err := NewWithEvict(1, onEvicted)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    if l.Add(1, 1) == true || evictCounter != 0 {
        t.Errorf("should not have an eviction")
    }
    if l.Add(2, 2) == false || evictCounter != 1 {
        t.Errorf("should have an eviction")
    }
}

// test that Contains doesn't update recent-ness
func TestLRUContains(t *testing.T) {
    l, err := New(2)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    l.Add(1, 1)
    l.Add(2, 2)
    if !l.Contains(1) {
        t.Errorf("1 should be contained")
    }

    l.Add(3, 3)
    if l.Contains(1) {
        t.Errorf("Contains should not have updated recent-ness of 1")
    }
}

// test that Peek doesn't update recent-ness
func TestLRUPeek(t *testing.T) {
    l, err := New(2)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    l.Add(1, 1)
    l.Add(2, 2)
    if v, ok := l.Peek(1); !ok || v != 1 {
        t.Errorf("1 should be set to 1: %v, %v", v, ok)
    }

    l.Add(3, 3)
    if l.Contains(1) {
        t.Errorf("should not have updated recent-ness of 1")
    }
}
163 core/bench_test.go Normal file
@@ -0,0 +1,163 @@
package core

import (
    "crypto/ecdsa"
    "io/ioutil"
    "math/big"
    "os"
    "testing"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/params"
)

func BenchmarkInsertChain_empty_memdb(b *testing.B) {
    benchInsertChain(b, false, nil)
}
func BenchmarkInsertChain_empty_diskdb(b *testing.B) {
    benchInsertChain(b, true, nil)
}
func BenchmarkInsertChain_valueTx_memdb(b *testing.B) {
    benchInsertChain(b, false, genValueTx(0))
}
func BenchmarkInsertChain_valueTx_diskdb(b *testing.B) {
    benchInsertChain(b, true, genValueTx(0))
}
func BenchmarkInsertChain_valueTx_100kB_memdb(b *testing.B) {
    benchInsertChain(b, false, genValueTx(100*1024))
}
func BenchmarkInsertChain_valueTx_100kB_diskdb(b *testing.B) {
    benchInsertChain(b, true, genValueTx(100*1024))
}
func BenchmarkInsertChain_uncles_memdb(b *testing.B) {
    benchInsertChain(b, false, genUncles)
}
func BenchmarkInsertChain_uncles_diskdb(b *testing.B) {
    benchInsertChain(b, true, genUncles)
}
func BenchmarkInsertChain_ring200_memdb(b *testing.B) {
    benchInsertChain(b, false, genTxRing(200))
}
func BenchmarkInsertChain_ring200_diskdb(b *testing.B) {
    benchInsertChain(b, true, genTxRing(200))
}
func BenchmarkInsertChain_ring1000_memdb(b *testing.B) {
    benchInsertChain(b, false, genTxRing(1000))
}
func BenchmarkInsertChain_ring1000_diskdb(b *testing.B) {
    benchInsertChain(b, true, genTxRing(1000))
}

var (
    // This is the content of the genesis block used by the benchmarks.
    benchRootKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    benchRootAddr   = crypto.PubkeyToAddress(benchRootKey.PublicKey)
    benchRootFunds  = common.BigPow(2, 100)
)

// genValueTx returns a block generator that includes a single
// value-transfer transaction with n bytes of extra data in each
// block.
func genValueTx(nbytes int) func(int, *BlockGen) {
    return func(i int, gen *BlockGen) {
        toaddr := common.Address{}
        data := make([]byte, nbytes)
        gas := IntrinsicGas(data)
        tx, _ := types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, nil, data).SignECDSA(benchRootKey)
        gen.AddTx(tx)
    }
}

var (
    ringKeys  = make([]*ecdsa.PrivateKey, 1000)
    ringAddrs = make([]common.Address, len(ringKeys))
)

func init() {
    ringKeys[0] = benchRootKey
    ringAddrs[0] = benchRootAddr
    for i := 1; i < len(ringKeys); i++ {
        ringKeys[i], _ = crypto.GenerateKey()
        ringAddrs[i] = crypto.PubkeyToAddress(ringKeys[i].PublicKey)
    }
}

// genTxRing returns a block generator that sends ether in a ring
// among n accounts. This is creates n entries in the state database
// and fills the blocks with many small transactions.
func genTxRing(naccounts int) func(int, *BlockGen) {
    from := 0
    return func(i int, gen *BlockGen) {
        gas := CalcGasLimit(gen.PrevBlock(i - 1))
        for {
            gas.Sub(gas, params.TxGas)
            if gas.Cmp(params.TxGas) < 0 {
                break
            }
            to := (from + 1) % naccounts
            tx := types.NewTransaction(
                gen.TxNonce(ringAddrs[from]),
                ringAddrs[to],
                benchRootFunds,
                params.TxGas,
                nil,
                nil,
            )
            tx, _ = tx.SignECDSA(ringKeys[from])
            gen.AddTx(tx)
            from = to
        }
    }
}

// genUncles generates blocks with two uncle headers.
func genUncles(i int, gen *BlockGen) {
    if i >= 6 {
        b2 := gen.PrevBlock(i - 6).Header()
        b2.Extra = []byte("foo")
        gen.AddUncle(b2)
        b3 := gen.PrevBlock(i - 6).Header()
        b3.Extra = []byte("bar")
        gen.AddUncle(b3)
    }
}

func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
    // Create the database in memory or in a temporary directory.
    var db common.Database
    if !disk {
        db, _ = ethdb.NewMemDatabase()
    } else {
        dir, err := ioutil.TempDir("", "eth-core-bench")
        if err != nil {
            b.Fatalf("cannot create temporary directory: %v", err)
        }
        defer os.RemoveAll(dir)
        db, err = ethdb.NewLDBDatabase(dir)
        if err != nil {
            b.Fatalf("cannot create temporary database: %v", err)
        }
        defer db.Close()
    }

    // Generate a chain of b.N blocks using the supplied block
    // generator function.
    genesis := GenesisBlockForTesting(db, benchRootAddr, benchRootFunds)
    chain := GenerateChain(genesis, db, b.N, gen)

    // Time the insertion of the new chain.
    // State and blocks are stored in the same DB.
    evmux := new(event.TypeMux)
    chainman, _ := NewChainManager(genesis, db, db, FakePow{}, evmux)
    chainman.SetProcessor(NewBlockProcessor(db, db, FakePow{}, chainman, evmux))
    defer chainman.Stop()
    b.ReportAllocs()
    b.ResetTimer()
    if i, err := chainman.InsertChain(chain); err != nil {
        b.Fatalf("insert error (block %d): %v\n", i, err)
    }
}
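These benchmarks run with the standard Go tooling; the flags and package path below are the usual go test conventions, not something introduced by this change:

```
go test -run NONE -bench InsertChain -benchmem ./core
```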
@@ -11,12 +11,12 @@ import (
func newChain(size int) (chain []*types.Block) {
    var parentHash common.Hash
    for i := 0; i < size; i++ {
        block := types.NewBlock(parentHash, common.Address{}, common.Hash{}, new(big.Int), 0, nil)
        block.Header().Number = big.NewInt(int64(i))
        head := &types.Header{ParentHash: parentHash, Number: big.NewInt(int64(i))}
        block := types.NewBlock(head, nil, nil, nil)
        chain = append(chain, block)
        parentHash = block.Hash()
    }
    return
    return chain
}

func insertChainCache(cache *BlockCache, chain []*types.Block) {
@@ -57,8 +57,8 @@ func NewBlockProcessor(db, extra common.Database, pow pow.PoW, chainManager *Cha
}

func (sm *BlockProcessor) TransitionState(statedb *state.StateDB, parent, block *types.Block, transientProcess bool) (receipts types.Receipts, err error) {
    coinbase := statedb.GetOrNewStateObject(block.Header().Coinbase)
    coinbase.SetGasLimit(block.Header().GasLimit)
    coinbase := statedb.GetOrNewStateObject(block.Coinbase())
    coinbase.SetGasLimit(block.GasLimit())

    // Process the transactions on to parent state
    receipts, err = sm.ApplyTransactions(coinbase, statedb, block, block.Transactions(), transientProcess)
@@ -69,11 +69,11 @@ func (sm *BlockProcessor) TransitionState(statedb *state.StateDB, parent, block
    return receipts, nil
}

func (self *BlockProcessor) ApplyTransaction(coinbase *state.StateObject, statedb *state.StateDB, block *types.Block, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
func (self *BlockProcessor) ApplyTransaction(coinbase *state.StateObject, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
    // If we are mining this block and validating we want to set the logs back to 0

    cb := statedb.GetStateObject(coinbase.Address())
    _, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, block), tx, cb)
    _, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, header), tx, cb)
    if err != nil && (IsNonceErr(err) || state.IsGasLimitErr(err) || IsInvalidTxErr(err)) {
        return nil, nil, err
    }
@@ -81,9 +81,8 @@ func (self *BlockProcessor) ApplyTransaction(coinbase *state.StateObject, stated
    // Update the state with pending changes
    statedb.Update()

    cumulative := new(big.Int).Set(usedGas.Add(usedGas, gas))
    receipt := types.NewReceipt(statedb.Root().Bytes(), cumulative)

    usedGas.Add(usedGas, gas)
    receipt := types.NewReceipt(statedb.Root().Bytes(), usedGas)
    logs := statedb.GetLogs(tx.Hash())
    receipt.SetLogs(logs)
    receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
@@ -108,12 +107,13 @@ func (self *BlockProcessor) ApplyTransactions(coinbase *state.StateObject, state
        totalUsedGas  = big.NewInt(0)
        err           error
        cumulativeSum = new(big.Int)
        header        = block.Header()
    )

    for i, tx := range txs {
        statedb.StartRecord(tx.Hash(), block.Hash(), i)

        receipt, txGas, err := self.ApplyTransaction(coinbase, statedb, block, tx, totalUsedGas, transientProcess)
        receipt, txGas, err := self.ApplyTransaction(coinbase, statedb, header, tx, totalUsedGas, transientProcess)
        if err != nil && (IsNonceErr(err) || state.IsGasLimitErr(err) || IsInvalidTxErr(err)) {
            return nil, err
        }
@@ -142,11 +142,10 @@ func (sm *BlockProcessor) RetryProcess(block *types.Block) (logs state.Logs, err
    sm.mutex.Lock()
    defer sm.mutex.Unlock()

    header := block.Header()
    if !sm.bc.HasBlock(header.ParentHash) {
        return nil, ParentError(header.ParentHash)
    if !sm.bc.HasBlock(block.ParentHash()) {
        return nil, ParentError(block.ParentHash())
    }
    parent := sm.bc.GetBlock(header.ParentHash)
    parent := sm.bc.GetBlock(block.ParentHash())

    // FIXME Change to full header validation. See #1225
    errch := make(chan bool)
@@ -168,30 +167,32 @@ func (sm *BlockProcessor) Process(block *types.Block) (logs state.Logs, err erro
    sm.mutex.Lock()
    defer sm.mutex.Unlock()

    header := block.Header()
    if sm.bc.HasBlock(header.Hash()) {
        return nil, &KnownBlockError{header.Number, header.Hash()}
    if sm.bc.HasBlock(block.Hash()) {
        return nil, &KnownBlockError{block.Number(), block.Hash()}
    }

    if !sm.bc.HasBlock(header.ParentHash) {
        return nil, ParentError(header.ParentHash)
    if !sm.bc.HasBlock(block.ParentHash()) {
        return nil, ParentError(block.ParentHash())
    }
    parent := sm.bc.GetBlock(header.ParentHash)
    parent := sm.bc.GetBlock(block.ParentHash())
    return sm.processWithParent(block, parent)
}

func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs state.Logs, err error) {
    // Create a new state based on the parent's root (e.g., create copy)
    state := state.New(parent.Root(), sm.db)
    header := block.Header()
    uncles := block.Uncles()
    txs := block.Transactions()

    // Block validation
    if err = ValidateHeader(sm.Pow, block.Header(), parent.Header(), false); err != nil {
    if err = ValidateHeader(sm.Pow, header, parent, false); err != nil {
        return
    }

    // There can be at most two uncles
    if len(block.Uncles()) > 2 {
        return nil, ValidationError("Block can only contain maximum 2 uncles (contained %v)", len(block.Uncles()))
    if len(uncles) > 2 {
        return nil, ValidationError("Block can only contain maximum 2 uncles (contained %v)", len(uncles))
    }

    receipts, err := sm.TransitionState(state, parent, block, false)
@@ -199,8 +200,6 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st
        return
    }

    header := block.Header()

    // Validate the received block's bloom with the one derived from the generated receipts.
    // For valid blocks this should always validate to true.
    rbloom := types.CreateBloom(receipts)
@@ -211,7 +210,7 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st

    // The transactions Trie's root (R = (Tr [[i, RLP(T1)], [i, RLP(T2)], ... [n, RLP(Tn)]]))
    // can be used by light clients to make sure they've received the correct Txs
    txSha := types.DeriveSha(block.Transactions())
    txSha := types.DeriveSha(txs)
    if txSha != header.TxHash {
        err = fmt.Errorf("invalid transaction root hash. received=%x calculated=%x", header.TxHash, txSha)
        return
@@ -225,7 +224,7 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st
    }

    // Verify UncleHash before running other uncle validations
    unclesSha := block.CalculateUnclesHash()
    unclesSha := types.CalcUncleHash(uncles)
    if unclesSha != header.UncleHash {
        err = fmt.Errorf("invalid uncles root hash. received=%x calculated=%x", header.UncleHash, unclesSha)
        return
@@ -236,7 +235,7 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st
        return
    }
    // Accumulate static rewards; block reward, uncle's and uncle inclusion.
    AccumulateRewards(state, block)
    AccumulateRewards(state, header, uncles)

    // Commit state objects/accounts to a temporary trie (does not save)
    // used to calculate the state root.
@@ -260,20 +259,44 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st
    return state.Logs(), nil
}

var (
    big8  = big.NewInt(8)
    big32 = big.NewInt(32)
)

// AccumulateRewards credits the coinbase of the given block with the
// mining reward. The total reward consists of the static block reward
// and rewards for included uncles. The coinbase of each uncle block is
// also rewarded.
func AccumulateRewards(statedb *state.StateDB, header *types.Header, uncles []*types.Header) {
    reward := new(big.Int).Set(BlockReward)
    r := new(big.Int)
    for _, uncle := range uncles {
        r.Add(uncle.Number, big8)
        r.Sub(r, header.Number)
        r.Mul(r, BlockReward)
        r.Div(r, big8)
        statedb.AddBalance(uncle.Coinbase, r)

        r.Div(BlockReward, big32)
        reward.Add(reward, r)
    }
    statedb.AddBalance(header.Coinbase, reward)
}
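For concreteness, a quick worked example of the uncle-reward arithmetic above. The 5-ether BlockReward is the Frontier value and is assumed here rather than taken from this diff:

```go
package main

import (
    "fmt"
    "math/big"
)

func main() {
    blockReward := big.NewInt(5e18) // assumed Frontier block reward: 5 ether, in wei
    big8, big32 := big.NewInt(8), big.NewInt(32)

    // Uncle mined one block behind its nephew: (8 - depth) / 8 of the block reward.
    depth := big.NewInt(1)
    uncleReward := new(big.Int).Sub(big8, depth)
    uncleReward.Mul(uncleReward, blockReward)
    uncleReward.Div(uncleReward, big8)
    fmt.Println(uncleReward) // 4375000000000000000 wei = 4.375 ether

    // The including miner earns BlockReward/32 per uncle on top of the full reward.
    inclusion := new(big.Int).Div(blockReward, big32)
    fmt.Println(inclusion) // 156250000000000000 wei = 0.15625 ether
}
```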
func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *types.Block) error {
    ancestors := set.New()
    uncles := set.New()
    ancestorHeaders := make(map[common.Hash]*types.Header)
    for _, ancestor := range sm.bc.GetAncestors(block, 7) {
        ancestorHeaders[ancestor.Hash()] = ancestor.Header()
        ancestors.Add(ancestor.Hash())
    ancestors := make(map[common.Hash]*types.Block)
    for _, ancestor := range sm.bc.GetBlocksFromHash(block.ParentHash(), 7) {
        ancestors[ancestor.Hash()] = ancestor
        // Include ancestors uncles in the uncle set. Uncles must be unique.
        for _, uncle := range ancestor.Uncles() {
            uncles.Add(uncle.Hash())
        }
    }

    ancestors[block.Hash()] = block
    uncles.Add(block.Hash())

    for i, uncle := range block.Uncles() {
        hash := uncle.Hash()
        if uncles.Has(hash) {
@@ -282,22 +305,20 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty
        }
        uncles.Add(hash)

        if ancestors.Has(hash) {
        if ancestors[hash] != nil {
            branch := fmt.Sprintf("  O - %x\n  |\n", block.Hash())
            ancestors.Each(func(item interface{}) bool {
                branch += fmt.Sprintf("  O - %x\n  |\n", hash)
                return true
            })
            for h := range ancestors {
                branch += fmt.Sprintf("  O - %x\n  |\n", h)
            }
            glog.Infoln(branch)

            return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
        }

        if !ancestors.Has(uncle.ParentHash) || uncle.ParentHash == parent.Hash() {
        if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == parent.Hash() {
            return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
        }

        if err := ValidateHeader(sm.Pow, uncle, ancestorHeaders[uncle.ParentHash], true); err != nil {
        if err := ValidateHeader(sm.Pow, uncle, ancestors[uncle.ParentHash], true); err != nil {
            return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
        }
    }
@@ -325,7 +346,7 @@ func (sm *BlockProcessor) GetLogs(block *types.Block) (logs state.Logs, err erro

    // TODO: remove backward compatibility
    var (
        parent = sm.bc.GetBlock(block.Header().ParentHash)
        parent = sm.bc.GetBlock(block.ParentHash())
        state  = state.New(parent.Root(), sm.db)
    )

@@ -336,19 +357,22 @@ func (sm *BlockProcessor) GetLogs(block *types.Block) (logs state.Logs, err erro

// See YP section 4.3.4. "Block Header Validity"
// Validates a block. Returns an error if the block is invalid.
func ValidateHeader(pow pow.PoW, block, parent *types.Header, checkPow bool) error {
func ValidateHeader(pow pow.PoW, block *types.Header, parent *types.Block, checkPow bool) error {
    if big.NewInt(int64(len(block.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
        return fmt.Errorf("Block extra data too long (%d)", len(block.Extra))
    }

    expd := CalcDifficulty(block, parent)
    expd := CalcDifficulty(int64(block.Time), int64(parent.Time()), parent.Difficulty())
    if expd.Cmp(block.Difficulty) != 0 {
        return fmt.Errorf("Difficulty check failed for block %v, %v", block.Difficulty, expd)
    }

    a := new(big.Int).Sub(block.GasLimit, parent.GasLimit)
    var a, b *big.Int
    a = parent.GasLimit()
    a = a.Sub(a, block.GasLimit)
    a.Abs(a)
    b := new(big.Int).Div(parent.GasLimit, params.GasLimitBoundDivisor)
    b = parent.GasLimit()
    b = b.Div(b, params.GasLimitBoundDivisor)
    if !(a.Cmp(b) < 0) || (block.GasLimit.Cmp(params.MinGasLimit) == -1) {
        return fmt.Errorf("GasLimit check failed for block %v (%v > %v)", block.GasLimit, a, b)
    }
@@ -357,11 +381,13 @@ func ValidateHeader(pow pow.PoW, block, parent *types.Header, checkPow bool) err
        return BlockFutureErr
    }

    if new(big.Int).Sub(block.Number, parent.Number).Cmp(big.NewInt(1)) != 0 {
    num := parent.Number()
    num.Sub(block.Number, num)
    if num.Cmp(big.NewInt(1)) != 0 {
        return BlockNumberErr
    }

    if block.Time <= parent.Time {
    if block.Time <= uint64(parent.Time()) {
        return BlockEqualTSErr //ValidationError("Block timestamp equal or less than previous block (%v - %v)", block.Time, parent.Time)
    }

@@ -375,26 +401,6 @@ func ValidateHeader(pow pow.PoW, block, parent *types.Header, checkPow bool) err
    return nil
}
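To make the gas-limit bound in ValidateHeader concrete, a small sketch with illustrative numbers. The GasLimitBoundDivisor value of 1024 is assumed from params, and the parent gas limit is made up for the example:

```go
package main

import (
    "fmt"
    "math/big"
)

func main() {
    parentGasLimit := big.NewInt(3141592)
    bound := new(big.Int).Div(parentGasLimit, big.NewInt(1024)) // 3067

    // A child gas limit passes only if |parent - child| < bound,
    // i.e. anything from 3,138,526 to 3,144,658 here.
    child := big.NewInt(3141000)
    diff := new(big.Int).Sub(parentGasLimit, child)
    diff.Abs(diff)
    fmt.Println(diff.Cmp(bound) < 0) // true
}
```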
func AccumulateRewards(statedb *state.StateDB, block *types.Block) {
    reward := new(big.Int).Set(BlockReward)

    for _, uncle := range block.Uncles() {
        num := new(big.Int).Add(big.NewInt(8), uncle.Number)
        num.Sub(num, block.Number())

        r := new(big.Int)
        r.Mul(BlockReward, num)
        r.Div(r, big.NewInt(8))

        statedb.AddBalance(uncle.Coinbase, r)

        reward.Add(reward, new(big.Int).Div(BlockReward, big.NewInt(32)))
    }

    // Get the account associated with the coinbase
    statedb.AddBalance(block.Header().Coinbase, reward)
}

func getBlockReceipts(db common.Database, bhash common.Hash) (receipts types.Receipts, err error) {
    var rdata []byte
    rdata, err = db.Get(append(receiptsPre, bhash[:]...))
@@ -26,20 +26,19 @@ func proc() (*BlockProcessor, *ChainManager) {
}

func TestNumber(t *testing.T) {
    _, chain := proc()
    block1 := chain.NewBlock(common.Address{})
    block1.Header().Number = big.NewInt(3)
    block1.Header().Time--

    pow := ezp.New()
    _, chain := proc()

    err := ValidateHeader(pow, block1.Header(), chain.Genesis().Header(), false)
    statedb := state.New(chain.Genesis().Root(), chain.stateDb)
    header := makeHeader(chain.Genesis(), statedb)
    header.Number = big.NewInt(3)
    err := ValidateHeader(pow, header, chain.Genesis(), false)
    if err != BlockNumberErr {
        t.Errorf("expected block number error %v", err)
        t.Errorf("expected block number error, got %q", err)
    }

    block1 = chain.NewBlock(common.Address{})
    err = ValidateHeader(pow, block1.Header(), chain.Genesis().Header(), false)
    header = makeHeader(chain.Genesis(), statedb)
    err = ValidateHeader(pow, header, chain.Genesis(), false)
    if err == BlockNumberErr {
        t.Errorf("didn't expect block number error")
    }
28 core/canary.go Normal file
@@ -0,0 +1,28 @@
package core

import (
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/state"
)

var (
    jeff      = common.HexToAddress("9d38997c624a71b21278389ea2fdc460d000e4b2")
    vitalik   = common.HexToAddress("b1e570be07eaa673e4fd0c8265b64ef739385709")
    christoph = common.HexToAddress("529bc43a5d93789fa28de1961db6a07e752204ae")
    gav       = common.HexToAddress("e3e942b2aa524293c84ff6c7f87a6635790ad5e4")
)

// Canary will check the 0'd address of the 4 contracts above.
// If two or more are set to anything other than a 0 the canary
// dies a horrible death.
func Canary(statedb *state.StateDB) bool {
    r := new(big.Int)
    r.Add(r, statedb.GetState(jeff, common.Hash{}).Big())
    r.Add(r, statedb.GetState(vitalik, common.Hash{}).Big())
    r.Add(r, statedb.GetState(christoph, common.Hash{}).Big())
    r.Add(r, statedb.GetState(gav, common.Hash{}).Big())

    return r.Cmp(big.NewInt(1)) > 0
}
@ -1,7 +1,6 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -11,7 +10,8 @@ import (
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
)
|
||||
|
||||
// So we can generate blocks easily
|
||||
// FakePow is a non-validating proof of work implementation.
|
||||
// It returns true from Verify for any block.
|
||||
type FakePow struct{}
|
||||
|
||||
func (f FakePow) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte) {
|
||||
@ -23,81 +23,125 @@ func (f FakePow) Turbo(bool) {}
|
||||
|
||||
// So we can deterministically seed different blockchains
|
||||
var (
|
||||
CanonicalSeed = 1
|
||||
ForkSeed = 2
|
||||
canonicalSeed = 1
|
||||
forkSeed = 2
|
||||
)
|
||||
|
||||
// Utility functions for making chains on the fly
|
||||
// Exposed for sake of testing from other packages (eg. go-ethash)
|
||||
func NewBlockFromParent(addr common.Address, parent *types.Block) *types.Block {
|
||||
return newBlockFromParent(addr, parent)
|
||||
// BlockGen creates blocks for testing.
|
||||
// See GenerateChain for a detailed explanation.
|
||||
type BlockGen struct {
|
||||
i int
|
||||
parent *types.Block
|
||||
chain []*types.Block
|
||||
header *types.Header
|
||||
statedb *state.StateDB
|
||||
|
||||
coinbase *state.StateObject
|
||||
txs []*types.Transaction
|
||||
receipts []*types.Receipt
|
||||
uncles []*types.Header
|
||||
}
|
||||
|
||||
func MakeBlock(bman *BlockProcessor, parent *types.Block, i int, db common.Database, seed int) *types.Block {
|
||||
return makeBlock(bman, parent, i, db, seed)
|
||||
}
|
||||
|
||||
func MakeChain(bman *BlockProcessor, parent *types.Block, max int, db common.Database, seed int) types.Blocks {
|
||||
return makeChain(bman, parent, max, db, seed)
|
||||
}
|
||||
|
||||
func NewChainMan(block *types.Block, eventMux *event.TypeMux, db common.Database) *ChainManager {
|
||||
return newChainManager(block, eventMux, db)
|
||||
}
|
||||
|
||||
func NewBlockProc(db common.Database, cman *ChainManager, eventMux *event.TypeMux) *BlockProcessor {
|
||||
return newBlockProcessor(db, cman, eventMux)
|
||||
}
|
||||
|
||||
func NewCanonical(n int, db common.Database) (*BlockProcessor, error) {
|
||||
return newCanonical(n, db)
|
||||
}
|
||||
|
||||
// block time is fixed at 10 seconds
|
||||
func newBlockFromParent(addr common.Address, parent *types.Block) *types.Block {
|
||||
block := types.NewBlock(parent.Hash(), addr, parent.Root(), common.BigPow(2, 32), 0, nil)
|
||||
block.SetUncles(nil)
|
||||
block.SetTransactions(nil)
|
||||
block.SetReceipts(nil)
|
||||
|
||||
header := block.Header()
|
||||
header.Difficulty = CalcDifficulty(block.Header(), parent.Header())
|
||||
header.Number = new(big.Int).Add(parent.Header().Number, common.Big1)
|
||||
header.Time = parent.Header().Time + 10
|
||||
header.GasLimit = CalcGasLimit(parent)
|
||||
|
||||
block.Td = parent.Td
|
||||
|
||||
return block
|
||||
}
|
||||
|
||||
// Actually make a block by simulating what miner would do
|
||||
// we seed chains by the first byte of the coinbase
|
||||
func makeBlock(bman *BlockProcessor, parent *types.Block, i int, db common.Database, seed int) *types.Block {
|
||||
var addr common.Address
|
||||
addr[0], addr[19] = byte(seed), byte(i)
|
||||
block := newBlockFromParent(addr, parent)
|
||||
state := state.New(block.Root(), db)
|
||||
cbase := state.GetOrNewStateObject(addr)
|
||||
cbase.SetGasLimit(CalcGasLimit(parent))
|
||||
cbase.AddBalance(BlockReward)
|
||||
state.Update()
|
||||
block.SetRoot(state.Root())
|
||||
return block
|
||||
}
|
||||
|
||||
// Make a chain with real blocks
|
||||
// Runs ProcessWithParent to get proper state roots
|
||||
func makeChain(bman *BlockProcessor, parent *types.Block, max int, db common.Database, seed int) types.Blocks {
|
||||
bman.bc.currentBlock = parent
|
||||
blocks := make(types.Blocks, max)
|
||||
for i := 0; i < max; i++ {
|
||||
block := makeBlock(bman, parent, i, db, seed)
|
||||
_, err := bman.processWithParent(block, parent)
|
||||
if err != nil {
|
||||
fmt.Println("process with parent failed", err)
|
||||
panic(err)
|
||||
// SetCoinbase sets the coinbase of the generated block.
// It can be called at most once.
func (b *BlockGen) SetCoinbase(addr common.Address) {
if b.coinbase != nil {
if len(b.txs) > 0 {
panic("coinbase must be set before adding transactions")
}
panic("coinbase can only be set once")
}
b.header.Coinbase = addr
b.coinbase = b.statedb.GetOrNewStateObject(addr)
b.coinbase.SetGasLimit(b.header.GasLimit)
}

// SetExtra sets the extra data field of the generated block.
func (b *BlockGen) SetExtra(data []byte) {
b.header.Extra = data
}

// AddTx adds a transaction to the generated block. If no coinbase has
// been set, the block's coinbase is set to the zero address.
//
// AddTx panics if the transaction cannot be executed. In addition to
// the protocol-imposed limitations (gas limit, etc.), there are some
// further limitations on the content of transactions that can be
// added. Notably, contract code relying on the BLOCKHASH instruction
// will panic during execution.
func (b *BlockGen) AddTx(tx *types.Transaction) {
if b.coinbase == nil {
b.SetCoinbase(common.Address{})
}
_, gas, err := ApplyMessage(NewEnv(b.statedb, nil, tx, b.header), tx, b.coinbase)
if err != nil {
panic(err)
}
b.statedb.Update()
b.header.GasUsed.Add(b.header.GasUsed, gas)
receipt := types.NewReceipt(b.statedb.Root().Bytes(), b.header.GasUsed)
logs := b.statedb.GetLogs(tx.Hash())
receipt.SetLogs(logs)
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
b.txs = append(b.txs, tx)
b.receipts = append(b.receipts, receipt)
}

// TxNonce returns the next valid transaction nonce for the
// account at addr. It panics if the account does not exist.
func (b *BlockGen) TxNonce(addr common.Address) uint64 {
if !b.statedb.HasAccount(addr) {
panic("account does not exist")
}
return b.statedb.GetNonce(addr)
}

// AddUncle adds an uncle header to the generated block.
func (b *BlockGen) AddUncle(h *types.Header) {
b.uncles = append(b.uncles, h)
}

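Illustrative sketch, not part of the diff: a minimal generator callback wired from the BlockGen helpers above. key1, addr1, addr2, parent and db are assumed test values (a funded key, two addresses, a parent block and a database), as in the example test added further below.

gen := func(i int, b *BlockGen) {
	// Every generated block moves 1000 wei from addr1 to addr2.
	tx, _ := types.NewTransaction(b.TxNonce(addr1), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1)
	b.AddTx(tx)
}
chain := GenerateChain(parent, db, 5, gen) // see GenerateChain below
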
// PrevBlock returns a previously generated block by number. It panics if
// num is greater or equal to the number of the block being generated.
// For index -1, PrevBlock returns the parent block given to GenerateChain.
func (b *BlockGen) PrevBlock(index int) *types.Block {
if index >= b.i {
panic("block index out of range")
}
if index == -1 {
return b.parent
}
return b.chain[index]
}

// GenerateChain creates a chain of n blocks. The first block's
// parent will be the provided parent. db is used to store
// intermediate states and should contain the parent's state trie.
//
// The generator function is called with a new block generator for
// every block. Any transactions and uncles added to the generator
// become part of the block. If gen is nil, the blocks will be empty
// and their coinbase will be the zero address.
//
// Blocks created by GenerateChain do not contain valid proof of work
// values. Inserting them into ChainManager requires use of FakePow or
// a similar non-validating proof of work implementation.
func GenerateChain(parent *types.Block, db common.Database, n int, gen func(int, *BlockGen)) []*types.Block {
statedb := state.New(parent.Root(), db)
blocks := make(types.Blocks, n)
genblock := func(i int, h *types.Header) *types.Block {
b := &BlockGen{parent: parent, i: i, chain: blocks, header: h, statedb: statedb}
if gen != nil {
gen(i, b)
}
AccumulateRewards(statedb, h, b.uncles)
statedb.Update()
h.Root = statedb.Root()
return types.NewBlock(h, b.txs, b.uncles, b.receipts)
}
for i := 0; i < n; i++ {
header := makeHeader(parent, statedb)
block := genblock(i, header)
block.Td = CalcTD(block, parent)
blocks[i] = block
parent = block
@ -105,41 +149,38 @@ func makeChain(bman *BlockProcessor, parent *types.Block, max int, db common.Dat
return blocks
}

// Create a new chain manager starting from given block
// Effectively a fork factory
func newChainManager(block *types.Block, eventMux *event.TypeMux, db common.Database) *ChainManager {
genesis := GenesisBlock(0, db)
bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: eventMux, pow: FakePow{}}
bc.txState = state.ManageState(state.New(genesis.Root(), db))
bc.futureBlocks = NewBlockCache(1000)
if block == nil {
bc.Reset()
} else {
bc.currentBlock = block
bc.td = block.Td
func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
time := parent.Time() + 10 // block time is fixed at 10 seconds
return &types.Header{
Root: state.Root(),
ParentHash: parent.Hash(),
Coinbase: parent.Coinbase(),
Difficulty: CalcDifficulty(time, parent.Time(), parent.Difficulty()),
GasLimit: CalcGasLimit(parent),
GasUsed: new(big.Int),
Number: new(big.Int).Add(parent.Number(), common.Big1),
Time: uint64(time),
}
return bc
}

// block processor with fake pow
func newBlockProcessor(db common.Database, cman *ChainManager, eventMux *event.TypeMux) *BlockProcessor {
chainMan := newChainManager(nil, eventMux, db)
bman := NewBlockProcessor(db, db, FakePow{}, chainMan, eventMux)
return bman
}

// Make a new, deterministic canonical chain by running InsertChain
// on result of makeChain
// newCanonical creates a new deterministic canonical chain by running
// InsertChain on the result of makeChain.
func newCanonical(n int, db common.Database) (*BlockProcessor, error) {
eventMux := &event.TypeMux{}

bman := newBlockProcessor(db, newChainManager(nil, eventMux, db), eventMux)
evmux := &event.TypeMux{}
chainman, _ := NewChainManager(GenesisBlock(0, db), db, db, FakePow{}, evmux)
bman := NewBlockProcessor(db, db, FakePow{}, chainman, evmux)
bman.bc.SetProcessor(bman)
parent := bman.bc.CurrentBlock()
if n == 0 {
return bman, nil
}
lchain := makeChain(bman, parent, n, db, CanonicalSeed)
lchain := makeChain(parent, n, db, canonicalSeed)
_, err := bman.bc.InsertChain(lchain)
return bman, err
}

func makeChain(parent *types.Block, n int, db common.Database, seed int) []*types.Block {
return GenerateChain(parent, db, n, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
})
}
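Illustrative sketch, not part of the diff: how a test might use the rewritten newCanonical helper; t is an assumed *testing.T.

db, _ := ethdb.NewMemDatabase()
bman, err := newCanonical(10, db)
if err != nil {
	t.Fatal("could not build canonical chain:", err)
}
fmt.Println(bman.bc.CurrentBlock().Number()) // #10, the head of the freshly inserted chain
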
78
core/chain_makers_test.go
Normal file
@ -0,0 +1,78 @@
package core

import (
"fmt"
"math/big"

"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
)

func ExampleGenerateChain() {
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
addr3 = crypto.PubkeyToAddress(key3.PublicKey)
db, _ = ethdb.NewMemDatabase()
)

// Ensure that key1 has some funds in the genesis block.
genesis := GenesisBlockForTesting(db, addr1, big.NewInt(1000000))

// This call generates a chain of 5 blocks. The function runs for
// each block and adds different features to gen based on the
// block index.
chain := GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
switch i {
case 0:
// In block 1, addr1 sends addr2 some ether.
tx, _ := types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(key1)
gen.AddTx(tx)
case 1:
// In block 2, addr1 sends some more ether to addr2.
// addr2 passes it on to addr3.
tx1, _ := types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1)
tx2, _ := types.NewTransaction(gen.TxNonce(addr2), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)
gen.AddTx(tx1)
gen.AddTx(tx2)
case 2:
// Block 3 is empty but was mined by addr3.
gen.SetCoinbase(addr3)
gen.SetExtra([]byte("yeehaw"))
case 3:
// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
b2 := gen.PrevBlock(1).Header()
b2.Extra = []byte("foo")
gen.AddUncle(b2)
b3 := gen.PrevBlock(2).Header()
b3.Extra = []byte("foo")
gen.AddUncle(b3)
}
})

// Import the chain. This runs all block validation rules.
evmux := &event.TypeMux{}
chainman, _ := NewChainManager(genesis, db, db, FakePow{}, evmux)
chainman.SetProcessor(NewBlockProcessor(db, db, FakePow{}, chainman, evmux))
if i, err := chainman.InsertChain(chain); err != nil {
fmt.Printf("insert error (block %d): %v\n", i, err)
return
}

state := chainman.State()
fmt.Printf("last block: #%d\n", chainman.CurrentBlock().Number())
fmt.Println("balance of addr1:", state.GetBalance(addr1))
fmt.Println("balance of addr2:", state.GetBalance(addr2))
fmt.Println("balance of addr3:", state.GetBalance(addr3))
// Output:
// last block: #5
// balance of addr1: 989000
// balance of addr2: 10000
// balance of addr3: 5906250000000001000
}
@ -11,15 +11,19 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/compression/rle"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/hashicorp/golang-lru"
|
||||
"github.com/rcrowley/go-metrics"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -33,35 +37,40 @@ var (
|
||||
)
|
||||
|
||||
const (
|
||||
blockCacheLimit = 10000
|
||||
blockCacheLimit = 256
|
||||
maxFutureBlocks = 256
|
||||
maxTimeFutureBlocks = 30
|
||||
)
|
||||
|
||||
func CalcDifficulty(block, parent *types.Header) *big.Int {
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block b should have when created at time
// given the parent block's time and difficulty.
func CalcDifficulty(time int64, parentTime int64, parentDiff *big.Int) *big.Int {
diff := new(big.Int)

adjust := new(big.Int).Div(parent.Difficulty, params.DifficultyBoundDivisor)
if big.NewInt(int64(block.Time)-int64(parent.Time)).Cmp(params.DurationLimit) < 0 {
diff.Add(parent.Difficulty, adjust)
adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
if big.NewInt(time-parentTime).Cmp(params.DurationLimit) < 0 {
diff.Add(parentDiff, adjust)
} else {
diff.Sub(parent.Difficulty, adjust)
diff.Sub(parentDiff, adjust)
}

if diff.Cmp(params.MinimumDifficulty) < 0 {
return params.MinimumDifficulty
}

return diff
}
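Illustrative sketch, not part of the diff, of the new three-argument CalcDifficulty; the concrete numbers are placeholders.

parentTime, parentDiff := int64(0), big.NewInt(1000000)
fast := CalcDifficulty(parentTime+1, parentTime, parentDiff)  // quick block: difficulty adjusted upwards
slow := CalcDifficulty(parentTime+60, parentTime, parentDiff) // slow block: adjusted downwards, never below params.MinimumDifficulty
fmt.Println(fast, slow)
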

// CalcTD computes the total difficulty of block.
func CalcTD(block, parent *types.Block) *big.Int {
if parent == nil {
return block.Difficulty()
}
return new(big.Int).Add(parent.Td, block.Header().Difficulty)
d := block.Difficulty()
d.Add(d, parent.Td)
return d
}

// CalcGasLimit computes the gas limit of the next block after parent.
// The result may be modified by the caller.
func CalcGasLimit(parent *types.Block) *big.Int {
decay := new(big.Int).Div(parent.GasLimit(), params.GasLimitBoundDivisor)
contrib := new(big.Int).Mul(parent.GasUsed(), big.NewInt(3))
@ -71,11 +80,11 @@ func CalcGasLimit(parent *types.Block) *big.Int {
gl := new(big.Int).Sub(parent.GasLimit(), decay)
gl = gl.Add(gl, contrib)
gl = gl.Add(gl, big.NewInt(1))
gl = common.BigMax(gl, params.MinGasLimit)
gl.Set(common.BigMax(gl, params.MinGasLimit))

if gl.Cmp(params.GenesisGasLimit) < 0 {
gl2 := new(big.Int).Add(parent.GasLimit(), decay)
return common.BigMin(params.GenesisGasLimit, gl2)
gl.Add(parent.GasLimit(), decay)
gl.Set(common.BigMin(gl, params.GenesisGasLimit))
}
return gl
}
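Illustrative sketch, not part of the diff: wiring the two helpers together when assembling a child block. header, txs, uncles, receipts and parent are assumed values prepared by the caller.

header.GasLimit = CalcGasLimit(parent) // the caller may modify the result, per the doc comment
block := types.NewBlock(header, txs, uncles, receipts)
block.Td = CalcTD(block, parent) // parent.Td plus the block's own difficulty
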
@ -100,8 +109,9 @@ type ChainManager struct {
|
||||
transState *state.StateDB
|
||||
txState *state.ManagedState
|
||||
|
||||
cache *BlockCache
|
||||
futureBlocks *BlockCache
|
||||
cache *lru.Cache // cache is the LRU caching
|
||||
futureBlocks *lru.Cache // future blocks are blocks added for later processing
|
||||
pendingBlocks *lru.Cache // pending blocks contain blocks not yet written to the db
|
||||
|
||||
quit chan struct{}
|
||||
// procInterrupt must be atomically called
|
||||
@ -112,13 +122,14 @@ type ChainManager struct {
|
||||
}
|
||||
|
||||
func NewChainManager(genesis *types.Block, blockDb, stateDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
|
||||
cache, _ := lru.New(blockCacheLimit)
|
||||
bc := &ChainManager{
|
||||
blockDb: blockDb,
|
||||
stateDb: stateDb,
|
||||
genesisBlock: GenesisBlock(42, stateDb),
|
||||
eventMux: mux,
|
||||
quit: make(chan struct{}),
|
||||
cache: NewBlockCache(blockCacheLimit),
|
||||
cache: cache,
|
||||
pow: pow,
|
||||
}
|
||||
// Check the genesis block given to the chain manager. If the genesis block mismatches block number 0
|
||||
@ -147,7 +158,7 @@ func NewChainManager(genesis *types.Block, blockDb, stateDb common.Database, pow
|
||||
// Take ownership of this particular state
|
||||
bc.txState = state.ManageState(bc.State().Copy())
|
||||
|
||||
bc.futureBlocks = NewBlockCache(maxFutureBlocks)
|
||||
bc.futureBlocks, _ = lru.New(maxFutureBlocks)
|
||||
bc.makeCache()
|
||||
|
||||
go bc.update()
|
||||
@ -159,11 +170,11 @@ func (bc *ChainManager) SetHead(head *types.Block) {
|
||||
bc.mu.Lock()
|
||||
defer bc.mu.Unlock()
|
||||
|
||||
for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.Header().ParentHash) {
|
||||
for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
|
||||
bc.removeBlock(block)
|
||||
}
|
||||
|
||||
bc.cache = NewBlockCache(blockCacheLimit)
|
||||
bc.cache, _ = lru.New(blockCacheLimit)
|
||||
bc.currentBlock = head
|
||||
bc.makeCache()
|
||||
|
||||
@ -251,65 +262,23 @@ func (bc *ChainManager) setLastState() {
|
||||
}
|
||||
|
||||
func (bc *ChainManager) makeCache() {
|
||||
if bc.cache == nil {
|
||||
bc.cache = NewBlockCache(blockCacheLimit)
|
||||
}
|
||||
bc.cache, _ = lru.New(blockCacheLimit)
|
||||
// load in last `blockCacheLimit` - 1 blocks. Last block is the current.
|
||||
ancestors := bc.GetAncestors(bc.currentBlock, blockCacheLimit-1)
|
||||
ancestors = append(ancestors, bc.currentBlock)
|
||||
for _, block := range ancestors {
|
||||
bc.cache.Push(block)
|
||||
bc.cache.Add(bc.genesisBlock.Hash(), bc.genesisBlock)
|
||||
for _, block := range bc.GetBlocksFromHash(bc.currentBlock.Hash(), blockCacheLimit) {
|
||||
bc.cache.Add(block.Hash(), block)
|
||||
}
|
||||
}
|
||||
|
||||
// Block creation & chain handling
|
||||
func (bc *ChainManager) NewBlock(coinbase common.Address) *types.Block {
|
||||
bc.mu.RLock()
|
||||
defer bc.mu.RUnlock()
|
||||
|
||||
var (
|
||||
root common.Hash
|
||||
parentHash common.Hash
|
||||
)
|
||||
|
||||
if bc.currentBlock != nil {
|
||||
root = bc.currentBlock.Header().Root
|
||||
parentHash = bc.lastBlockHash
|
||||
}
|
||||
|
||||
block := types.NewBlock(
|
||||
parentHash,
|
||||
coinbase,
|
||||
root,
|
||||
common.BigPow(2, 32),
|
||||
0,
|
||||
nil)
|
||||
block.SetUncles(nil)
|
||||
block.SetTransactions(nil)
|
||||
block.SetReceipts(nil)
|
||||
|
||||
parent := bc.currentBlock
|
||||
if parent != nil {
|
||||
header := block.Header()
|
||||
header.Difficulty = CalcDifficulty(block.Header(), parent.Header())
|
||||
header.Number = new(big.Int).Add(parent.Header().Number, common.Big1)
|
||||
header.GasLimit = CalcGasLimit(parent)
|
||||
}
|
||||
|
||||
return block
|
||||
}
|
||||
|
||||
func (bc *ChainManager) Reset() {
|
||||
bc.mu.Lock()
|
||||
defer bc.mu.Unlock()
|
||||
|
||||
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.Header().ParentHash) {
|
||||
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
|
||||
bc.removeBlock(block)
|
||||
}
|
||||
|
||||
if bc.cache == nil {
|
||||
bc.cache = NewBlockCache(blockCacheLimit)
|
||||
}
|
||||
bc.cache, _ = lru.New(blockCacheLimit)
|
||||
|
||||
// Prepare the genesis block
|
||||
bc.write(bc.genesisBlock)
|
||||
@ -328,7 +297,7 @@ func (bc *ChainManager) ResetWithGenesisBlock(gb *types.Block) {
|
||||
bc.mu.Lock()
|
||||
defer bc.mu.Unlock()
|
||||
|
||||
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.Header().ParentHash) {
|
||||
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
|
||||
bc.removeBlock(block)
|
||||
}
|
||||
|
||||
@ -393,15 +362,20 @@ func (bc *ChainManager) insert(block *types.Block) {
|
||||
}
|
||||
|
||||
func (bc *ChainManager) write(block *types.Block) {
|
||||
enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
|
||||
key := append(blockHashPre, block.Hash().Bytes()...)
|
||||
err := bc.blockDb.Put(key, enc)
|
||||
if err != nil {
|
||||
glog.Fatal("db write fail:", err)
|
||||
}
|
||||
tstart := time.Now()
|
||||
|
||||
// Push block to cache
|
||||
bc.cache.Push(block)
|
||||
go func() {
|
||||
enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
|
||||
key := append(blockHashPre, block.Hash().Bytes()...)
|
||||
err := bc.blockDb.Put(key, enc)
|
||||
if err != nil {
|
||||
glog.Fatal("db write fail:", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if glog.V(logger.Debug) {
|
||||
glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
|
||||
}
|
||||
}
|
||||
|
||||
// Accessors
|
||||
@ -411,6 +385,16 @@ func (bc *ChainManager) Genesis() *types.Block {
|
||||
|
||||
// Block fetching methods
|
||||
func (bc *ChainManager) HasBlock(hash common.Hash) bool {
|
||||
if bc.cache.Contains(hash) {
|
||||
return true
|
||||
}
|
||||
|
||||
if bc.pendingBlocks != nil {
|
||||
if _, exist := bc.pendingBlocks.Get(hash); exist {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
data, _ := bc.blockDb.Get(append(blockHashPre, hash[:]...))
|
||||
return len(data) != 0
|
||||
}
|
||||
@ -437,11 +421,15 @@ func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) (
|
||||
}
|
||||
|
||||
func (self *ChainManager) GetBlock(hash common.Hash) *types.Block {
|
||||
/*
|
||||
if block := self.cache.Get(hash); block != nil {
|
||||
return block
|
||||
if block, ok := self.cache.Get(hash); ok {
|
||||
return block.(*types.Block)
|
||||
}
|
||||
|
||||
if self.pendingBlocks != nil {
|
||||
if block, _ := self.pendingBlocks.Get(hash); block != nil {
|
||||
return block.(*types.Block)
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
||||
data, _ := self.blockDb.Get(append(blockHashPre, hash[:]...))
|
||||
if len(data) == 0 {
|
||||
@ -452,6 +440,10 @@ func (self *ChainManager) GetBlock(hash common.Hash) *types.Block {
|
||||
glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add the block to the cache
|
||||
self.cache.Add(hash, (*types.Block)(&block))
|
||||
|
||||
return (*types.Block)(&block)
|
||||
}
|
||||
|
||||
@ -463,6 +455,19 @@ func (self *ChainManager) GetBlockByNumber(num uint64) *types.Block {
|
||||
|
||||
}
|
||||
|
||||
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
for i := 0; i < n; i++ {
block := self.GetBlock(hash)
if block == nil {
break
}
blocks = append(blocks, block)
hash = block.ParentHash()
}
return
}
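Illustrative sketch, not part of the diff: fetching the current head plus up to two ancestors, newest first; bc is an assumed *ChainManager.

blocks := bc.GetBlocksFromHash(bc.CurrentBlock().Hash(), 3)
for _, b := range blocks {
	fmt.Println(b.Number(), b.Hash().Hex())
}
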
|
||||
// non blocking version
|
||||
func (self *ChainManager) getBlockByNumber(num uint64) *types.Block {
|
||||
key, _ := self.blockDb.Get(append(blockNumPre, big.NewInt(int64(num)).Bytes()...))
|
||||
@ -482,45 +487,12 @@ func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncl
|
||||
return
|
||||
}
|
||||
|
||||
func (self *ChainManager) GetAncestors(block *types.Block, length int) (blocks []*types.Block) {
|
||||
for i := 0; i < length; i++ {
|
||||
block = self.GetBlock(block.ParentHash())
|
||||
if block == nil {
|
||||
break
|
||||
}
|
||||
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// setTotalDifficulty updates the TD of the chain manager. Note, this function
|
||||
// assumes that the `mu` mutex is held!
|
||||
func (bc *ChainManager) setTotalDifficulty(td *big.Int) {
|
||||
bc.td = new(big.Int).Set(td)
|
||||
}
|
||||
|
||||
func (self *ChainManager) CalcTotalDiff(block *types.Block) (*big.Int, error) {
|
||||
parent := self.GetBlock(block.Header().ParentHash)
|
||||
if parent == nil {
|
||||
return nil, fmt.Errorf("Unable to calculate total diff without known parent %x", block.Header().ParentHash)
|
||||
}
|
||||
|
||||
parentTd := parent.Td
|
||||
|
||||
uncleDiff := new(big.Int)
|
||||
for _, uncle := range block.Uncles() {
|
||||
uncleDiff = uncleDiff.Add(uncleDiff, uncle.Difficulty)
|
||||
}
|
||||
|
||||
td := new(big.Int)
|
||||
td = td.Add(parentTd, uncleDiff)
|
||||
td = td.Add(td, block.Header().Difficulty)
|
||||
|
||||
return td, nil
|
||||
}
|
||||
|
||||
func (bc *ChainManager) Stop() {
|
||||
close(bc.quit)
|
||||
atomic.StoreInt32(&bc.procInterrupt, 1)
|
||||
@ -538,16 +510,94 @@ type queueEvent struct {
|
||||
}
|
||||
|
||||
func (self *ChainManager) procFutureBlocks() {
|
||||
var blocks []*types.Block
|
||||
self.futureBlocks.Each(func(i int, block *types.Block) {
|
||||
blocks = append(blocks, block)
|
||||
})
|
||||
blocks := make([]*types.Block, self.futureBlocks.Len())
|
||||
for i, hash := range self.futureBlocks.Keys() {
|
||||
block, _ := self.futureBlocks.Get(hash)
|
||||
blocks[i] = block.(*types.Block)
|
||||
}
|
||||
if len(blocks) > 0 {
|
||||
types.BlockBy(types.Number).Sort(blocks)
|
||||
self.InsertChain(blocks)
|
||||
}
|
||||
}
|
||||
|
||||
func (self *ChainManager) enqueueForWrite(block *types.Block) {
|
||||
self.pendingBlocks.Add(block.Hash(), block)
|
||||
}
|
||||
|
||||
func (self *ChainManager) flushQueuedBlocks() {
|
||||
db, batchWrite := self.blockDb.(*ethdb.LDBDatabase)
|
||||
batch := new(leveldb.Batch)
|
||||
for _, key := range self.pendingBlocks.Keys() {
|
||||
b, _ := self.pendingBlocks.Get(key)
|
||||
block := b.(*types.Block)
|
||||
|
||||
enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
|
||||
key := append(blockHashPre, block.Hash().Bytes()...)
|
||||
if batchWrite {
|
||||
batch.Put(key, rle.Compress(enc))
|
||||
} else {
|
||||
self.blockDb.Put(key, enc)
|
||||
}
|
||||
}
|
||||
|
||||
if batchWrite {
|
||||
db.LDB().Write(batch, nil)
|
||||
}
|
||||
}
|
||||
|
||||
type writeStatus byte
|
||||
|
||||
const (
|
||||
nonStatTy writeStatus = iota
|
||||
canonStatTy
|
||||
splitStatTy
|
||||
sideStatTy
|
||||
)
|
||||
|
||||
func (self *ChainManager) WriteBlock(block *types.Block) (status writeStatus, err error) {
|
||||
self.wg.Add(1)
|
||||
defer self.wg.Done()
|
||||
|
||||
cblock := self.currentBlock
|
||||
// Compare the TD of the last known block in the canonical chain to make sure it's greater.
|
||||
// At this point it's possible that a different chain (fork) becomes the new canonical chain.
|
||||
if block.Td.Cmp(self.Td()) > 0 {
|
||||
// chain fork
|
||||
if block.ParentHash() != cblock.Hash() {
|
||||
// during split we merge two different chains and create the new canonical chain
|
||||
err := self.merge(cblock, block)
|
||||
if err != nil {
|
||||
return nonStatTy, err
|
||||
}
|
||||
|
||||
status = splitStatTy
|
||||
}
|
||||
|
||||
self.mu.Lock()
|
||||
self.setTotalDifficulty(block.Td)
|
||||
self.insert(block)
|
||||
self.mu.Unlock()
|
||||
|
||||
self.setTransState(state.New(block.Root(), self.stateDb))
|
||||
self.txState.SetState(state.New(block.Root(), self.stateDb))
|
||||
|
||||
status = canonStatTy
|
||||
} else {
|
||||
status = sideStatTy
|
||||
}
|
||||
|
||||
// Write block to database. Eventually we'll have to improve on this and throw away blocks that are
|
||||
// not in the canonical chain.
|
||||
self.mu.Lock()
|
||||
self.enqueueForWrite(block)
|
||||
self.mu.Unlock()
|
||||
// Delete from future blocks
|
||||
self.futureBlocks.Remove(block.Hash())
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// InsertChain will attempt to insert the given chain in to the canonical chain or, otherwise, create a fork. It an error is returned
|
||||
// it will return the index number of the failing block as well an error describing what went wrong (for possible errors see core/errors.go).
|
||||
func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
||||
@ -557,6 +607,8 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
||||
self.chainmu.Lock()
|
||||
defer self.chainmu.Unlock()
|
||||
|
||||
self.pendingBlocks, _ = lru.New(len(chain))
|
||||
|
||||
// A queued approach to delivering events. This is generally
|
||||
// faster than direct delivery and requires much less mutex
|
||||
// acquiring.
|
||||
@ -574,6 +626,7 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
||||
// Start the parallel nonce verifier.
|
||||
go verifyNonces(self.pow, chain, nonceQuit, nonceDone)
|
||||
defer close(nonceQuit)
|
||||
defer self.flushQueuedBlocks()
|
||||
|
||||
txcount := 0
|
||||
for i, block := range chain {
|
||||
@ -621,15 +674,13 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
||||
return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max)
|
||||
}
|
||||
|
||||
block.SetQueued(true)
|
||||
self.futureBlocks.Push(block)
|
||||
self.futureBlocks.Add(block.Hash(), block)
|
||||
stats.queued++
|
||||
continue
|
||||
}
|
||||
|
||||
if IsParentErr(err) && self.futureBlocks.Has(block.ParentHash()) {
|
||||
block.SetQueued(true)
|
||||
self.futureBlocks.Push(block)
|
||||
if IsParentErr(err) && self.futureBlocks.Contains(block.ParentHash()) {
|
||||
self.futureBlocks.Add(block.Hash(), block)
|
||||
stats.queued++
|
||||
continue
|
||||
}
|
||||
@ -641,59 +692,29 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
||||
|
||||
txcount += len(block.Transactions())
|
||||
|
||||
cblock := self.currentBlock
|
||||
// Compare the TD of the last known block in the canonical chain to make sure it's greater.
|
||||
// At this point it's possible that a different chain (fork) becomes the new canonical chain.
|
||||
if block.Td.Cmp(self.Td()) > 0 {
|
||||
// chain fork
|
||||
if block.ParentHash() != cblock.Hash() {
|
||||
// during split we merge two different chains and create the new canonical chain
|
||||
err := self.merge(cblock, block)
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
|
||||
queue[i] = ChainSplitEvent{block, logs}
|
||||
queueEvent.splitCount++
|
||||
}
|
||||
|
||||
self.mu.Lock()
|
||||
self.setTotalDifficulty(block.Td)
|
||||
self.insert(block)
|
||||
self.mu.Unlock()
|
||||
|
||||
jsonlogger.LogJson(&logger.EthChainNewHead{
|
||||
BlockHash: block.Hash().Hex(),
|
||||
BlockNumber: block.Number(),
|
||||
ChainHeadHash: cblock.Hash().Hex(),
|
||||
BlockPrevHash: block.ParentHash().Hex(),
|
||||
})
|
||||
|
||||
self.setTransState(state.New(block.Root(), self.stateDb))
|
||||
self.txState.SetState(state.New(block.Root(), self.stateDb))
|
||||
|
||||
queue[i] = ChainEvent{block, block.Hash(), logs}
|
||||
queueEvent.canonicalCount++
|
||||
|
||||
// write the block to the chain and get the status
|
||||
status, err := self.WriteBlock(block)
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
switch status {
|
||||
case canonStatTy:
|
||||
if glog.V(logger.Debug) {
|
||||
glog.Infof("[%v] inserted block #%d (%d TXs %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
|
||||
}
|
||||
} else {
|
||||
queue[i] = ChainEvent{block, block.Hash(), logs}
|
||||
queueEvent.canonicalCount++
|
||||
case sideStatTy:
|
||||
if glog.V(logger.Detail) {
|
||||
glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
|
||||
}
|
||||
|
||||
queue[i] = ChainSideEvent{block, logs}
|
||||
queueEvent.sideCount++
|
||||
case splitStatTy:
|
||||
queue[i] = ChainSplitEvent{block, logs}
|
||||
queueEvent.splitCount++
|
||||
}
|
||||
// Write block to database. Eventually we'll have to improve on this and throw away blocks that are
|
||||
// not in the canonical chain.
|
||||
self.write(block)
|
||||
// Delete from future blocks
|
||||
self.futureBlocks.Delete(block.Hash())
|
||||
|
||||
stats.processed++
|
||||
blockInsertTimer.UpdateSince(bstart)
|
||||
}
|
||||
|
||||
if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) {
|
||||
@ -752,9 +773,9 @@ func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, e
|
||||
}
|
||||
}
|
||||
|
||||
if glog.V(logger.Info) {
|
||||
if glog.V(logger.Debug) {
|
||||
commonHash := commonBlock.Hash()
|
||||
glog.Infof("Fork detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
|
||||
glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
|
||||
}
|
||||
|
||||
return newChain, nil
|
||||
|
@ -18,6 +18,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/hashicorp/golang-lru"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -62,12 +63,11 @@ func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big
|
||||
if bi1 != bi2 {
|
||||
t.Fatal("chains do not have the same hash at height", i)
|
||||
}
|
||||
|
||||
bman2.bc.SetProcessor(bman2)
|
||||
|
||||
// extend the fork
|
||||
parent := bman2.bc.CurrentBlock()
|
||||
chainB := makeChain(bman2, parent, N, db, ForkSeed)
|
||||
chainB := makeChain(parent, N, db, forkSeed)
|
||||
_, err = bman2.bc.InsertChain(chainB)
|
||||
if err != nil {
|
||||
t.Fatal("Insert chain error for fork:", err)
|
||||
@ -109,7 +109,8 @@ func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
|
||||
|
||||
bman.bc.mu.Lock()
|
||||
{
|
||||
bman.bc.write(block)
|
||||
bman.bc.enqueueForWrite(block)
|
||||
//bman.bc.write(block)
|
||||
}
|
||||
bman.bc.mu.Unlock()
|
||||
}
|
||||
@ -117,7 +118,7 @@ func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
|
||||
}
|
||||
|
||||
func loadChain(fn string, t *testing.T) (types.Blocks, error) {
|
||||
fh, err := os.OpenFile(filepath.Join(os.Getenv("GOPATH"), "src", "github.com", "ethereum", "go-ethereum", "_data", fn), os.O_RDONLY, os.ModePerm)
|
||||
fh, err := os.OpenFile(filepath.Join("..", "_data", fn), os.O_RDONLY, os.ModePerm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -256,7 +257,7 @@ func TestBrokenChain(t *testing.T) {
|
||||
}
|
||||
bman2.bc.SetProcessor(bman2)
|
||||
parent := bman2.bc.CurrentBlock()
|
||||
chainB := makeChain(bman2, parent, 5, db2, ForkSeed)
|
||||
chainB := makeChain(parent, 5, db2, forkSeed)
|
||||
chainB = chainB[1:]
|
||||
_, err = testChain(chainB, bman)
|
||||
if err == nil {
|
||||
@ -265,7 +266,7 @@ func TestBrokenChain(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestChainInsertions(t *testing.T) {
|
||||
t.Skip() // travil fails.
|
||||
t.Skip("Skipped: outdated test files")
|
||||
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
|
||||
@ -303,7 +304,7 @@ func TestChainInsertions(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestChainMultipleInsertions(t *testing.T) {
|
||||
t.Skip() // travil fails.
|
||||
t.Skip("Skipped: outdated test files")
|
||||
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
|
||||
@ -346,8 +347,8 @@ func TestChainMultipleInsertions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAncestors(t *testing.T) {
|
||||
t.Skip() // travil fails.
|
||||
func TestGetBlocksFromHash(t *testing.T) {
|
||||
t.Skip("Skipped: outdated test files")
|
||||
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
chainMan := theChainManager(db, t)
|
||||
@ -361,8 +362,8 @@ func TestGetAncestors(t *testing.T) {
|
||||
chainMan.write(block)
|
||||
}
|
||||
|
||||
ancestors := chainMan.GetAncestors(chain[len(chain)-1], 4)
|
||||
fmt.Println(ancestors)
|
||||
blocks := chainMan.GetBlocksFromHash(chain[len(chain)-1].Hash(), 4)
|
||||
fmt.Println(blocks)
|
||||
}
|
||||
|
||||
type bproc struct{}
|
||||
@ -372,15 +373,17 @@ func (bproc) Process(*types.Block) (state.Logs, error) { return nil, nil }
|
||||
func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
|
||||
var chain []*types.Block
|
||||
for i, difficulty := range d {
|
||||
header := &types.Header{Number: big.NewInt(int64(i + 1)), Difficulty: big.NewInt(int64(difficulty))}
|
||||
block := types.NewBlockWithHeader(header)
|
||||
copy(block.HeaderHash[:2], []byte{byte(i + 1), seed})
|
||||
if i == 0 {
|
||||
block.ParentHeaderHash = genesis.Hash()
|
||||
} else {
|
||||
copy(block.ParentHeaderHash[:2], []byte{byte(i), seed})
|
||||
header := &types.Header{
|
||||
Coinbase: common.Address{seed},
|
||||
Number: big.NewInt(int64(i + 1)),
|
||||
Difficulty: big.NewInt(int64(difficulty)),
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
header.ParentHash = genesis.Hash()
|
||||
} else {
|
||||
header.ParentHash = chain[i-1].Hash()
|
||||
}
|
||||
block := types.NewBlockWithHeader(header)
|
||||
chain = append(chain, block)
|
||||
}
|
||||
return chain
|
||||
@ -389,8 +392,8 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block
|
||||
func chm(genesis *types.Block, db common.Database) *ChainManager {
|
||||
var eventMux event.TypeMux
|
||||
bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
|
||||
bc.cache = NewBlockCache(100)
|
||||
bc.futureBlocks = NewBlockCache(100)
|
||||
bc.cache, _ = lru.New(100)
|
||||
bc.futureBlocks, _ = lru.New(100)
|
||||
bc.processor = bproc{}
|
||||
bc.ResetWithGenesisBlock(genesis)
|
||||
bc.txState = state.ManageState(bc.State())
|
||||
@ -399,7 +402,6 @@ func chm(genesis *types.Block, db common.Database) *ChainManager {
|
||||
}
|
||||
|
||||
func TestReorgLongest(t *testing.T) {
|
||||
t.Skip("skipped while cache is removed")
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
genesis := GenesisBlock(0, db)
|
||||
bc := chm(genesis, db)
|
||||
@ -419,7 +421,6 @@ func TestReorgLongest(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReorgShortest(t *testing.T) {
|
||||
t.Skip("skipped while cache is removed")
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
genesis := GenesisBlock(0, db)
|
||||
bc := chm(genesis, db)
|
||||
@ -444,7 +445,7 @@ func TestInsertNonceError(t *testing.T) {
|
||||
genesis := GenesisBlock(0, db)
|
||||
bc := chm(genesis, db)
|
||||
bc.processor = NewBlockProcessor(db, db, bc.pow, bc, bc.eventMux)
|
||||
blocks := makeChain(bc.processor.(*BlockProcessor), bc.currentBlock, i, db, 0)
|
||||
blocks := makeChain(bc.currentBlock, i, db, 0)
|
||||
|
||||
fail := rand.Int() % len(blocks)
|
||||
failblock := blocks[fail]
|
||||
|
@ -3,6 +3,7 @@ package core
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -11,38 +12,18 @@ import (
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
/*
|
||||
* This is the special genesis block.
|
||||
*/
|
||||
|
||||
var ZeroHash256 = make([]byte, 32)
|
||||
var ZeroHash160 = make([]byte, 20)
|
||||
var ZeroHash512 = make([]byte, 64)
|
||||
|
||||
// GenesisBlock creates a genesis block with the given nonce.
|
||||
func GenesisBlock(nonce uint64, db common.Database) *types.Block {
|
||||
genesis := types.NewBlock(common.Hash{}, common.Address{}, common.Hash{}, params.GenesisDifficulty, nonce, nil)
|
||||
genesis.Header().Number = common.Big0
|
||||
genesis.Header().GasLimit = params.GenesisGasLimit
|
||||
genesis.Header().GasUsed = common.Big0
|
||||
genesis.Header().Time = 0
|
||||
|
||||
genesis.Td = common.Big0
|
||||
|
||||
genesis.SetUncles([]*types.Header{})
|
||||
genesis.SetTransactions(types.Transactions{})
|
||||
genesis.SetReceipts(types.Receipts{})
|
||||
|
||||
var accounts map[string]struct {
|
||||
Balance string
|
||||
Code string
|
||||
}
|
||||
err := json.Unmarshal(GenesisAccounts, &accounts)
|
||||
if err != nil {
|
||||
fmt.Println("enable to decode genesis json data:", err)
|
||||
fmt.Println("unable to decode genesis json data:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
statedb := state.New(genesis.Root(), db)
|
||||
statedb := state.New(common.Hash{}, db)
|
||||
for addr, account := range accounts {
|
||||
codedAddr := common.Hex2Bytes(addr)
|
||||
accountState := statedb.CreateAccount(common.BytesToAddress(codedAddr))
|
||||
@ -51,10 +32,15 @@ func GenesisBlock(nonce uint64, db common.Database) *types.Block {
|
||||
statedb.UpdateStateObject(accountState)
|
||||
}
|
||||
statedb.Sync()
|
||||
genesis.Header().Root = statedb.Root()
|
||||
genesis.Td = params.GenesisDifficulty
|
||||
|
||||
return genesis
|
||||
block := types.NewBlock(&types.Header{
|
||||
Difficulty: params.GenesisDifficulty,
|
||||
GasLimit: params.GenesisGasLimit,
|
||||
Nonce: types.EncodeNonce(nonce),
|
||||
Root: statedb.Root(),
|
||||
}, nil, nil, nil)
|
||||
block.Td = params.GenesisDifficulty
|
||||
return block
|
||||
}
|
||||
|
||||
var GenesisAccounts = []byte(`{
|
||||
@ -71,3 +57,20 @@ var GenesisAccounts = []byte(`{
|
||||
"e6716f9544a56c530d868e4bfbacb172315bdead": {"balance": "1606938044258990275541962092341162602522202993782792835301376"},
|
||||
"1a26338f0d905e295fccb71fa9ea849ffa12aaf4": {"balance": "1606938044258990275541962092341162602522202993782792835301376"}
|
||||
}`)
|
||||
|
||||
// GenesisBlockForTesting creates a block in which addr has the given wei balance.
|
||||
// The state trie of the block is written to db.
|
||||
func GenesisBlockForTesting(db common.Database, addr common.Address, balance *big.Int) *types.Block {
|
||||
statedb := state.New(common.Hash{}, db)
|
||||
obj := statedb.GetOrNewStateObject(addr)
|
||||
obj.SetBalance(balance)
|
||||
statedb.Update()
|
||||
statedb.Sync()
|
||||
block := types.NewBlock(&types.Header{
|
||||
Difficulty: params.GenesisDifficulty,
|
||||
GasLimit: params.GenesisGasLimit,
|
||||
Root: statedb.Root(),
|
||||
}, nil, nil, nil)
|
||||
block.Td = params.GenesisDifficulty
|
||||
return block
|
||||
}
|
||||
|
@ -13,10 +13,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
const tryJit = false
|
||||
|
||||
var ()
|
||||
|
||||
/*
|
||||
* The State transitioning model
|
||||
*
|
||||
@ -69,20 +65,24 @@ func MessageCreatesContract(msg Message) bool {
|
||||
return msg.To() == nil
|
||||
}
|
||||
|
||||
func MessageGasValue(msg Message) *big.Int {
|
||||
return new(big.Int).Mul(msg.Gas(), msg.GasPrice())
|
||||
}
|
||||
|
||||
func IntrinsicGas(msg Message) *big.Int {
// IntrinsicGas computes the 'intrinsic gas' for a message
// with the given data.
func IntrinsicGas(data []byte) *big.Int {
igas := new(big.Int).Set(params.TxGas)
for _, byt := range msg.Data() {
if byt != 0 {
igas.Add(igas, params.TxDataNonZeroGas)
} else {
igas.Add(igas, params.TxDataZeroGas)
if len(data) > 0 {
var nz int64
for _, byt := range data {
if byt != 0 {
nz++
}
}
m := big.NewInt(nz)
m.Mul(m, params.TxDataNonZeroGas)
igas.Add(igas, m)
m.SetInt64(int64(len(data)) - nz)
m.Mul(m, params.TxDataZeroGas)
igas.Add(igas, m)
}

return igas
}
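Illustrative sketch, not part of the diff: the per-byte accounting for a small payload.

data := []byte{0x00, 0x00, 0xff, 0x01}
gas := IntrinsicGas(data)
// gas == params.TxGas + 2*params.TxDataNonZeroGas + 2*params.TxDataZeroGas for this payload
fmt.Println(gas)
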
|
||||
@ -96,7 +96,7 @@ func NewStateTransition(env vm.Environment, msg Message, coinbase *state.StateOb
|
||||
env: env,
|
||||
msg: msg,
|
||||
gas: new(big.Int),
|
||||
gasPrice: new(big.Int).Set(msg.GasPrice()),
|
||||
gasPrice: msg.GasPrice(),
|
||||
initialGas: new(big.Int),
|
||||
value: msg.Value(),
|
||||
data: msg.Data(),
|
||||
@ -140,26 +140,22 @@ func (self *StateTransition) AddGas(amount *big.Int) {
|
||||
}
|
||||
|
||||
func (self *StateTransition) BuyGas() error {
|
||||
var err error
|
||||
mgas := self.msg.Gas()
|
||||
mgval := new(big.Int).Mul(mgas, self.gasPrice)
|
||||
|
||||
sender, err := self.From()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if sender.Balance().Cmp(MessageGasValue(self.msg)) < 0 {
|
||||
return fmt.Errorf("insufficient ETH for gas (%x). Req %v, has %v", sender.Address().Bytes()[:4], MessageGasValue(self.msg), sender.Balance())
|
||||
if sender.Balance().Cmp(mgval) < 0 {
|
||||
return fmt.Errorf("insufficient ETH for gas (%x). Req %v, has %v", sender.Address().Bytes()[:4], mgval, sender.Balance())
|
||||
}
|
||||
|
||||
coinbase := self.Coinbase()
|
||||
err = coinbase.SubGas(self.msg.Gas(), self.msg.GasPrice())
|
||||
if err != nil {
|
||||
if err = self.Coinbase().SubGas(mgas, self.gasPrice); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.AddGas(self.msg.Gas())
|
||||
self.initialGas.Set(self.msg.Gas())
|
||||
sender.SubBalance(MessageGasValue(self.msg))
|
||||
|
||||
self.AddGas(mgas)
|
||||
self.initialGas.Set(mgas)
|
||||
sender.SubBalance(mgval)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -195,14 +191,14 @@ func (self *StateTransition) transitionState() (ret []byte, usedGas *big.Int, er
|
||||
sender, _ := self.From() // err checked in preCheck
|
||||
|
||||
// Pay intrinsic gas
|
||||
if err = self.UseGas(IntrinsicGas(self.msg)); err != nil {
|
||||
if err = self.UseGas(IntrinsicGas(self.data)); err != nil {
|
||||
return nil, nil, InvalidTxError(err)
|
||||
}
|
||||
|
||||
vmenv := self.env
|
||||
var ref vm.ContextRef
|
||||
if MessageCreatesContract(msg) {
|
||||
ret, err, ref = vmenv.Create(sender, self.msg.Data(), self.gas, self.gasPrice, self.value)
|
||||
ret, err, ref = vmenv.Create(sender, self.data, self.gas, self.gasPrice, self.value)
|
||||
if err == nil {
|
||||
dataGas := big.NewInt(int64(len(ret)))
|
||||
dataGas.Mul(dataGas, params.CreateDataGas)
|
||||
@ -216,7 +212,7 @@ func (self *StateTransition) transitionState() (ret []byte, usedGas *big.Int, er
|
||||
} else {
|
||||
// Increment the nonce for the next transaction
|
||||
self.state.SetNonce(sender.Address(), sender.Nonce()+1)
|
||||
ret, err = vmenv.Call(sender, self.To().Address(), self.msg.Data(), self.gas, self.gasPrice, self.value)
|
||||
ret, err = vmenv.Call(sender, self.To().Address(), self.data, self.gas, self.gasPrice, self.value)
|
||||
}
|
||||
|
||||
if err != nil && IsValueTransferErr(err) {
|
||||
@ -237,15 +233,15 @@ func (self *StateTransition) refundGas() {
|
||||
coinbase := self.Coinbase()
|
||||
sender, _ := self.From() // err already checked
|
||||
// Return remaining gas
|
||||
remaining := new(big.Int).Mul(self.gas, self.msg.GasPrice())
|
||||
remaining := new(big.Int).Mul(self.gas, self.gasPrice)
|
||||
sender.AddBalance(remaining)
|
||||
|
||||
uhalf := new(big.Int).Div(self.gasUsed(), common.Big2)
|
||||
uhalf := remaining.Div(self.gasUsed(), common.Big2)
|
||||
refund := common.BigMin(uhalf, self.state.Refunds())
|
||||
self.gas.Add(self.gas, refund)
|
||||
self.state.AddBalance(sender.Address(), refund.Mul(refund, self.msg.GasPrice()))
|
||||
self.state.AddBalance(sender.Address(), refund.Mul(refund, self.gasPrice))
|
||||
|
||||
coinbase.AddGas(self.gas, self.msg.GasPrice())
|
||||
coinbase.AddGas(self.gas, self.gasPrice)
|
||||
}
|
||||
|
||||
func (self *StateTransition) gasUsed() *big.Int {
|
||||
|
@ -162,27 +162,25 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error {
|
||||
|
||||
// Check the transaction doesn't exceed the current
|
||||
// block limit gas.
|
||||
if pool.gasLimit().Cmp(tx.GasLimit) < 0 {
|
||||
if pool.gasLimit().Cmp(tx.Gas()) < 0 {
|
||||
return ErrGasLimit
|
||||
}
|
||||
|
||||
// Transactions can't be negative. This may never happen
|
||||
// using RLP decoded transactions but may occur if you create
|
||||
// a transaction using the RPC for example.
|
||||
if tx.Amount.Cmp(common.Big0) < 0 {
|
||||
if tx.Value().Cmp(common.Big0) < 0 {
|
||||
return ErrNegativeValue
|
||||
}
|
||||
|
||||
// Transactor should have enough funds to cover the costs
// cost == V + GP * GL
total := new(big.Int).Mul(tx.Price, tx.GasLimit)
total.Add(total, tx.Value())
if pool.currentState().GetBalance(from).Cmp(total) < 0 {
if pool.currentState().GetBalance(from).Cmp(tx.Cost()) < 0 {
return ErrInsufficientFunds
}
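Worked example, not part of the diff: for a transaction with value 100 wei, gas limit 100 and gas price 1, the check above needs a sender balance of at least V + GP*GL = 100 + 1*100 = 200 wei; tx.Cost() is assumed to return exactly that sum, matching the removed manual computation.

need := new(big.Int).Add(tx.Value(), new(big.Int).Mul(tx.GasPrice(), tx.Gas()))
fmt.Println(need) // 200 for the values above; the sender's balance must be at least this
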
|
||||
// Should supply enough intrinsic gas
|
||||
if tx.GasLimit.Cmp(IntrinsicGas(tx)) < 0 {
|
||||
if tx.Gas().Cmp(IntrinsicGas(tx.Data())) < 0 {
|
||||
return ErrIntrinsicGas
|
||||
}
|
||||
|
||||
@ -238,7 +236,7 @@ func (pool *TxPool) addTx(hash common.Hash, addr common.Address, tx *types.Trans
|
||||
|
||||
// Increment the nonce on the pending state. This can only happen if
|
||||
// the nonce is +1 to the previous one.
|
||||
pool.pendingState.SetNonce(addr, tx.AccountNonce+1)
|
||||
pool.pendingState.SetNonce(addr, tx.Nonce()+1)
|
||||
// Notify the subscribers. This event is posted in a goroutine
|
||||
// because it's possible that somewhere during the post "Remove transaction"
|
||||
// gets called which will then wait for the global tx pool lock and deadlock.
|
||||
@ -341,7 +339,7 @@ func (pool *TxPool) checkQueue() {
|
||||
trueNonce := pool.currentState().GetNonce(address)
|
||||
addq := addq[:0]
|
||||
for hash, tx := range txs {
|
||||
if tx.AccountNonce < trueNonce {
|
||||
if tx.Nonce() < trueNonce {
|
||||
// Drop queued transactions whose nonce is lower than
|
||||
// the account nonce because they have been processed.
|
||||
delete(txs, hash)
|
||||
@ -362,8 +360,7 @@ func (pool *TxPool) checkQueue() {
|
||||
delete(pool.queue[address], e.hash)
|
||||
continue
|
||||
}
|
||||
|
||||
if e.AccountNonce > guessedNonce {
|
||||
if e.Nonce() > guessedNonce {
|
||||
break
|
||||
}
|
||||
delete(txs, e.hash)
|
||||
@ -418,4 +415,4 @@ type txQueueEntry struct {
|
||||
|
||||
func (q txQueue) Len() int { return len(q) }
|
||||
func (q txQueue) Swap(i, j int) { q[i], q[j] = q[j], q[i] }
|
||||
func (q txQueue) Less(i, j int) bool { return q[i].AccountNonce < q[j].AccountNonce }
|
||||
func (q txQueue) Less(i, j int) bool { return q[i].Nonce() < q[j].Nonce() }
|
||||
|
@ -13,8 +13,9 @@ import (
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
)
|
||||
|
||||
func transaction() *types.Transaction {
|
||||
return types.NewTransactionMessage(common.Address{}, big.NewInt(100), big.NewInt(100), big.NewInt(100), nil)
|
||||
func transaction(nonce uint64, gaslimit *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
|
||||
tx, _ := types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, big.NewInt(1), nil).SignECDSA(key)
|
||||
return tx
|
||||
}
|
||||
|
||||
func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
|
||||
@ -29,43 +30,34 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
|
||||
func TestInvalidTransactions(t *testing.T) {
|
||||
pool, key := setupTxPool()
|
||||
|
||||
tx := transaction()
|
||||
tx.SignECDSA(key)
|
||||
err := pool.Add(tx)
|
||||
if err != ErrNonExistentAccount {
|
||||
tx := transaction(0, big.NewInt(100), key)
|
||||
if err := pool.Add(tx); err != ErrNonExistentAccount {
|
||||
t.Error("expected", ErrNonExistentAccount)
|
||||
}
|
||||
|
||||
from, _ := tx.From()
|
||||
pool.currentState().AddBalance(from, big.NewInt(1))
|
||||
err = pool.Add(tx)
|
||||
if err != ErrInsufficientFunds {
|
||||
if err := pool.Add(tx); err != ErrInsufficientFunds {
|
||||
t.Error("expected", ErrInsufficientFunds)
|
||||
}
|
||||
|
||||
balance := new(big.Int).Add(tx.Value(), new(big.Int).Mul(tx.Gas(), tx.GasPrice()))
|
||||
pool.currentState().AddBalance(from, balance)
|
||||
err = pool.Add(tx)
|
||||
if err != ErrIntrinsicGas {
|
||||
if err := pool.Add(tx); err != ErrIntrinsicGas {
|
||||
t.Error("expected", ErrIntrinsicGas, "got", err)
|
||||
}
|
||||
|
||||
pool.currentState().SetNonce(from, 1)
|
||||
pool.currentState().AddBalance(from, big.NewInt(0xffffffffffffff))
|
||||
tx.GasLimit = big.NewInt(100000)
|
||||
tx.Price = big.NewInt(1)
|
||||
tx.SignECDSA(key)
|
||||
|
||||
err = pool.Add(tx)
|
||||
if err != ErrNonce {
|
||||
tx = transaction(0, big.NewInt(100000), key)
|
||||
if err := pool.Add(tx); err != ErrNonce {
|
||||
t.Error("expected", ErrNonce)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTransactionQueue(t *testing.T) {
|
||||
pool, key := setupTxPool()
|
||||
tx := transaction()
|
||||
tx.SignECDSA(key)
|
||||
tx := transaction(0, big.NewInt(100), key)
|
||||
from, _ := tx.From()
|
||||
pool.currentState().AddBalance(from, big.NewInt(1))
|
||||
pool.queueTx(tx.Hash(), tx)
|
||||
@ -75,9 +67,7 @@ func TestTransactionQueue(t *testing.T) {
|
||||
t.Error("expected valid txs to be 1 is", len(pool.pending))
|
||||
}
|
||||
|
||||
tx = transaction()
|
||||
tx.SetNonce(1)
|
||||
tx.SignECDSA(key)
|
||||
tx = transaction(1, big.NewInt(100), key)
|
||||
from, _ = tx.From()
|
||||
pool.currentState().SetNonce(from, 2)
|
||||
pool.queueTx(tx.Hash(), tx)
|
||||
@ -91,12 +81,9 @@ func TestTransactionQueue(t *testing.T) {
|
||||
}
|
||||
|
||||
pool, key = setupTxPool()
|
||||
tx1, tx2, tx3 := transaction(), transaction(), transaction()
|
||||
tx2.SetNonce(10)
|
||||
tx3.SetNonce(11)
|
||||
tx1.SignECDSA(key)
|
||||
tx2.SignECDSA(key)
|
||||
tx3.SignECDSA(key)
|
||||
tx1 := transaction(0, big.NewInt(100), key)
|
||||
tx2 := transaction(10, big.NewInt(100), key)
|
||||
tx3 := transaction(11, big.NewInt(100), key)
|
||||
pool.queueTx(tx1.Hash(), tx1)
|
||||
pool.queueTx(tx2.Hash(), tx2)
|
||||
pool.queueTx(tx3.Hash(), tx3)
|
||||
@ -114,8 +101,7 @@ func TestTransactionQueue(t *testing.T) {
|
||||
|
||||
func TestRemoveTx(t *testing.T) {
|
||||
pool, key := setupTxPool()
|
||||
tx := transaction()
|
||||
tx.SignECDSA(key)
|
||||
tx := transaction(0, big.NewInt(100), key)
|
||||
from, _ := tx.From()
|
||||
pool.currentState().AddBalance(from, big.NewInt(1))
|
||||
pool.queueTx(tx.Hash(), tx)
|
||||
@ -142,13 +128,10 @@ func TestRemoveTx(t *testing.T) {
|
||||
func TestNegativeValue(t *testing.T) {
|
||||
pool, key := setupTxPool()
|
||||
|
||||
tx := transaction()
|
||||
tx.Value().Set(big.NewInt(-1))
|
||||
tx.SignECDSA(key)
|
||||
tx, _ := types.NewTransaction(0, common.Address{}, big.NewInt(-1), big.NewInt(100), big.NewInt(1), nil).SignECDSA(key)
|
||||
from, _ := tx.From()
|
||||
pool.currentState().AddBalance(from, big.NewInt(1))
|
||||
err := pool.Add(tx)
|
||||
if err != ErrNegativeValue {
|
||||
if err := pool.Add(tx); err != ErrNegativeValue {
|
||||
t.Error("expected", ErrNegativeValue, "got", err)
|
||||
}
|
||||
}
|
||||
@ -165,20 +148,15 @@ func TestTransactionChainFork(t *testing.T) {
|
||||
}
|
||||
resetState()
|
||||
|
||||
tx := transaction()
|
||||
tx.GasLimit = big.NewInt(100000)
|
||||
tx.SignECDSA(key)
|
||||
|
||||
err := pool.add(tx)
|
||||
if err != nil {
|
||||
tx := transaction(0, big.NewInt(100000), key)
|
||||
if err := pool.add(tx); err != nil {
|
||||
t.Error("didn't expect error", err)
|
||||
}
|
||||
pool.RemoveTransactions([]*types.Transaction{tx})
|
||||
|
||||
// reset the pool's internal state
|
||||
resetState()
|
||||
err = pool.add(tx)
|
||||
if err != nil {
|
||||
if err := pool.add(tx); err != nil {
|
||||
t.Error("didn't expect error", err)
|
||||
}
|
||||
}
|
||||
@ -195,24 +173,14 @@ func TestTransactionDoubleNonce(t *testing.T) {
|
||||
}
|
||||
resetState()
|
||||
|
||||
tx := transaction()
|
||||
tx.GasLimit = big.NewInt(100000)
|
||||
tx.SignECDSA(key)
|
||||
|
||||
err := pool.add(tx)
|
||||
if err != nil {
|
||||
tx := transaction(0, big.NewInt(100000), key)
|
||||
tx2 := transaction(0, big.NewInt(1000000), key)
|
||||
if err := pool.add(tx); err != nil {
|
||||
t.Error("didn't expect error", err)
|
||||
}
|
||||
|
||||
tx2 := transaction()
|
||||
tx2.GasLimit = big.NewInt(1000000)
|
||||
tx2.SignECDSA(key)
|
||||
|
||||
err = pool.add(tx2)
|
||||
if err != nil {
|
||||
if err := pool.add(tx2); err != nil {
|
||||
t.Error("didn't expect error", err)
|
||||
}
|
||||
|
||||
if len(pool.pending) != 2 {
|
||||
t.Error("expected 2 pending txs. Got", len(pool.pending))
|
||||
}
|
||||
@ -222,20 +190,13 @@ func TestMissingNonce(t *testing.T) {
|
||||
pool, key := setupTxPool()
|
||||
addr := crypto.PubkeyToAddress(key.PublicKey)
|
||||
pool.currentState().AddBalance(addr, big.NewInt(100000000000000))
|
||||
tx := transaction()
|
||||
tx.AccountNonce = 1
|
||||
tx.GasLimit = big.NewInt(100000)
|
||||
tx.SignECDSA(key)
|
||||
|
||||
err := pool.add(tx)
|
||||
if err != nil {
|
||||
tx := transaction(1, big.NewInt(100000), key)
|
||||
if err := pool.add(tx); err != nil {
|
||||
t.Error("didn't expect error", err)
|
||||
}
|
||||
|
||||
if len(pool.pending) != 0 {
|
||||
t.Error("expected 0 pending transactions, got", len(pool.pending))
|
||||
}
|
||||
|
||||
if len(pool.queue[addr]) != 1 {
|
||||
t.Error("expected 1 queued transaction, got", len(pool.queue[addr]))
|
||||
}
|
||||
|
@ -8,6 +8,7 @@ import (
|
||||
"io"
|
||||
"math/big"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -15,71 +16,59 @@ import (
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// A BlockNonce is a 64-bit hash which proves (combined with the
// mix-hash) that a sufficient amount of computation has been carried
// out on a block.
type BlockNonce [8]byte

func EncodeNonce(i uint64) BlockNonce {
var n BlockNonce
binary.BigEndian.PutUint64(n[:], i)
return n
}

func (n BlockNonce) Uint64() uint64 {
return binary.BigEndian.Uint64(n[:])
}
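Illustrative sketch, not part of the diff: a nonce round-trips through its 8-byte big-endian encoding.

n := EncodeNonce(42)
fmt.Println(n.Uint64()) // 42
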
|
||||
type Header struct {
|
||||
// Hash to the previous block
|
||||
ParentHash common.Hash
|
||||
// Uncles of this block
|
||||
UncleHash common.Hash
|
||||
// The coin base address
|
||||
Coinbase common.Address
|
||||
// Block Trie state
|
||||
Root common.Hash
|
||||
// Tx sha
|
||||
TxHash common.Hash
|
||||
// Receipt sha
|
||||
ReceiptHash common.Hash
|
||||
// Bloom
|
||||
Bloom Bloom
|
||||
// Difficulty for the current block
|
||||
Difficulty *big.Int
|
||||
// The block number
|
||||
Number *big.Int
|
||||
// Gas limit
|
||||
GasLimit *big.Int
|
||||
// Gas used
|
||||
GasUsed *big.Int
|
||||
// Creation time
|
||||
Time uint64
|
||||
// Extra data
|
||||
Extra []byte
|
||||
// Mix digest for quick checking to prevent DOS
|
||||
MixDigest common.Hash
|
||||
// Nonce
|
||||
Nonce [8]byte
|
||||
ParentHash common.Hash // Hash to the previous block
|
||||
UncleHash common.Hash // Uncles of this block
|
||||
Coinbase common.Address // The coin base address
|
||||
Root common.Hash // Block Trie state
|
||||
TxHash common.Hash // Tx sha
|
||||
ReceiptHash common.Hash // Receipt sha
|
||||
Bloom Bloom // Bloom
|
||||
Difficulty *big.Int // Difficulty for the current block
|
||||
Number *big.Int // The block number
|
||||
GasLimit *big.Int // Gas limit
|
||||
GasUsed *big.Int // Gas used
|
||||
Time uint64 // Creation time
|
||||
Extra []byte // Extra data
|
||||
MixDigest common.Hash // for quick difficulty verification
|
||||
Nonce BlockNonce
|
||||
}
|
||||
|
||||
func (self *Header) Hash() common.Hash {
|
||||
return rlpHash(self.rlpData(true))
|
||||
func (h *Header) Hash() common.Hash {
|
||||
return rlpHash(h)
|
||||
}
|
||||
|
||||
func (self *Header) HashNoNonce() common.Hash {
|
||||
return rlpHash(self.rlpData(false))
|
||||
}
|
||||
|
||||
func (self *Header) rlpData(withNonce bool) []interface{} {
|
||||
fields := []interface{}{
|
||||
self.ParentHash,
|
||||
self.UncleHash,
|
||||
self.Coinbase,
|
||||
self.Root,
|
||||
self.TxHash,
|
||||
self.ReceiptHash,
|
||||
self.Bloom,
|
||||
self.Difficulty,
|
||||
self.Number,
|
||||
self.GasLimit,
|
||||
self.GasUsed,
|
||||
self.Time,
|
||||
self.Extra,
|
||||
}
|
||||
if withNonce {
|
||||
fields = append(fields, self.MixDigest, self.Nonce)
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
func (self *Header) RlpData() interface{} {
|
||||
return self.rlpData(true)
|
||||
func (h *Header) HashNoNonce() common.Hash {
|
||||
return rlpHash([]interface{}{
|
||||
h.ParentHash,
|
||||
h.UncleHash,
|
||||
h.Coinbase,
|
||||
h.Root,
|
||||
h.TxHash,
|
||||
h.ReceiptHash,
|
||||
h.Bloom,
|
||||
h.Difficulty,
|
||||
h.Number,
|
||||
h.GasLimit,
|
||||
h.GasUsed,
|
||||
h.Time,
|
||||
h.Extra,
|
||||
})
|
||||
}
|
||||
|
||||
func (h *Header) UnmarshalJSON(data []byte) error {
|
||||
@@ -112,20 +101,21 @@ func rlpHash(x interface{}) (h common.Hash) {
|
||||
}
|
||||
|
||||
type Block struct {
|
||||
// Preset Hash for mock (Tests)
|
||||
HeaderHash common.Hash
|
||||
ParentHeaderHash common.Hash
|
||||
// ^^^^ ignore ^^^^
|
||||
|
||||
header *Header
|
||||
uncles []*Header
|
||||
transactions Transactions
|
||||
Td *big.Int
|
||||
queued bool // flag for blockpool to skip TD check
|
||||
receipts Receipts
|
||||
|
||||
// caches
|
||||
hash atomic.Value
|
||||
size atomic.Value
|
||||
|
||||
// Td is used by package core to store the total difficulty
|
||||
// of the chain up to and including the block.
|
||||
Td *big.Int
|
||||
|
||||
// ReceivedAt is used by package eth to track block propagation time.
|
||||
ReceivedAt time.Time
|
||||
|
||||
receipts Receipts
|
||||
}
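Aside: the hash and size fields above use the lazy-caching idiom sketched below, compute once, publish through atomic.Value, serve every later call from the cache. The snippet is a standalone illustration of the pattern, not code from this commit.

package main

import (
	"fmt"
	"sync/atomic"
)

// item mimics how Block (and Transaction, later in this diff) cache a derived
// value: atomic.Value makes the cached result safe to read from many goroutines.
type item struct {
	data []byte
	hash atomic.Value // stores uint64
}

func (it *item) Hash() uint64 {
	if h := it.hash.Load(); h != nil {
		return h.(uint64) // cache hit
	}
	var v uint64
	for _, b := range it.data { // stand-in for the real rlpHash
		v = v*31 + uint64(b)
	}
	it.hash.Store(v)
	return v
}

func main() {
	it := &item{data: []byte("hello")}
	fmt.Println(it.Hash(), it.Hash()) // second call is served from the cache
}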
|
||||
|
||||
// StorageBlock defines the RLP encoding of a Block stored in the
|
||||
@@ -148,43 +138,90 @@ type storageblock struct {
|
||||
TD *big.Int
|
||||
}
|
||||
|
||||
func NewBlock(parentHash common.Hash, coinbase common.Address, root common.Hash, difficulty *big.Int, nonce uint64, extra []byte) *Block {
|
||||
header := &Header{
|
||||
Root: root,
|
||||
ParentHash: parentHash,
|
||||
Coinbase: coinbase,
|
||||
Difficulty: difficulty,
|
||||
Time: uint64(time.Now().Unix()),
|
||||
Extra: extra,
|
||||
GasUsed: new(big.Int),
|
||||
GasLimit: new(big.Int),
|
||||
Number: new(big.Int),
|
||||
var (
|
||||
emptyRootHash = DeriveSha(Transactions{})
|
||||
emptyUncleHash = CalcUncleHash(nil)
|
||||
)
|
||||
|
||||
// NewBlock creates a new block. The input data is copied,
|
||||
// changes to header and to the field values will not affect the
|
||||
// block.
|
||||
//
|
||||
// The values of TxHash, UncleHash, ReceiptHash and Bloom in header
|
||||
// are ignored and set to values derived from the given txs, uncles
|
||||
// and receipts.
|
||||
func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block {
|
||||
b := &Block{header: copyHeader(header), Td: new(big.Int)}
|
||||
|
||||
// TODO: panic if len(txs) != len(receipts)
|
||||
if len(txs) == 0 {
|
||||
b.header.TxHash = emptyRootHash
|
||||
} else {
|
||||
b.header.TxHash = DeriveSha(Transactions(txs))
|
||||
b.transactions = make(Transactions, len(txs))
|
||||
copy(b.transactions, txs)
|
||||
}
|
||||
header.SetNonce(nonce)
|
||||
block := &Block{header: header}
|
||||
block.Td = new(big.Int)
|
||||
|
||||
return block
|
||||
}
|
||||
|
||||
func (self *Header) SetNonce(nonce uint64) {
|
||||
binary.BigEndian.PutUint64(self.Nonce[:], nonce)
|
||||
if len(receipts) == 0 {
|
||||
b.header.ReceiptHash = emptyRootHash
|
||||
} else {
|
||||
b.header.ReceiptHash = DeriveSha(Receipts(receipts))
|
||||
b.header.Bloom = CreateBloom(receipts)
|
||||
b.receipts = make([]*Receipt, len(receipts))
|
||||
copy(b.receipts, receipts)
|
||||
}
|
||||
|
||||
if len(uncles) == 0 {
|
||||
b.header.UncleHash = emptyUncleHash
|
||||
} else {
|
||||
b.header.UncleHash = CalcUncleHash(uncles)
|
||||
b.uncles = make([]*Header, len(uncles))
|
||||
for i := range uncles {
|
||||
b.uncles[i] = copyHeader(uncles[i])
|
||||
}
|
||||
}
|
||||
|
||||
return b
|
||||
}
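A hypothetical call site for the new constructor, showing that only the non-derivable header fields need to be supplied; TxHash, UncleHash, ReceiptHash and Bloom are filled in from the (here empty) slices.

package main

import (
	"fmt"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	header := &types.Header{
		Number:     big.NewInt(1),
		Difficulty: big.NewInt(131072),
		GasLimit:   big.NewInt(3141592),
		Time:       uint64(time.Now().Unix()),
	}
	// No txs, uncles or receipts: the derived hashes fall back to
	// emptyRootHash / emptyUncleHash.
	block := types.NewBlock(header, nil, nil, nil)
	fmt.Printf("block #%d %x\n", block.NumberU64(), block.Hash())
}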
|
||||
|
||||
// NewBlockWithHeader creates a block with the given header data. The
|
||||
// header data is copied, changes to header and to the field values
|
||||
// will not affect the block.
|
||||
func NewBlockWithHeader(header *Header) *Block {
|
||||
return &Block{header: header}
|
||||
return &Block{header: copyHeader(header)}
|
||||
}
|
||||
|
||||
func (self *Block) ValidateFields() error {
|
||||
if self.header == nil {
|
||||
func copyHeader(h *Header) *Header {
	cpy := *h
	if cpy.Difficulty = new(big.Int); h.Difficulty != nil {
		cpy.Difficulty.Set(h.Difficulty)
	}
	if cpy.Number = new(big.Int); h.Number != nil {
		cpy.Number.Set(h.Number)
	}
	if cpy.GasLimit = new(big.Int); h.GasLimit != nil {
		cpy.GasLimit.Set(h.GasLimit)
	}
	if cpy.GasUsed = new(big.Int); h.GasUsed != nil {
		cpy.GasUsed.Set(h.GasUsed)
	}
	if len(h.Extra) > 0 {
		cpy.Extra = make([]byte, len(h.Extra))
		copy(cpy.Extra, h.Extra)
	}
	return &cpy
}
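To make the copy semantics concrete, a small illustrative check: because copyHeader clones the big.Int fields, mutating the caller's header after construction does not leak into the block.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	h := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
	b := types.NewBlockWithHeader(h) // runs through copyHeader

	h.Number.SetInt64(999)  // caller keeps mutating its own header
	fmt.Println(b.Number()) // still 1
}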
|
||||
|
||||
func (b *Block) ValidateFields() error {
|
||||
if b.header == nil {
|
||||
return fmt.Errorf("header is nil")
|
||||
}
|
||||
for i, transaction := range self.transactions {
|
||||
for i, transaction := range b.transactions {
|
||||
if transaction == nil {
|
||||
return fmt.Errorf("transaction %d is nil", i)
|
||||
}
|
||||
}
|
||||
for i, uncle := range self.uncles {
|
||||
for i, uncle := range b.uncles {
|
||||
if uncle == nil {
|
||||
return fmt.Errorf("uncle %d is nil", i)
|
||||
}
|
||||
@ -192,64 +229,50 @@ func (self *Block) ValidateFields() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Block) DecodeRLP(s *rlp.Stream) error {
|
||||
func (b *Block) DecodeRLP(s *rlp.Stream) error {
|
||||
var eb extblock
|
||||
_, size, _ := s.Kind()
|
||||
if err := s.Decode(&eb); err != nil {
|
||||
return err
|
||||
}
|
||||
self.header, self.uncles, self.transactions = eb.Header, eb.Uncles, eb.Txs
|
||||
b.header, b.uncles, b.transactions = eb.Header, eb.Uncles, eb.Txs
|
||||
b.size.Store(common.StorageSize(rlp.ListSize(size)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self Block) EncodeRLP(w io.Writer) error {
|
||||
func (b Block) EncodeRLP(w io.Writer) error {
|
||||
return rlp.Encode(w, extblock{
|
||||
Header: self.header,
|
||||
Txs: self.transactions,
|
||||
Uncles: self.uncles,
|
||||
Header: b.header,
|
||||
Txs: b.transactions,
|
||||
Uncles: b.uncles,
|
||||
})
|
||||
}
|
||||
|
||||
func (self *StorageBlock) DecodeRLP(s *rlp.Stream) error {
|
||||
func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
|
||||
var sb storageblock
|
||||
if err := s.Decode(&sb); err != nil {
|
||||
return err
|
||||
}
|
||||
self.header, self.uncles, self.transactions, self.Td = sb.Header, sb.Uncles, sb.Txs, sb.TD
|
||||
b.header, b.uncles, b.transactions, b.Td = sb.Header, sb.Uncles, sb.Txs, sb.TD
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self StorageBlock) EncodeRLP(w io.Writer) error {
|
||||
func (b StorageBlock) EncodeRLP(w io.Writer) error {
|
||||
return rlp.Encode(w, storageblock{
|
||||
Header: self.header,
|
||||
Txs: self.transactions,
|
||||
Uncles: self.uncles,
|
||||
TD: self.Td,
|
||||
Header: b.header,
|
||||
Txs: b.transactions,
|
||||
Uncles: b.uncles,
|
||||
TD: b.Td,
|
||||
})
|
||||
}
|
||||
|
||||
func (self *Block) Header() *Header {
|
||||
return self.header
|
||||
}
|
||||
// TODO: copies
|
||||
func (b *Block) Uncles() []*Header { return b.uncles }
|
||||
func (b *Block) Transactions() Transactions { return b.transactions }
|
||||
func (b *Block) Receipts() Receipts { return b.receipts }
|
||||
|
||||
func (self *Block) Uncles() []*Header {
|
||||
return self.uncles
|
||||
}
|
||||
|
||||
func (self *Block) CalculateUnclesHash() common.Hash {
|
||||
return rlpHash(self.uncles)
|
||||
}
|
||||
|
||||
func (self *Block) SetUncles(uncleHeaders []*Header) {
|
||||
self.uncles = uncleHeaders
|
||||
self.header.UncleHash = rlpHash(uncleHeaders)
|
||||
}
|
||||
|
||||
func (self *Block) Transactions() Transactions {
|
||||
return self.transactions
|
||||
}
|
||||
|
||||
func (self *Block) Transaction(hash common.Hash) *Transaction {
|
||||
for _, transaction := range self.transactions {
|
||||
func (b *Block) Transaction(hash common.Hash) *Transaction {
|
||||
for _, transaction := range b.transactions {
|
||||
if transaction.Hash() == hash {
|
||||
return transaction
|
||||
}
|
||||
@ -257,74 +280,37 @@ func (self *Block) Transaction(hash common.Hash) *Transaction {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Block) SetTransactions(transactions Transactions) {
|
||||
self.transactions = transactions
|
||||
self.header.TxHash = DeriveSha(transactions)
|
||||
}
|
||||
func (self *Block) AddTransaction(transaction *Transaction) {
|
||||
self.transactions = append(self.transactions, transaction)
|
||||
self.SetTransactions(self.transactions)
|
||||
func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) }
|
||||
func (b *Block) GasLimit() *big.Int { return new(big.Int).Set(b.header.GasLimit) }
|
||||
func (b *Block) GasUsed() *big.Int { return new(big.Int).Set(b.header.GasUsed) }
|
||||
func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) }
|
||||
|
||||
func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() }
|
||||
func (b *Block) MixDigest() common.Hash { return b.header.MixDigest }
|
||||
func (b *Block) Nonce() uint64 { return binary.BigEndian.Uint64(b.header.Nonce[:]) }
|
||||
func (b *Block) Bloom() Bloom { return b.header.Bloom }
|
||||
func (b *Block) Coinbase() common.Address { return b.header.Coinbase }
|
||||
func (b *Block) Time() int64 { return int64(b.header.Time) }
|
||||
func (b *Block) Root() common.Hash { return b.header.Root }
|
||||
func (b *Block) ParentHash() common.Hash { return b.header.ParentHash }
|
||||
func (b *Block) TxHash() common.Hash { return b.header.TxHash }
|
||||
func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash }
|
||||
func (b *Block) UncleHash() common.Hash { return b.header.UncleHash }
|
||||
func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) }
|
||||
|
||||
func (b *Block) Header() *Header { return copyHeader(b.header) }
|
||||
|
||||
func (b *Block) HashNoNonce() common.Hash {
|
||||
return b.header.HashNoNonce()
|
||||
}
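The accessors above are deliberately copy-on-read; a quick, hypothetical check of that behaviour:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	b := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(7), GasLimit: big.NewInt(5000)})

	gl := b.GasLimit() // a fresh *big.Int, not the header's own
	gl.SetInt64(0)
	fmt.Println(b.GasLimit(), b.Number()) // 5000 7
}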
|
||||
|
||||
func (self *Block) Receipts() Receipts {
|
||||
return self.receipts
|
||||
}
|
||||
|
||||
func (self *Block) SetReceipts(receipts Receipts) {
|
||||
self.receipts = receipts
|
||||
self.header.ReceiptHash = DeriveSha(receipts)
|
||||
self.header.Bloom = CreateBloom(receipts)
|
||||
}
|
||||
func (self *Block) AddReceipt(receipt *Receipt) {
|
||||
self.receipts = append(self.receipts, receipt)
|
||||
self.SetReceipts(self.receipts)
|
||||
}
|
||||
|
||||
func (self *Block) RlpData() interface{} {
|
||||
return []interface{}{self.header, self.transactions, self.uncles}
|
||||
}
|
||||
|
||||
func (self *Block) RlpDataForStorage() interface{} {
|
||||
return []interface{}{self.header, self.transactions, self.uncles, self.Td /* TODO receipts */}
|
||||
}
|
||||
|
||||
// Header accessors (add as you need them)
|
||||
func (self *Block) Number() *big.Int { return self.header.Number }
|
||||
func (self *Block) NumberU64() uint64 { return self.header.Number.Uint64() }
|
||||
func (self *Block) MixDigest() common.Hash { return self.header.MixDigest }
|
||||
func (self *Block) Nonce() uint64 {
|
||||
return binary.BigEndian.Uint64(self.header.Nonce[:])
|
||||
}
|
||||
func (self *Block) SetNonce(nonce uint64) {
|
||||
self.header.SetNonce(nonce)
|
||||
}
|
||||
|
||||
func (self *Block) Queued() bool { return self.queued }
|
||||
func (self *Block) SetQueued(q bool) { self.queued = q }
|
||||
|
||||
func (self *Block) Bloom() Bloom { return self.header.Bloom }
|
||||
func (self *Block) Coinbase() common.Address { return self.header.Coinbase }
|
||||
func (self *Block) Time() int64 { return int64(self.header.Time) }
|
||||
func (self *Block) GasLimit() *big.Int { return self.header.GasLimit }
|
||||
func (self *Block) GasUsed() *big.Int { return self.header.GasUsed }
|
||||
func (self *Block) Root() common.Hash { return self.header.Root }
|
||||
func (self *Block) SetRoot(root common.Hash) { self.header.Root = root }
|
||||
func (self *Block) GetTransaction(i int) *Transaction {
|
||||
if len(self.transactions) > i {
|
||||
return self.transactions[i]
|
||||
func (b *Block) Size() common.StorageSize {
|
||||
if size := b.size.Load(); size != nil {
|
||||
return size.(common.StorageSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (self *Block) GetUncle(i int) *Header {
|
||||
if len(self.uncles) > i {
|
||||
return self.uncles[i]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Block) Size() common.StorageSize {
|
||||
c := writeCounter(0)
|
||||
rlp.Encode(&c, self)
|
||||
rlp.Encode(&c, b)
|
||||
b.size.Store(common.StorageSize(c))
|
||||
return common.StorageSize(c)
|
||||
}
|
||||
|
||||
@ -335,48 +321,37 @@ func (c *writeCounter) Write(b []byte) (int, error) {
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func CalcUncleHash(uncles []*Header) common.Hash {
|
||||
return rlpHash(uncles)
|
||||
}
|
||||
|
||||
// WithMiningResult returns a new block with the data from b
|
||||
// where nonce and mix digest are set to the provided values.
|
||||
func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
|
||||
cpy := *b.header
|
||||
binary.BigEndian.PutUint64(cpy.Nonce[:], nonce)
|
||||
cpy.MixDigest = mixDigest
|
||||
return &Block{
|
||||
header: &cpy,
|
||||
transactions: b.transactions,
|
||||
receipts: b.receipts,
|
||||
uncles: b.uncles,
|
||||
Td: b.Td,
|
||||
}
|
||||
}
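A hedged sketch of the miner-side use of WithMiningResult: sealing yields a new block and leaves the pending one (and any cached hash) untouched. The numbers and the mix digest are illustrative.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	pending := types.NewBlock(&types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1)}, nil, nil, nil)

	sealed := pending.WithMiningResult(12345, common.HexToHash("0xabcdef"))
	fmt.Println(sealed.Nonce())                  // 12345
	fmt.Println(sealed.Hash() != pending.Hash()) // true: the sealed header changed
}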
|
||||
|
||||
// Implement pow.Block
|
||||
func (self *Block) Difficulty() *big.Int { return self.header.Difficulty }
|
||||
func (self *Block) HashNoNonce() common.Hash { return self.header.HashNoNonce() }
|
||||
|
||||
func (self *Block) Hash() common.Hash {
|
||||
if (self.HeaderHash != common.Hash{}) {
|
||||
return self.HeaderHash
|
||||
} else {
|
||||
return self.header.Hash()
|
||||
func (b *Block) Hash() common.Hash {
|
||||
if hash := b.hash.Load(); hash != nil {
|
||||
return hash.(common.Hash)
|
||||
}
|
||||
v := rlpHash(b.header)
|
||||
b.hash.Store(v)
|
||||
return v
|
||||
}
|
||||
|
||||
func (self *Block) ParentHash() common.Hash {
|
||||
if (self.ParentHeaderHash != common.Hash{}) {
|
||||
return self.ParentHeaderHash
|
||||
} else {
|
||||
return self.header.ParentHash
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Block) Copy() *Block {
|
||||
block := NewBlock(self.header.ParentHash, self.Coinbase(), self.Root(), new(big.Int), self.Nonce(), self.header.Extra)
|
||||
block.header.Bloom = self.header.Bloom
|
||||
block.header.TxHash = self.header.TxHash
|
||||
block.transactions = self.transactions
|
||||
block.header.UncleHash = self.header.UncleHash
|
||||
block.uncles = self.uncles
|
||||
block.header.GasLimit.Set(self.header.GasLimit)
|
||||
block.header.GasUsed.Set(self.header.GasUsed)
|
||||
block.header.ReceiptHash = self.header.ReceiptHash
|
||||
block.header.Difficulty.Set(self.header.Difficulty)
|
||||
block.header.Number.Set(self.header.Number)
|
||||
block.header.Time = self.header.Time
|
||||
block.header.MixDigest = self.header.MixDigest
|
||||
if self.Td != nil {
|
||||
block.Td.Set(self.Td)
|
||||
}
|
||||
|
||||
return block
|
||||
}
|
||||
|
||||
func (self *Block) String() string {
|
||||
func (b *Block) String() string {
|
||||
str := fmt.Sprintf(`Block(#%v): Size: %v TD: %v {
|
||||
MinerHash: %x
|
||||
%v
|
||||
@ -385,20 +360,11 @@ Transactions:
|
||||
Uncles:
|
||||
%v
|
||||
}
|
||||
`, self.Number(), self.Size(), self.Td, self.header.HashNoNonce(), self.header, self.transactions, self.uncles)
|
||||
|
||||
if (self.HeaderHash != common.Hash{}) {
|
||||
str += fmt.Sprintf("\nFake hash = %x", self.HeaderHash)
|
||||
}
|
||||
|
||||
if (self.ParentHeaderHash != common.Hash{}) {
|
||||
str += fmt.Sprintf("\nFake parent hash = %x", self.ParentHeaderHash)
|
||||
}
|
||||
|
||||
`, b.Number(), b.Size(), b.Td, b.header.HashNoNonce(), b.header, b.transactions, b.uncles)
|
||||
return str
|
||||
}
|
||||
|
||||
func (self *Header) String() string {
|
||||
func (h *Header) String() string {
|
||||
return fmt.Sprintf(`Header(%x):
|
||||
[
|
||||
ParentHash: %x
|
||||
@ -414,9 +380,9 @@ func (self *Header) String() string {
|
||||
GasUsed: %v
|
||||
Time: %v
|
||||
Extra: %s
|
||||
MixDigest: %x
|
||||
MixDigest: %x
|
||||
Nonce: %x
|
||||
]`, self.Hash(), self.ParentHash, self.UncleHash, self.Coinbase, self.Root, self.TxHash, self.ReceiptHash, self.Bloom, self.Difficulty, self.Number, self.GasLimit, self.GasUsed, self.Time, self.Extra, self.MixDigest, self.Nonce)
|
||||
]`, h.Hash(), h.ParentHash, h.UncleHash, h.Coinbase, h.Root, h.TxHash, h.ReceiptHash, h.Bloom, h.Difficulty, h.Number, h.GasLimit, h.GasUsed, h.Time, h.Extra, h.MixDigest, h.Nonce)
|
||||
}
|
||||
|
||||
type Blocks []*Block
|
||||
@ -442,4 +408,4 @@ func (self blockSorter) Swap(i, j int) {
|
||||
}
|
||||
func (self blockSorter) Less(i, j int) bool { return self.by(self.blocks[i], self.blocks[j]) }
|
||||
|
||||
func Number(b1, b2 *Block) bool { return b1.Header().Number.Cmp(b2.Header().Number) < 0 }
|
||||
func Number(b1, b2 *Block) bool { return b1.header.Number.Cmp(b2.header.Number) < 0 }
|
||||
|
@@ -13,7 +13,6 @@ import (
|
||||
// from bcValidBlockTest.json, "SimpleTx"
|
||||
func TestBlockEncoding(t *testing.T) {
|
||||
blockEnc := common.FromHex("f90260f901f9a083cafc574e1f51ba9dc0568fc617a08ea2429fb384059c972f13b19fa1c8dd55a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017a05fe50b260da6308036625b850b5d6ced6d0a9f814c0688bc91ffb7b7a3a54b67a0bc37d79753ad738a6dac4921e57392f145d8887476de3f783dfa7edae9283e52b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd8825208845506eb0780a0bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff49888a13a5a8c8f2bb1c4f861f85f800a82c35094095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba09bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094fa08a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b1c0")
|
||||
|
||||
var block Block
|
||||
if err := rlp.DecodeBytes(blockEnc, &block); err != nil {
|
||||
t.Fatal("decode error: ", err)
|
||||
@ -35,20 +34,10 @@ func TestBlockEncoding(t *testing.T) {
|
||||
check("Time", block.Time(), int64(1426516743))
|
||||
check("Size", block.Size(), common.StorageSize(len(blockEnc)))
|
||||
|
||||
to := common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87")
|
||||
check("Transactions", block.Transactions(), Transactions{
|
||||
{
|
||||
Payload: []byte{},
|
||||
Amount: big.NewInt(10),
|
||||
Price: big.NewInt(10),
|
||||
GasLimit: big.NewInt(50000),
|
||||
AccountNonce: 0,
|
||||
V: 27,
|
||||
R: common.String2Big("0x9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f"),
|
||||
S: common.String2Big("0x8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b1"),
|
||||
Recipient: &to,
|
||||
},
|
||||
})
|
||||
tx1 := NewTransaction(0, common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), big.NewInt(10), big.NewInt(50000), big.NewInt(10), nil)
|
||||
tx1, _ = tx1.WithSignature(common.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100"))
|
||||
check("len(Transactions)", len(block.Transactions()), 1)
|
||||
check("Transactions[0].Hash", block.Transactions()[0].Hash(), tx1.Hash())
|
||||
|
||||
ourBlockEnc, err := rlp.EncodeToBytes(&block)
|
||||
if err != nil {
|
||||
|
@@ -4,7 +4,9 @@ import (
|
||||
"crypto/ecdsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
@ -18,38 +20,63 @@ func IsContractAddr(addr []byte) bool {
|
||||
}
|
||||
|
||||
type Transaction struct {
|
||||
AccountNonce uint64
|
||||
Price *big.Int
|
||||
GasLimit *big.Int
|
||||
Recipient *common.Address `rlp:"nil"` // nil means contract creation
|
||||
Amount *big.Int
|
||||
Payload []byte
|
||||
V byte
|
||||
R, S *big.Int
|
||||
data txdata
|
||||
// caches
|
||||
hash atomic.Value
|
||||
size atomic.Value
|
||||
from atomic.Value
|
||||
}
|
||||
|
||||
func NewContractCreationTx(amount, gasLimit, gasPrice *big.Int, data []byte) *Transaction {
|
||||
return &Transaction{
|
||||
Recipient: nil,
|
||||
Amount: amount,
|
||||
GasLimit: gasLimit,
|
||||
Price: gasPrice,
|
||||
Payload: data,
|
||||
R: new(big.Int),
|
||||
S: new(big.Int),
|
||||
}
|
||||
type txdata struct {
|
||||
AccountNonce uint64
|
||||
Price, GasLimit *big.Int
|
||||
Recipient *common.Address `rlp:"nil"` // nil means contract creation
|
||||
Amount *big.Int
|
||||
Payload []byte
|
||||
V byte // signature
|
||||
R, S *big.Int // signature
|
||||
}
|
||||
|
||||
func NewTransactionMessage(to common.Address, amount, gasAmount, gasPrice *big.Int, data []byte) *Transaction {
|
||||
return &Transaction{
|
||||
Recipient: &to,
|
||||
Amount: amount,
|
||||
GasLimit: gasAmount,
|
||||
Price: gasPrice,
|
||||
Payload: data,
|
||||
R: new(big.Int),
|
||||
S: new(big.Int),
|
||||
func NewContractCreation(nonce uint64, amount, gasLimit, gasPrice *big.Int, data []byte) *Transaction {
|
||||
if len(data) > 0 {
|
||||
data = common.CopyBytes(data)
|
||||
}
|
||||
return &Transaction{data: txdata{
|
||||
AccountNonce: nonce,
|
||||
Recipient: nil,
|
||||
Amount: new(big.Int).Set(amount),
|
||||
GasLimit: new(big.Int).Set(gasLimit),
|
||||
Price: new(big.Int).Set(gasPrice),
|
||||
Payload: data,
|
||||
R: new(big.Int),
|
||||
S: new(big.Int),
|
||||
}}
|
||||
}
|
||||
|
||||
func NewTransaction(nonce uint64, to common.Address, amount, gasLimit, gasPrice *big.Int, data []byte) *Transaction {
|
||||
if len(data) > 0 {
|
||||
data = common.CopyBytes(data)
|
||||
}
|
||||
d := txdata{
|
||||
AccountNonce: nonce,
|
||||
Recipient: &to,
|
||||
Payload: data,
|
||||
Amount: new(big.Int),
|
||||
GasLimit: new(big.Int),
|
||||
Price: new(big.Int),
|
||||
R: new(big.Int),
|
||||
S: new(big.Int),
|
||||
}
|
||||
if amount != nil {
|
||||
d.Amount.Set(amount)
|
||||
}
|
||||
if gasLimit != nil {
|
||||
d.GasLimit.Set(gasLimit)
|
||||
}
|
||||
if gasPrice != nil {
|
||||
d.Price.Set(gasPrice)
|
||||
}
|
||||
return &Transaction{data: d}
|
||||
}
|
||||
|
||||
func NewTransactionFromBytes(data []byte) *Transaction {
|
||||
@ -61,112 +88,128 @@ func NewTransactionFromBytes(data []byte) *Transaction {
|
||||
return tx
|
||||
}
|
||||
|
||||
func (tx *Transaction) Hash() common.Hash {
|
||||
return rlpHash([]interface{}{
|
||||
tx.AccountNonce, tx.Price, tx.GasLimit, tx.Recipient, tx.Amount, tx.Payload,
|
||||
})
|
||||
func (tx *Transaction) EncodeRLP(w io.Writer) error {
|
||||
return rlp.Encode(w, &tx.data)
|
||||
}
|
||||
|
||||
// Size returns the encoded RLP size of tx.
|
||||
func (self *Transaction) Size() common.StorageSize {
|
||||
func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
|
||||
_, size, _ := s.Kind()
|
||||
err := s.Decode(&tx.data)
|
||||
if err == nil {
|
||||
tx.size.Store(common.StorageSize(rlp.ListSize(size)))
|
||||
}
|
||||
return err
|
||||
}
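Illustrative round trip through the new encoder/decoder: only the unexported txdata goes over the wire, and the accessors (plus Cost, defined further down in this file) read from it. The values mirror the block-encoding test earlier in this diff.

package main

import (
	"bytes"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	to := common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87")
	tx := types.NewTransaction(0, to, big.NewInt(10), big.NewInt(50000), big.NewInt(10), nil)

	var buf bytes.Buffer
	if err := rlp.Encode(&buf, tx); err != nil { // EncodeRLP writes tx.data only
		panic(err)
	}
	var decoded types.Transaction
	if err := rlp.DecodeBytes(buf.Bytes(), &decoded); err != nil { // also caches the size
		panic(err)
	}
	fmt.Println(decoded.Nonce(), decoded.Value(), decoded.Gas()) // 0 10 50000
	fmt.Println(decoded.Cost())                                  // 10 + 10*50000 = 500010
}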
|
||||
|
||||
func (tx *Transaction) Data() []byte { return common.CopyBytes(tx.data.Payload) }
|
||||
func (tx *Transaction) Gas() *big.Int { return new(big.Int).Set(tx.data.GasLimit) }
|
||||
func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.data.Price) }
|
||||
func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.data.Amount) }
|
||||
func (tx *Transaction) Nonce() uint64 { return tx.data.AccountNonce }
|
||||
|
||||
func (tx *Transaction) To() *common.Address {
|
||||
if tx.data.Recipient == nil {
|
||||
return nil
|
||||
} else {
|
||||
to := *tx.data.Recipient
|
||||
return &to
|
||||
}
|
||||
}
|
||||
|
||||
func (tx *Transaction) Hash() common.Hash {
|
||||
if hash := tx.hash.Load(); hash != nil {
|
||||
return hash.(common.Hash)
|
||||
}
|
||||
v := rlpHash([]interface{}{
|
||||
tx.data.AccountNonce,
|
||||
tx.data.Price,
|
||||
tx.data.GasLimit,
|
||||
tx.data.Recipient,
|
||||
tx.data.Amount,
|
||||
tx.data.Payload,
|
||||
})
|
||||
tx.hash.Store(v)
|
||||
return v
|
||||
}
|
||||
|
||||
func (tx *Transaction) Size() common.StorageSize {
|
||||
if size := tx.size.Load(); size != nil {
|
||||
return size.(common.StorageSize)
|
||||
}
|
||||
c := writeCounter(0)
|
||||
rlp.Encode(&c, self)
|
||||
rlp.Encode(&c, &tx.data)
|
||||
tx.size.Store(common.StorageSize(c))
|
||||
return common.StorageSize(c)
|
||||
}
|
||||
|
||||
func (self *Transaction) Data() []byte {
|
||||
return self.Payload
|
||||
}
|
||||
|
||||
func (self *Transaction) Gas() *big.Int {
|
||||
return self.GasLimit
|
||||
}
|
||||
|
||||
func (self *Transaction) GasPrice() *big.Int {
|
||||
return self.Price
|
||||
}
|
||||
|
||||
func (self *Transaction) Value() *big.Int {
|
||||
return self.Amount
|
||||
}
|
||||
|
||||
func (self *Transaction) Nonce() uint64 {
|
||||
return self.AccountNonce
|
||||
}
|
||||
|
||||
func (self *Transaction) SetNonce(AccountNonce uint64) {
|
||||
self.AccountNonce = AccountNonce
|
||||
}
|
||||
|
||||
func (self *Transaction) From() (common.Address, error) {
|
||||
pubkey, err := self.PublicKey()
|
||||
func (tx *Transaction) From() (common.Address, error) {
|
||||
if from := tx.from.Load(); from != nil {
|
||||
return from.(common.Address), nil
|
||||
}
|
||||
pubkey, err := tx.publicKey()
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
|
||||
var addr common.Address
|
||||
copy(addr[:], crypto.Sha3(pubkey[1:])[12:])
|
||||
tx.from.Store(addr)
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
// To returns the recipient of the transaction.
|
||||
// If transaction is a contract creation (with no recipient address)
|
||||
// To returns nil.
|
||||
func (tx *Transaction) To() *common.Address {
|
||||
return tx.Recipient
|
||||
// Cost returns amount + gasprice * gaslimit.
|
||||
func (tx *Transaction) Cost() *big.Int {
|
||||
total := new(big.Int).Mul(tx.data.Price, tx.data.GasLimit)
|
||||
total.Add(total, tx.data.Amount)
|
||||
return total
|
||||
}
|
||||
|
||||
func (tx *Transaction) GetSignatureValues() (v byte, r []byte, s []byte) {
|
||||
v = byte(tx.V)
|
||||
r = common.LeftPadBytes(tx.R.Bytes(), 32)
|
||||
s = common.LeftPadBytes(tx.S.Bytes(), 32)
|
||||
return
|
||||
func (tx *Transaction) SignatureValues() (v byte, r *big.Int, s *big.Int) {
|
||||
return tx.data.V, new(big.Int).Set(tx.data.R), new(big.Int).Set(tx.data.S)
|
||||
}
|
||||
|
||||
func (tx *Transaction) PublicKey() ([]byte, error) {
|
||||
if !crypto.ValidateSignatureValues(tx.V, tx.R, tx.S) {
|
||||
func (tx *Transaction) publicKey() ([]byte, error) {
|
||||
if !crypto.ValidateSignatureValues(tx.data.V, tx.data.R, tx.data.S) {
|
||||
return nil, errors.New("invalid v, r, s values")
|
||||
}
|
||||
|
||||
hash := tx.Hash()
|
||||
v, r, s := tx.GetSignatureValues()
|
||||
sig := append(r, s...)
|
||||
sig = append(sig, v-27)
|
||||
// encode the signature in uncompressed format
|
||||
r, s := tx.data.R.Bytes(), tx.data.S.Bytes()
|
||||
sig := make([]byte, 65)
|
||||
copy(sig[32-len(r):32], r)
|
||||
copy(sig[64-len(s):64], s)
|
||||
sig[64] = tx.data.V - 27
|
||||
|
||||
p, err := crypto.SigToPub(hash[:], sig)
|
||||
// recover the public key from the signature
|
||||
hash := tx.Hash()
|
||||
pub, err := crypto.Ecrecover(hash[:], sig)
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infof("Could not get pubkey from signature: ", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pubkey := crypto.FromECDSAPub(p)
|
||||
if len(pubkey) == 0 || pubkey[0] != 4 {
|
||||
if len(pub) == 0 || pub[0] != 4 {
|
||||
return nil, errors.New("invalid public key")
|
||||
}
|
||||
return pubkey, nil
|
||||
return pub, nil
|
||||
}
|
||||
|
||||
func (tx *Transaction) SetSignatureValues(sig []byte) error {
|
||||
tx.R = common.Bytes2Big(sig[:32])
|
||||
tx.S = common.Bytes2Big(sig[32:64])
|
||||
tx.V = sig[64] + 27
|
||||
return nil
|
||||
func (tx *Transaction) WithSignature(sig []byte) (*Transaction, error) {
|
||||
if len(sig) != 65 {
|
||||
panic(fmt.Sprintf("wrong size for signature: got %d, want 65", len(sig)))
|
||||
}
|
||||
cpy := &Transaction{data: tx.data}
|
||||
cpy.data.R = new(big.Int).SetBytes(sig[:32])
|
||||
cpy.data.S = new(big.Int).SetBytes(sig[32:64])
|
||||
cpy.data.V = sig[64] + 27
|
||||
return cpy, nil
|
||||
}
|
||||
|
||||
func (tx *Transaction) SignECDSA(prv *ecdsa.PrivateKey) error {
|
||||
func (tx *Transaction) SignECDSA(prv *ecdsa.PrivateKey) (*Transaction, error) {
|
||||
h := tx.Hash()
|
||||
sig, err := crypto.Sign(h[:], prv)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
tx.SetSignatureValues(sig)
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: remove
|
||||
func (tx *Transaction) RlpData() interface{} {
|
||||
data := []interface{}{tx.AccountNonce, tx.Price, tx.GasLimit, tx.Recipient, tx.Amount, tx.Payload}
|
||||
return append(data, tx.V, tx.R.Bytes(), tx.S.Bytes())
|
||||
return tx.WithSignature(sig)
|
||||
}
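A sign-and-recover sketch tying the pieces together. crypto.GenerateKey, crypto.FromECDSAPub and crypto.Sha3 are assumed to be available from the crypto package (the latter two also appear in this file's code); everything else uses the API exactly as changed here.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey() // assumed helper from the crypto package
	if err != nil {
		panic(err)
	}
	to := common.HexToAddress("b94f5374fce5edbc8e2a8697c15331677e6ebf0b")
	tx := types.NewTransaction(1, to, big.NewInt(10), big.NewInt(21000), big.NewInt(1), nil)

	signed, err := tx.SignECDSA(key) // returns a signed copy; tx itself is untouched
	if err != nil {
		panic(err)
	}
	from, err := signed.From() // recovered via Ecrecover, then cached
	if err != nil {
		panic(err)
	}

	var want common.Address
	copy(want[:], crypto.Sha3(crypto.FromECDSAPub(&key.PublicKey)[1:])[12:])
	fmt.Println(from == want) // true
}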
|
||||
|
||||
func (tx *Transaction) String() string {
|
||||
@ -176,12 +219,12 @@ func (tx *Transaction) String() string {
|
||||
} else {
|
||||
from = fmt.Sprintf("%x", f[:])
|
||||
}
|
||||
if t := tx.To(); t == nil {
|
||||
if tx.data.Recipient == nil {
|
||||
to = "[contract creation]"
|
||||
} else {
|
||||
to = fmt.Sprintf("%x", t[:])
|
||||
to = fmt.Sprintf("%x", tx.data.Recipient[:])
|
||||
}
|
||||
enc, _ := rlp.EncodeToBytes(tx)
|
||||
enc, _ := rlp.EncodeToBytes(&tx.data)
|
||||
return fmt.Sprintf(`
|
||||
TX(%x)
|
||||
Contract: %v
|
||||
@ -198,36 +241,24 @@ func (tx *Transaction) String() string {
|
||||
Hex: %x
|
||||
`,
|
||||
tx.Hash(),
|
||||
len(tx.Recipient) == 0,
|
||||
len(tx.data.Recipient) == 0,
|
||||
from,
|
||||
to,
|
||||
tx.AccountNonce,
|
||||
tx.Price,
|
||||
tx.GasLimit,
|
||||
tx.Amount,
|
||||
tx.Payload,
|
||||
tx.V,
|
||||
tx.R,
|
||||
tx.S,
|
||||
tx.data.AccountNonce,
|
||||
tx.data.Price,
|
||||
tx.data.GasLimit,
|
||||
tx.data.Amount,
|
||||
tx.data.Payload,
|
||||
tx.data.V,
|
||||
tx.data.R,
|
||||
tx.data.S,
|
||||
enc,
|
||||
)
|
||||
}
|
||||
|
||||
// Transaction slice type for basic sorting
|
||||
// Transaction slice type for basic sorting.
|
||||
type Transactions []*Transaction
|
||||
|
||||
// TODO: remove
|
||||
func (self Transactions) RlpData() interface{} {
|
||||
// Marshal the transactions of this block
|
||||
enc := make([]interface{}, len(self))
|
||||
for i, tx := range self {
|
||||
// Cast it to a string (safe)
|
||||
enc[i] = tx.RlpData()
|
||||
}
|
||||
|
||||
return enc
|
||||
}
|
||||
|
||||
func (s Transactions) Len() int { return len(s) }
|
||||
func (s Transactions) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
@ -239,5 +270,5 @@ func (s Transactions) GetRlp(i int) []byte {
|
||||
type TxByNonce struct{ Transactions }
|
||||
|
||||
func (s TxByNonce) Less(i, j int) bool {
|
||||
return s.Transactions[i].AccountNonce < s.Transactions[j].AccountNonce
|
||||
return s.Transactions[i].data.AccountNonce < s.Transactions[j].data.AccountNonce
|
||||
}
|
||||
|
@@ -15,40 +15,35 @@ import (
|
||||
// at github.com/ethereum/tests.
|
||||
|
||||
var (
|
||||
emptyTx = NewTransactionMessage(
|
||||
emptyTx = NewTransaction(
|
||||
0,
|
||||
common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
|
||||
big.NewInt(0), big.NewInt(0), big.NewInt(0),
|
||||
nil,
|
||||
)
|
||||
|
||||
rightvrsRecipient = common.HexToAddress("b94f5374fce5edbc8e2a8697c15331677e6ebf0b")
|
||||
rightvrsTx = &Transaction{
|
||||
Recipient: &rightvrsRecipient,
|
||||
AccountNonce: 3,
|
||||
Price: big.NewInt(1),
|
||||
GasLimit: big.NewInt(2000),
|
||||
Amount: big.NewInt(10),
|
||||
Payload: common.FromHex("5544"),
|
||||
V: 28,
|
||||
R: common.String2Big("0x98ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4a"),
|
||||
S: common.String2Big("0x8887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3"),
|
||||
}
|
||||
rightvrsTx, _ = NewTransaction(
|
||||
3,
|
||||
common.HexToAddress("b94f5374fce5edbc8e2a8697c15331677e6ebf0b"),
|
||||
big.NewInt(10),
|
||||
big.NewInt(2000),
|
||||
big.NewInt(1),
|
||||
common.FromHex("5544"),
|
||||
).WithSignature(
|
||||
common.Hex2Bytes("98ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4a8887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a301"),
|
||||
)
|
||||
)
|
||||
|
||||
func TestTransactionHash(t *testing.T) {
|
||||
// "EmptyTransaction"
|
||||
if emptyTx.Hash() != common.HexToHash("c775b99e7ad12f50d819fcd602390467e28141316969f4b57f0626f74fe3b386") {
|
||||
t.Errorf("empty transaction hash mismatch, got %x", emptyTx.Hash())
|
||||
}
|
||||
|
||||
// "RightVRSTest"
|
||||
if rightvrsTx.Hash() != common.HexToHash("fe7a79529ed5f7c3375d06b26b186a8644e0e16c373d7a12be41c62d6042b77a") {
|
||||
t.Errorf("RightVRS transaction hash mismatch, got %x", rightvrsTx.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
func TestTransactionEncode(t *testing.T) {
|
||||
// "RightVRSTest"
|
||||
txb, err := rlp.EncodeToBytes(rightvrsTx)
|
||||
if err != nil {
|
||||
t.Fatalf("encode error: %v", err)
|
||||
@ -72,19 +67,16 @@ func defaultTestKey() (*ecdsa.PrivateKey, common.Address) {
|
||||
|
||||
func TestRecipientEmpty(t *testing.T) {
|
||||
_, addr := defaultTestKey()
|
||||
|
||||
tx, err := decodeTx(common.Hex2Bytes("f8498080808080011ca09b16de9d5bdee2cf56c28d16275a4da68cd30273e2525f3959f5d62557489921a0372ebd8fb3345f7db7b5a86d42e24d36e983e259b0664ceb8c227ec9af572f3d"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
from, err := tx.From()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
if addr != from {
|
||||
t.Error("derived address doesn't match")
|
||||
}
|
||||
|
@@ -10,32 +10,32 @@ import (
|
||||
)
|
||||
|
||||
type VMEnv struct {
|
||||
state *state.StateDB
|
||||
block *types.Block
|
||||
msg Message
|
||||
depth int
|
||||
chain *ChainManager
|
||||
typ vm.Type
|
||||
state *state.StateDB
|
||||
header *types.Header
|
||||
msg Message
|
||||
depth int
|
||||
chain *ChainManager
|
||||
typ vm.Type
|
||||
// structured logging
|
||||
logs []vm.StructLog
|
||||
}
|
||||
|
||||
func NewEnv(state *state.StateDB, chain *ChainManager, msg Message, block *types.Block) *VMEnv {
|
||||
func NewEnv(state *state.StateDB, chain *ChainManager, msg Message, header *types.Header) *VMEnv {
|
||||
return &VMEnv{
|
||||
chain: chain,
|
||||
state: state,
|
||||
block: block,
|
||||
msg: msg,
|
||||
typ: vm.StdVmTy,
|
||||
chain: chain,
|
||||
state: state,
|
||||
header: header,
|
||||
msg: msg,
|
||||
typ: vm.StdVmTy,
|
||||
}
|
||||
}
|
||||
|
||||
func (self *VMEnv) Origin() common.Address { f, _ := self.msg.From(); return f }
|
||||
func (self *VMEnv) BlockNumber() *big.Int { return self.block.Number() }
|
||||
func (self *VMEnv) Coinbase() common.Address { return self.block.Coinbase() }
|
||||
func (self *VMEnv) Time() int64 { return self.block.Time() }
|
||||
func (self *VMEnv) Difficulty() *big.Int { return self.block.Difficulty() }
|
||||
func (self *VMEnv) GasLimit() *big.Int { return self.block.GasLimit() }
|
||||
func (self *VMEnv) BlockNumber() *big.Int { return self.header.Number }
|
||||
func (self *VMEnv) Coinbase() common.Address { return self.header.Coinbase }
|
||||
func (self *VMEnv) Time() int64 { return int64(self.header.Time) }
|
||||
func (self *VMEnv) Difficulty() *big.Int { return self.header.Difficulty }
|
||||
func (self *VMEnv) GasLimit() *big.Int { return self.header.GasLimit }
|
||||
func (self *VMEnv) Value() *big.Int { return self.msg.Value() }
|
||||
func (self *VMEnv) State() *state.StateDB { return self.state }
|
||||
func (self *VMEnv) Depth() int { return self.depth }
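For callers, the switch from *types.Block to *types.Header looks roughly like the fragment below; statedb, chainman and msg are assumed to already exist at the call site, so this is a sketch rather than runnable code.

// before: env := core.NewEnv(statedb, chainman, msg, block)
env := core.NewEnv(statedb, chainman, msg, block.Header())
num := env.BlockNumber() // now read straight off the header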
|
||||
|
@@ -1,7 +1,7 @@
|
||||
package downloader
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
@ -12,58 +12,47 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
)
|
||||
|
||||
var (
|
||||
knownHash = common.Hash{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
|
||||
unknownHash = common.Hash{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}
|
||||
bannedHash = common.Hash{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}
|
||||
|
||||
genesis = createBlock(1, common.Hash{}, knownHash)
|
||||
testdb, _ = ethdb.NewMemDatabase()
|
||||
genesis = core.GenesisBlockForTesting(testdb, common.Address{}, big.NewInt(0))
|
||||
)
|
||||
|
||||
// idCounter is used by the createHashes method to generate deterministic but unique hashes
|
||||
var idCounter = int64(2) // #1 is the genesis block
|
||||
|
||||
// createHashes generates a batch of hashes rooted at a specific point in the chain.
|
||||
func createHashes(amount int, root common.Hash) (hashes []common.Hash) {
|
||||
hashes = make([]common.Hash, amount+1)
|
||||
hashes[len(hashes)-1] = root
|
||||
|
||||
for i := 0; i < len(hashes)-1; i++ {
|
||||
binary.BigEndian.PutUint64(hashes[i][:8], uint64(idCounter))
|
||||
idCounter++
|
||||
// makeChain creates a chain of n blocks starting at and including
|
||||
// parent. the returned hash chain is ordered head->parent.
|
||||
func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
|
||||
blocks := core.GenerateChain(parent, testdb, n, func(i int, gen *core.BlockGen) {
|
||||
gen.SetCoinbase(common.Address{seed})
|
||||
})
|
||||
hashes := make([]common.Hash, n+1)
|
||||
hashes[len(hashes)-1] = parent.Hash()
|
||||
blockm := make(map[common.Hash]*types.Block, n+1)
|
||||
blockm[parent.Hash()] = parent
|
||||
for i, b := range blocks {
|
||||
hashes[len(hashes)-i-2] = b.Hash()
|
||||
blockm[b.Hash()] = b
|
||||
}
|
||||
return
|
||||
return hashes, blockm
|
||||
}
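makeChain delegates the chain building to core.GenerateChain (introduced elsewhere in this PR); a minimal direct call, reusing the genesis and testdb fixtures declared above, might look like this fragment (fmt is assumed to be imported, as it already is in this test file).

blocks := core.GenerateChain(genesis, testdb, 3, func(i int, gen *core.BlockGen) {
	gen.SetCoinbase(common.Address{byte(i + 1)}) // make each block unique
})
for _, b := range blocks {
	fmt.Printf("#%d %x parent=%x\n", b.NumberU64(), b.Hash(), b.ParentHash())
}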
|
||||
|
||||
// createBlock assembles a new block at the given chain height.
|
||||
func createBlock(i int, parent, hash common.Hash) *types.Block {
|
||||
header := &types.Header{Number: big.NewInt(int64(i))}
|
||||
block := types.NewBlockWithHeader(header)
|
||||
block.HeaderHash = hash
|
||||
block.ParentHeaderHash = parent
|
||||
return block
|
||||
}
|
||||
|
||||
// copyBlock makes a deep copy of a block suitable for local modifications.
|
||||
func copyBlock(block *types.Block) *types.Block {
|
||||
return createBlock(int(block.Number().Int64()), block.ParentHeaderHash, block.HeaderHash)
|
||||
}
|
||||
|
||||
// createBlocksFromHashes assembles a collection of blocks, each having a correct
|
||||
// place in the given hash chain.
|
||||
func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
|
||||
blocks := make(map[common.Hash]*types.Block)
|
||||
for i := 0; i < len(hashes); i++ {
|
||||
parent := knownHash
|
||||
if i < len(hashes)-1 {
|
||||
parent = hashes[i+1]
|
||||
}
|
||||
blocks[hashes[i]] = createBlock(len(hashes)-i, parent, hashes[i])
|
||||
// makeChainFork creates two chains of length n, such that h1[:f] and
|
||||
// h2[:f] are different but have a common suffix of length n-f.
|
||||
func makeChainFork(n, f int, parent *types.Block) (h1, h2 []common.Hash, b1, b2 map[common.Hash]*types.Block) {
|
||||
// Create the common suffix.
|
||||
h, b := makeChain(n-f-1, 0, parent)
|
||||
// Create the forks.
|
||||
h1, b1 = makeChain(f, 1, b[h[0]])
|
||||
h1 = append(h1, h[1:]...)
|
||||
h2, b2 = makeChain(f, 2, b[h[0]])
|
||||
h2 = append(h2, h[1:]...)
|
||||
for hash, block := range b {
|
||||
b1[hash] = block
|
||||
b2[hash] = block
|
||||
}
|
||||
return blocks
|
||||
return h1, h2, b1, b2
|
||||
}
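A sketch of how the fork helper is consumed by the attack tests below: two chains of n blocks that share a common suffix of n-f blocks and diverge for the last f (the concrete numbers here are illustrative).

hashes, forkHashes, blocks, forkBlocks := makeChainFork(32, 16, genesis)
tester := newTester()
tester.newPeer("valid", hashes, blocks)
tester.newPeer("fork", forkHashes, forkBlocks)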
|
||||
|
||||
// downloadTester is a test simulator for mocking out local block chain.
|
||||
@ -81,8 +70,8 @@ type downloadTester struct {
|
||||
// newTester creates a new downloader test mocker.
|
||||
func newTester() *downloadTester {
|
||||
tester := &downloadTester{
|
||||
ownHashes: []common.Hash{knownHash},
|
||||
ownBlocks: map[common.Hash]*types.Block{knownHash: genesis},
|
||||
ownHashes: []common.Hash{genesis.Hash()},
|
||||
ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
|
||||
peerHashes: make(map[string][]common.Hash),
|
||||
peerBlocks: make(map[string]map[common.Hash]*types.Block),
|
||||
}
|
||||
@ -136,10 +125,9 @@ func (dl *downloadTester) newSlowPeer(id string, hashes []common.Hash, blocks ma
|
||||
// Assign the owned hashes and blocks to the peer (deep copy)
|
||||
dl.peerHashes[id] = make([]common.Hash, len(hashes))
|
||||
copy(dl.peerHashes[id], hashes)
|
||||
|
||||
dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
|
||||
for hash, block := range blocks {
|
||||
dl.peerBlocks[id][hash] = copyBlock(block)
|
||||
dl.peerBlocks[id][hash] = block
|
||||
}
|
||||
}
|
||||
return err
|
||||
@ -210,8 +198,7 @@ func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([
|
||||
func TestSynchronisation(t *testing.T) {
|
||||
// Create a small enough block chain to download and the tester
|
||||
targetBlocks := blockCacheLimit - 15
|
||||
hashes := createHashes(targetBlocks, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
|
||||
tester := newTester()
|
||||
tester.newPeer("peer", hashes, blocks)
|
||||
@ -242,8 +229,7 @@ func TestInactiveDownloader(t *testing.T) {
|
||||
func TestCancel(t *testing.T) {
|
||||
// Create a small enough block chain to download and the tester
|
||||
targetBlocks := blockCacheLimit - 15
|
||||
hashes := createHashes(targetBlocks, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
|
||||
tester := newTester()
|
||||
tester.newPeer("peer", hashes, blocks)
|
||||
@ -270,8 +256,7 @@ func TestCancel(t *testing.T) {
|
||||
func TestThrottling(t *testing.T) {
|
||||
// Create a long block chain to download and the tester
|
||||
targetBlocks := 8 * blockCacheLimit
|
||||
hashes := createHashes(targetBlocks, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
|
||||
tester := newTester()
|
||||
tester.newPeer("peer", hashes, blocks)
|
||||
@ -327,9 +312,7 @@ func TestMultiSynchronisation(t *testing.T) {
|
||||
// Create various peers with various parts of the chain
|
||||
targetPeers := 16
|
||||
targetBlocks := targetPeers*blockCacheLimit - 15
|
||||
|
||||
hashes := createHashes(targetBlocks, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
|
||||
tester := newTester()
|
||||
for i := 0; i < targetPeers; i++ {
|
||||
@ -362,9 +345,7 @@ func TestSlowSynchronisation(t *testing.T) {
|
||||
targetCycles := 2
|
||||
targetBlocks := targetCycles*blockCacheLimit - 15
|
||||
targetIODelay := time.Second
|
||||
|
||||
hashes := createHashes(targetBlocks, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
|
||||
tester.newSlowPeer("fast", hashes, blocks, 0)
|
||||
tester.newSlowPeer("slow", hashes, blocks, targetIODelay)
|
||||
@ -389,14 +370,12 @@ func TestSlowSynchronisation(t *testing.T) {
|
||||
func TestNonExistingParentAttack(t *testing.T) {
|
||||
tester := newTester()
|
||||
|
||||
// Forge a single-link chain with a forged header
|
||||
hashes := createHashes(1, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(1, 0, genesis)
|
||||
tester.newPeer("valid", hashes, blocks)
|
||||
|
||||
hashes = createHashes(1, knownHash)
|
||||
blocks = createBlocksFromHashes(hashes)
|
||||
blocks[hashes[0]].ParentHeaderHash = unknownHash
|
||||
wrongblock := types.NewBlock(&types.Header{}, nil, nil, nil)
|
||||
wrongblock.Td = blocks[hashes[0]].Td
|
||||
hashes, blocks = makeChain(1, 0, wrongblock)
|
||||
tester.newPeer("attack", hashes, blocks)
|
||||
|
||||
// Try and sync with the malicious node and check that it fails
|
||||
@ -421,8 +400,7 @@ func TestRepeatingHashAttack(t *testing.T) { // TODO: Is this thing valid??
|
||||
tester := newTester()
|
||||
|
||||
// Create a valid chain, but drop the last link
|
||||
hashes := createHashes(blockCacheLimit, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(blockCacheLimit, 0, genesis)
|
||||
tester.newPeer("valid", hashes, blocks)
|
||||
tester.newPeer("attack", hashes[:len(hashes)-1], blocks)
|
||||
|
||||
@ -452,11 +430,10 @@ func TestNonExistingBlockAttack(t *testing.T) {
|
||||
tester := newTester()
|
||||
|
||||
// Create a valid chain, but forge the last link
|
||||
hashes := createHashes(blockCacheLimit, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(blockCacheLimit, 0, genesis)
|
||||
tester.newPeer("valid", hashes, blocks)
|
||||
|
||||
hashes[len(hashes)/2] = unknownHash
|
||||
hashes[len(hashes)/2] = common.Hash{}
|
||||
tester.newPeer("attack", hashes, blocks)
|
||||
|
||||
// Try and sync with the malicious node and check that it fails
|
||||
@ -475,8 +452,7 @@ func TestInvalidHashOrderAttack(t *testing.T) {
|
||||
tester := newTester()
|
||||
|
||||
// Create a valid long chain, but reverse some hashes within
|
||||
hashes := createHashes(4*blockCacheLimit, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(4*blockCacheLimit, 0, genesis)
|
||||
tester.newPeer("valid", hashes, blocks)
|
||||
|
||||
chunk1 := make([]common.Hash, blockCacheLimit)
|
||||
@ -506,11 +482,15 @@ func TestMadeupHashChainAttack(t *testing.T) {
|
||||
crossCheckCycle = 25 * time.Millisecond
|
||||
|
||||
// Create a long chain of hashes without backing blocks
|
||||
hashes := createHashes(4*blockCacheLimit, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(4*blockCacheLimit, 0, genesis)
|
||||
|
||||
randomHashes := make([]common.Hash, 1024*blockCacheLimit)
|
||||
for i := range randomHashes {
|
||||
rand.Read(randomHashes[i][:])
|
||||
}
|
||||
|
||||
tester.newPeer("valid", hashes, blocks)
|
||||
tester.newPeer("attack", createHashes(1024*blockCacheLimit, knownHash), nil)
|
||||
tester.newPeer("attack", randomHashes, nil)
|
||||
|
||||
// Try and sync with the malicious node and check that it fails
|
||||
if err := tester.sync("attack"); err != errCrossCheckFailed {
|
||||
@ -528,12 +508,16 @@ func TestMadeupHashChainAttack(t *testing.T) {
|
||||
// one by one prevents reliable block/parent verification.
|
||||
func TestMadeupHashChainDrippingAttack(t *testing.T) {
|
||||
// Create a random chain of hashes to drip
|
||||
hashes := createHashes(16*blockCacheLimit, knownHash)
|
||||
randomHashes := make([]common.Hash, 16*blockCacheLimit)
|
||||
for i := range randomHashes {
|
||||
rand.Read(randomHashes[i][:])
|
||||
}
|
||||
randomHashes[len(randomHashes)-1] = genesis.Hash()
|
||||
tester := newTester()
|
||||
|
||||
// Try and sync with the attacker, one hash at a time
|
||||
tester.maxHashFetch = 1
|
||||
tester.newPeer("attack", hashes, nil)
|
||||
tester.newPeer("attack", randomHashes, nil)
|
||||
if err := tester.sync("attack"); err != errStallingPeer {
|
||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
|
||||
}
|
||||
@ -549,9 +533,7 @@ func TestMadeupBlockChainAttack(t *testing.T) {
|
||||
crossCheckCycle = 25 * time.Millisecond
|
||||
|
||||
// Create a long chain of blocks and simulate an invalid chain by dropping every second
|
||||
hashes := createHashes(16*blockCacheLimit, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
|
||||
hashes, blocks := makeChain(16*blockCacheLimit, 0, genesis)
|
||||
gapped := make([]common.Hash, len(hashes)/2)
|
||||
for i := 0; i < len(gapped); i++ {
|
||||
gapped[i] = hashes[2*i]
|
||||
@ -572,65 +554,26 @@ func TestMadeupBlockChainAttack(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Advanced form of the above forged blockchain attack, where not only does the
|
||||
// attacker make up a valid hashes for random blocks, but also forges the block
|
||||
// parents to point to existing hashes.
|
||||
func TestMadeupParentBlockChainAttack(t *testing.T) {
|
||||
tester := newTester()
|
||||
|
||||
defaultBlockTTL := blockSoftTTL
|
||||
defaultCrossCheckCycle := crossCheckCycle
|
||||
|
||||
blockSoftTTL = 100 * time.Millisecond
|
||||
crossCheckCycle = 25 * time.Millisecond
|
||||
|
||||
// Create a long chain of blocks and simulate an invalid chain by dropping every second
|
||||
hashes := createHashes(16*blockCacheLimit, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
tester.newPeer("valid", hashes, blocks)
|
||||
|
||||
for _, block := range blocks {
|
||||
block.ParentHeaderHash = knownHash // Simulate pointing to already known hash
|
||||
}
|
||||
tester.newPeer("attack", hashes, blocks)
|
||||
|
||||
// Try and sync with the malicious node and check that it fails
|
||||
if err := tester.sync("attack"); err != errCrossCheckFailed {
|
||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed)
|
||||
}
|
||||
// Ensure that a valid chain can still pass sync
|
||||
blockSoftTTL = defaultBlockTTL
|
||||
crossCheckCycle = defaultCrossCheckCycle
|
||||
|
||||
if err := tester.sync("valid"); err != nil {
|
||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that if one/multiple malicious peers try to feed a banned blockchain to
|
||||
// tests that if one/multiple malicious peers try to feed a banned blockchain to
|
||||
// the downloader, it will not keep refetching the same chain indefinitely, but
|
||||
// gradually block pieces of it, until it's head is also blocked.
|
||||
// gradually block pieces of it, until its head is also blocked.
|
||||
func TestBannedChainStarvationAttack(t *testing.T) {
|
||||
// Create the tester and ban the selected hash
|
||||
n := 8 * blockCacheLimit
|
||||
fork := n/2 - 23
|
||||
hashes, forkHashes, blocks, forkBlocks := makeChainFork(n, fork, genesis)
|
||||
|
||||
// Create the tester and ban the selected hash.
|
||||
tester := newTester()
|
||||
tester.downloader.banned.Add(bannedHash)
|
||||
|
||||
// Construct a valid chain, for it and ban the fork
|
||||
hashes := createHashes(8*blockCacheLimit, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
tester.downloader.banned.Add(forkHashes[fork-1])
|
||||
tester.newPeer("valid", hashes, blocks)
|
||||
|
||||
fork := len(hashes)/2 - 23
|
||||
hashes = append(createHashes(4*blockCacheLimit, bannedHash), hashes[fork:]...)
|
||||
blocks = createBlocksFromHashes(hashes)
|
||||
tester.newPeer("attack", hashes, blocks)
|
||||
tester.newPeer("attack", forkHashes, forkBlocks)
|
||||
|
||||
// Iteratively try to sync, and verify that the banned hash list grows until
|
||||
// the head of the invalid chain is blocked too.
|
||||
for banned := tester.downloader.banned.Size(); ; {
|
||||
// Try to sync with the attacker, check hash chain failure
|
||||
if err := tester.sync("attack"); err != errInvalidChain {
|
||||
if tester.downloader.banned.Has(hashes[0]) && err == errBannedHead {
|
||||
if tester.downloader.banned.Has(forkHashes[0]) && err == errBannedHead {
|
||||
break
|
||||
}
|
||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain)
|
||||
@ -643,7 +586,7 @@ func TestBannedChainStarvationAttack(t *testing.T) {
|
||||
banned = bans
|
||||
}
|
||||
// Check that after banning an entire chain, bad peers get dropped
|
||||
if err := tester.newPeer("new attacker", hashes, blocks); err != errBannedHead {
|
||||
if err := tester.newPeer("new attacker", forkHashes, forkBlocks); err != errBannedHead {
|
||||
t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead)
|
||||
}
|
||||
if peer := tester.downloader.peers.Peer("new attacker"); peer != nil {
|
||||
@ -659,9 +602,14 @@ func TestBannedChainStarvationAttack(t *testing.T) {
|
||||
// gradually banned, it will have an upper limit on the consumed memory and also
|
||||
// the origin bad hashes will not be evacuated.
|
||||
func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
|
||||
// Create the tester and ban the selected hash
|
||||
// Construct a banned chain with more chunks than the ban limit
|
||||
n := 8 * blockCacheLimit
|
||||
fork := n/2 - 23
|
||||
hashes, forkHashes, blocks, forkBlocks := makeChainFork(n, fork, genesis)
|
||||
|
||||
// Create the tester and ban the root hash of the fork.
|
||||
tester := newTester()
|
||||
tester.downloader.banned.Add(bannedHash)
|
||||
tester.downloader.banned.Add(forkHashes[fork-1])
|
||||
|
||||
// Reduce the test size a bit
|
||||
defaultMaxBlockFetch := MaxBlockFetch
|
||||
@ -670,15 +618,8 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
|
||||
MaxBlockFetch = 4
|
||||
maxBannedHashes = 256
|
||||
|
||||
// Construct a banned chain with more chunks than the ban limit
|
||||
hashes := createHashes(8*blockCacheLimit, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
tester.newPeer("valid", hashes, blocks)
|
||||
|
||||
fork := len(hashes)/2 - 23
|
||||
hashes = append(createHashes(maxBannedHashes*MaxBlockFetch, bannedHash), hashes[fork:]...)
|
||||
blocks = createBlocksFromHashes(hashes)
|
||||
tester.newPeer("attack", hashes, blocks)
|
||||
tester.newPeer("attack", forkHashes, forkBlocks)
|
||||
|
||||
// Iteratively try to sync, and verify that the banned hash list grows until
|
||||
// the head of the invalid chain is blocked too.
|
||||
@ -687,8 +628,8 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
|
||||
if err := tester.sync("attack"); err != errInvalidChain {
|
||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain)
|
||||
}
|
||||
// Short circuit if the entire chain was banned
|
||||
if tester.downloader.banned.Has(hashes[0]) {
|
||||
// Short circuit if the entire chain was banned.
|
||||
if tester.downloader.banned.Has(forkHashes[0]) {
|
||||
break
|
||||
}
|
||||
// Otherwise ensure we never exceed the memory allowance and the hard coded bans are untouched
|
||||
@ -719,8 +660,7 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
|
||||
func TestOverlappingDeliveryAttack(t *testing.T) {
|
||||
// Create an arbitrary batch of blocks ( < cache-size not to block)
|
||||
targetBlocks := blockCacheLimit - 23
|
||||
hashes := createHashes(targetBlocks, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
|
||||
// Register an attacker that always returns non-requested blocks too
|
||||
tester := newTester()
|
||||
@ -772,7 +712,7 @@ func TestHashAttackerDropping(t *testing.T) {
|
||||
for i, tt := range tests {
|
||||
// Register a new peer and ensure it's presence
|
||||
id := fmt.Sprintf("test %d", i)
|
||||
if err := tester.newPeer(id, []common.Hash{knownHash}, nil); err != nil {
|
||||
if err := tester.newPeer(id, []common.Hash{genesis.Hash()}, nil); err != nil {
|
||||
t.Fatalf("test %d: failed to register new peer: %v", i, err)
|
||||
}
|
||||
if _, ok := tester.peerHashes[id]; !ok {
|
||||
@ -781,7 +721,7 @@ func TestHashAttackerDropping(t *testing.T) {
|
||||
// Simulate a synchronisation and check the required result
|
||||
tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
|
||||
|
||||
tester.downloader.Synchronise(id, knownHash)
|
||||
tester.downloader.Synchronise(id, genesis.Hash())
|
||||
if _, ok := tester.peerHashes[id]; !ok != tt.drop {
|
||||
t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
|
||||
}
|
||||
@ -794,7 +734,10 @@ func TestBlockAttackerDropping(t *testing.T) {
|
||||
tests := []struct {
|
||||
failure bool
|
||||
drop bool
|
||||
}{{true, true}, {false, false}}
|
||||
}{
|
||||
{true, true},
|
||||
{false, false},
|
||||
}
|
||||
|
||||
// Run the tests and check disconnection status
|
||||
tester := newTester()
|
||||
@ -808,9 +751,10 @@ func TestBlockAttackerDropping(t *testing.T) {
|
||||
t.Fatalf("test %d: registered peer not found", i)
|
||||
}
|
||||
// Assemble a good or bad block, depending of the test
|
||||
raw := createBlock(1, knownHash, common.Hash{})
|
||||
raw := core.GenerateChain(genesis, testdb, 1, nil)[0]
|
||||
if tt.failure {
|
||||
raw = createBlock(1, unknownHash, common.Hash{})
|
||||
parent := types.NewBlock(&types.Header{}, nil, nil, nil)
|
||||
raw = core.GenerateChain(parent, testdb, 1, nil)[0]
|
||||
}
|
||||
block := &Block{OriginPeer: id, RawBlock: raw}
|
||||
|
||||
|
@@ -1,7 +1,6 @@
|
||||
package fetcher
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/big"
|
||||
"sync"
|
||||
@ -10,58 +9,32 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
)
|
||||
|
||||
var (
|
||||
knownHash = common.Hash{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
|
||||
unknownHash = common.Hash{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}
|
||||
bannedHash = common.Hash{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}
|
||||
|
||||
genesis = createBlock(1, common.Hash{}, knownHash)
|
||||
testdb, _ = ethdb.NewMemDatabase()
|
||||
genesis = core.GenesisBlockForTesting(testdb, common.Address{}, big.NewInt(0))
|
||||
unknownBlock = types.NewBlock(&types.Header{}, nil, nil, nil)
|
||||
)
|
||||
|
||||
// idCounter is used by the createHashes method to generate deterministic but unique hashes
var idCounter = int64(2) // #1 is the genesis block
|
||||
|
||||
// createHashes generates a batch of hashes rooted at a specific point in the chain.
|
||||
func createHashes(amount int, root common.Hash) (hashes []common.Hash) {
|
||||
hashes = make([]common.Hash, amount+1)
|
||||
hashes[len(hashes)-1] = root
|
||||
|
||||
for i := 0; i < len(hashes)-1; i++ {
|
||||
binary.BigEndian.PutUint64(hashes[i][:8], uint64(idCounter))
|
||||
idCounter++
|
||||
// makeChain creates a chain of n blocks starting at and including parent.
|
||||
// the returned hash chain is ordered head->parent.
|
||||
func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
|
||||
blocks := core.GenerateChain(parent, testdb, n, func(i int, gen *core.BlockGen) {
|
||||
gen.SetCoinbase(common.Address{seed})
|
||||
})
|
||||
hashes := make([]common.Hash, n+1)
|
||||
hashes[len(hashes)-1] = parent.Hash()
|
||||
blockm := make(map[common.Hash]*types.Block, n+1)
|
||||
blockm[parent.Hash()] = parent
|
||||
for i, b := range blocks {
|
||||
hashes[len(hashes)-i-2] = b.Hash()
|
||||
blockm[b.Hash()] = b
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// createBlock assembles a new block at the given chain height.
|
||||
func createBlock(i int, parent, hash common.Hash) *types.Block {
|
||||
header := &types.Header{Number: big.NewInt(int64(i))}
|
||||
block := types.NewBlockWithHeader(header)
|
||||
block.HeaderHash = hash
|
||||
block.ParentHeaderHash = parent
|
||||
return block
|
||||
}
|
||||
|
||||
// copyBlock makes a deep copy of a block suitable for local modifications.
|
||||
func copyBlock(block *types.Block) *types.Block {
|
||||
return createBlock(int(block.Number().Int64()), block.ParentHeaderHash, block.HeaderHash)
|
||||
}
|
||||
|
||||
// createBlocksFromHashes assembles a collection of blocks, each having a correct
|
||||
// place in the given hash chain.
|
||||
func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
|
||||
blocks := make(map[common.Hash]*types.Block)
|
||||
for i := 0; i < len(hashes); i++ {
|
||||
parent := knownHash
|
||||
if i < len(hashes)-1 {
|
||||
parent = hashes[i+1]
|
||||
}
|
||||
blocks[hashes[i]] = createBlock(len(hashes)-i, parent, hashes[i])
|
||||
}
|
||||
return blocks
|
||||
return hashes, blockm
|
||||
}
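As an aside for readers of the new helper: the index arithmetic above builds the hash slice so that the chain head sits at index 0 and the parent at the end. A standalone sketch of that ordering, using plain strings in place of the real hash and block types (illustrative only, not go-ethereum code):

package main

import "fmt"

func main() {
    // Pretend hashes: one parent plus n = 3 generated blocks (oldest -> newest).
    parent := "h0"
    children := []string{"h1", "h2", "h3"}

    n := len(children)
    hashes := make([]string, n+1)
    hashes[len(hashes)-1] = parent // parent goes at the very end
    for i, h := range children {
        hashes[len(hashes)-i-2] = h // newest block ends up at index 0
    }
    fmt.Println(hashes) // [h3 h2 h1 h0] -- ordered head -> parent
}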
// fetcherTester is a test simulator for mocking out local block chain.
|
||||
@ -77,8 +50,8 @@ type fetcherTester struct {
|
||||
// newTester creates a new fetcher test mocker.
|
||||
func newTester() *fetcherTester {
|
||||
tester := &fetcherTester{
|
||||
hashes: []common.Hash{knownHash},
|
||||
blocks: map[common.Hash]*types.Block{knownHash: genesis},
|
||||
hashes: []common.Hash{genesis.Hash()},
|
||||
blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
|
||||
}
|
||||
tester.fetcher = New(tester.getBlock, tester.verifyBlock, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
|
||||
tester.fetcher.Start()
|
||||
@ -138,10 +111,9 @@ func (f *fetcherTester) dropPeer(peer string) {
|
||||
|
||||
// makeFetcher retrieves a fetcher associated with a simulated peer.
func (f *fetcherTester) makeFetcher(blocks map[common.Hash]*types.Block) blockRequesterFn {
|
||||
// Copy all the blocks to ensure they are not tampered with
|
||||
closure := make(map[common.Hash]*types.Block)
|
||||
for hash, block := range blocks {
|
||||
closure[hash] = copyBlock(block)
|
||||
closure[hash] = block
|
||||
}
|
||||
// Create a function that returns blocks from the closure
|
||||
return func(hashes []common.Hash) error {
|
||||
@ -195,8 +167,7 @@ func verifyImportDone(t *testing.T, imported chan *types.Block) {
|
||||
func TestSequentialAnnouncements(t *testing.T) {
|
||||
// Create a chain of blocks to import
|
||||
targetBlocks := 4 * hashLimit
|
||||
hashes := createHashes(targetBlocks, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
|
||||
tester := newTester()
|
||||
fetcher := tester.makeFetcher(blocks)
|
||||
@ -217,8 +188,7 @@ func TestSequentialAnnouncements(t *testing.T) {
|
||||
func TestConcurrentAnnouncements(t *testing.T) {
|
||||
// Create a chain of blocks to import
|
||||
targetBlocks := 4 * hashLimit
|
||||
hashes := createHashes(targetBlocks, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
|
||||
// Assemble a tester with a built in counter for the requests
|
||||
tester := newTester()
|
||||
@ -253,8 +223,7 @@ func TestConcurrentAnnouncements(t *testing.T) {
|
||||
func TestOverlappingAnnouncements(t *testing.T) {
|
||||
// Create a chain of blocks to import
|
||||
targetBlocks := 4 * hashLimit
|
||||
hashes := createHashes(targetBlocks, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
|
||||
tester := newTester()
|
||||
fetcher := tester.makeFetcher(blocks)
|
||||
@ -280,8 +249,7 @@ func TestOverlappingAnnouncements(t *testing.T) {
|
||||
// Tests that announcements already being retrieved will not be duplicated.
func TestPendingDeduplication(t *testing.T) {
|
||||
// Create a hash and corresponding block
|
||||
hashes := createHashes(1, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(1, 0, genesis)
|
||||
|
||||
// Assemble a tester with a built in counter and delayed fetcher
|
||||
tester := newTester()
|
||||
@ -319,9 +287,9 @@ func TestPendingDeduplication(t *testing.T) {
|
||||
// imported when all the gaps are filled in.
|
||||
func TestRandomArrivalImport(t *testing.T) {
|
||||
// Create a chain of blocks to import, and choose one to delay
|
||||
hashes := createHashes(maxQueueDist, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
skip := maxQueueDist / 2
|
||||
targetBlocks := maxQueueDist
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
skip := targetBlocks / 2
|
||||
|
||||
tester := newTester()
|
||||
fetcher := tester.makeFetcher(blocks)
|
||||
@ -345,9 +313,9 @@ func TestRandomArrivalImport(t *testing.T) {
|
||||
// are correctly scheduled, filling in the import queue gaps.
func TestQueueGapFill(t *testing.T) {
|
||||
// Create a chain of blocks to import, and choose one to not announce at all
|
||||
hashes := createHashes(maxQueueDist, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
skip := maxQueueDist / 2
|
||||
targetBlocks := maxQueueDist
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
skip := targetBlocks / 2
|
||||
|
||||
tester := newTester()
|
||||
fetcher := tester.makeFetcher(blocks)
|
||||
@ -371,8 +339,7 @@ func TestQueueGapFill(t *testing.T) {
|
||||
// announces, etc) do not get scheduled for import multiple times.
|
||||
func TestImportDeduplication(t *testing.T) {
|
||||
// Create two blocks to import (one for duplication, the other for stalling)
|
||||
hashes := createHashes(2, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
hashes, blocks := makeChain(2, 0, genesis)
|
||||
|
||||
// Create the tester and wrap the importer with a counter
|
||||
tester := newTester()
|
||||
@ -410,9 +377,7 @@ func TestImportDeduplication(t *testing.T) {
|
||||
// discarded to prevent wasting resources on useless blocks from faulty peers.
func TestDistantDiscarding(t *testing.T) {
|
||||
// Create a long chain to import
|
||||
hashes := createHashes(3*maxQueueDist, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
|
||||
hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
|
||||
head := hashes[len(hashes)/2]
|
||||
|
||||
// Create a tester and simulate a head block being the middle of the above chain
|
||||
@ -445,11 +410,11 @@ func TestHashMemoryExhaustionAttack(t *testing.T) {
|
||||
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
|
||||
|
||||
// Create a valid chain and an infinite junk chain
|
||||
hashes := createHashes(hashLimit+2*maxQueueDist, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
targetBlocks := hashLimit + 2*maxQueueDist
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
valid := tester.makeFetcher(blocks)
|
||||
|
||||
attack := createHashes(hashLimit+2*maxQueueDist, unknownHash)
|
||||
attack, _ := makeChain(targetBlocks, 0, unknownBlock)
|
||||
attacker := tester.makeFetcher(nil)
|
||||
|
||||
// Feed the tester a huge hashset from the attacker, and a limited from the valid peer
|
||||
@ -484,13 +449,11 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
|
||||
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
|
||||
|
||||
// Create a valid chain and a batch of dangling (but in range) blocks
|
||||
hashes := createHashes(blockLimit+2*maxQueueDist, knownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
|
||||
targetBlocks := hashLimit + 2*maxQueueDist
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
attack := make(map[common.Hash]*types.Block)
|
||||
for len(attack) < blockLimit+2*maxQueueDist {
|
||||
hashes := createHashes(maxQueueDist-1, unknownHash)
|
||||
blocks := createBlocksFromHashes(hashes)
|
||||
for i := byte(0); len(attack) < blockLimit+2*maxQueueDist; i++ {
|
||||
hashes, blocks := makeChain(maxQueueDist-1, i, unknownBlock)
|
||||
for _, hash := range hashes[:maxQueueDist-2] {
|
||||
attack[hash] = blocks[hash]
|
||||
}
|
||||
@ -499,7 +462,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
|
||||
for _, block := range attack {
|
||||
tester.fetcher.Enqueue("attacker", block)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
if queued := tester.fetcher.queue.Size(); queued != blockLimit {
|
||||
t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit)
|
||||
}
|
||||
|
@ -47,14 +47,21 @@ func NewGasPriceOracle(eth *Ethereum) (self *GasPriceOracle) {
|
||||
}
|
||||
|
||||
func (self *GasPriceOracle) processPastBlocks() {
|
||||
last := self.chain.CurrentBlock().NumberU64()
|
||||
first := uint64(0)
|
||||
last := int64(-1)
|
||||
cblock := self.chain.CurrentBlock()
|
||||
if cblock != nil {
|
||||
last = int64(cblock.NumberU64())
|
||||
}
|
||||
first := int64(0)
|
||||
if last > gpoProcessPastBlocks {
|
||||
first = last - gpoProcessPastBlocks
|
||||
}
|
||||
self.firstProcessed = first
|
||||
self.firstProcessed = uint64(first)
|
||||
for i := first; i <= last; i++ {
|
||||
self.processBlock(self.chain.GetBlockByNumber(i))
|
||||
block := self.chain.GetBlockByNumber(uint64(i))
|
||||
if block != nil {
|
||||
self.processBlock(block)
|
||||
}
|
||||
}
|
||||
|
||||
}
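A standalone sketch of the window arithmetic introduced above, with gpoProcessPastBlocks assumed to be 100 purely for illustration. The move to signed integers is what makes the "no current block yet" case harmless: with last == -1 the loop body never runs, and first is clamped so at most gpoProcessPastBlocks blocks are rescanned.

package main

import "fmt"

const gpoProcessPastBlocks = 100 // assumed value, for illustration only

func window(current int64) (first, last int64) {
    last = current // pass -1 to model "no current block yet"
    first = int64(0)
    if last > gpoProcessPastBlocks {
        first = last - gpoProcessPastBlocks
    }
    return first, last
}

func main() {
    fmt.Println(window(-1))   // 0 -1  -> "for i := first; i <= last" never runs
    fmt.Println(window(42))   // 0 42
    fmt.Println(window(5000)) // 4900 5000
}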
@ -133,20 +140,20 @@ func (self *GasPriceOracle) lowestPrice(block *types.Block) *big.Int {
|
||||
gasUsed = recepits[len(recepits)-1].CumulativeGasUsed
|
||||
}
|
||||
|
||||
if new(big.Int).Mul(gasUsed, big.NewInt(100)).Cmp(new(big.Int).Mul(block.Header().GasLimit,
|
||||
if new(big.Int).Mul(gasUsed, big.NewInt(100)).Cmp(new(big.Int).Mul(block.GasLimit(),
|
||||
big.NewInt(int64(self.eth.GpoFullBlockRatio)))) < 0 {
|
||||
// block is not full, could have posted a tx with MinGasPrice
|
||||
return self.eth.GpoMinGasPrice
|
||||
}
|
||||
|
||||
if len(block.Transactions()) < 1 {
|
||||
txs := block.Transactions()
|
||||
if len(txs) == 0 {
|
||||
return self.eth.GpoMinGasPrice
|
||||
}
|
||||
|
||||
// block is full, find smallest gasPrice
|
||||
minPrice := block.Transactions()[0].GasPrice()
|
||||
for i := 1; i < len(block.Transactions()); i++ {
|
||||
price := block.Transactions()[i].GasPrice()
|
||||
minPrice := txs[0].GasPrice()
|
||||
for i := 1; i < len(txs); i++ {
|
||||
price := txs[i].GasPrice()
|
||||
if price.Cmp(minPrice) < 0 {
|
||||
minPrice = price
|
||||
}
|
||||
|
@ -93,7 +93,7 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
|
||||
manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.InsertChain, manager.removePeer)
|
||||
|
||||
validator := func(block *types.Block, parent *types.Block) error {
|
||||
return core.ValidateHeader(pow, block.Header(), parent.Header(), true)
|
||||
return core.ValidateHeader(pow, block.Header(), parent, true)
|
||||
}
|
||||
heighter := func() uint64 {
|
||||
return manager.chainman.CurrentBlock().NumberU64()
|
||||
|
@ -234,7 +234,7 @@ func (pool *fakeTxPool) GetTransactions() types.Transactions {
|
||||
|
||||
func newtx(from *crypto.Key, nonce uint64, datasize int) *types.Transaction {
|
||||
data := make([]byte, datasize)
|
||||
tx := types.NewTransactionMessage(common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), data)
|
||||
tx.SetNonce(nonce)
|
||||
tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), data)
|
||||
tx, _ = tx.SignECDSA(from.PrivateKey)
|
||||
return tx
|
||||
}
|
||||
|
@ -115,3 +115,7 @@ func (self *LDBDatabase) Close() {
|
||||
self.db.Close()
|
||||
glog.V(logger.Error).Infoln("flushed and closed db:", self.fn)
|
||||
}
|
||||
|
||||
func (self *LDBDatabase) LDB() *leveldb.DB {
|
||||
return self.db
|
||||
}
|
||||
|
@ -90,15 +90,13 @@ done:
|
||||
}
|
||||
}
|
||||
|
||||
func (self *CpuAgent) mine(block *types.Block, stop <- chan struct{}) {
|
||||
func (self *CpuAgent) mine(block *types.Block, stop <-chan struct{}) {
|
||||
glog.V(logger.Debug).Infof("(re)started agent[%d]. mining...\n", self.index)
|
||||
|
||||
// Mine
|
||||
nonce, mixDigest := self.pow.Search(block, stop)
|
||||
if nonce != 0 {
|
||||
block.SetNonce(nonce)
|
||||
block.Header().MixDigest = common.BytesToHash(mixDigest)
|
||||
self.returnCh <- block
|
||||
self.returnCh <- block.WithMiningResult(nonce, common.BytesToHash(mixDigest))
|
||||
} else {
|
||||
self.returnCh <- nil
|
||||
}
|
||||
|
@ -81,9 +81,7 @@ func (a *RemoteAgent) SubmitWork(nonce uint64, mixDigest, seedHash common.Hash)
|
||||
|
||||
// Make sure the external miner was working on the right hash
|
||||
if a.currentWork != nil && a.work != nil {
|
||||
a.currentWork.SetNonce(nonce)
|
||||
a.currentWork.Header().MixDigest = mixDigest
|
||||
a.returnCh <- a.currentWork
|
||||
a.returnCh <- a.currentWork.WithMiningResult(nonce, mixDigest)
|
||||
//a.returnCh <- Work{a.currentWork.Number().Uint64(), nonce, mixDigest.Bytes(), seedHash.Bytes()}
|
||||
return true
|
||||
}
|
||||
|
243
miner/worker.go
@ -49,10 +49,8 @@ type uint64RingBuffer struct {
|
||||
// environment is the workers current environment and holds
|
||||
// all of the current state information
|
||||
type environment struct {
|
||||
totalUsedGas *big.Int // total gas usage in the cycle
|
||||
state *state.StateDB // apply state changes here
|
||||
coinbase *state.StateObject // the miner's account
|
||||
block *types.Block // the new block
|
||||
ancestors *set.Set // ancestor set (used for checking uncle parent validity)
|
||||
family *set.Set // family set (used for checking uncle invalidity)
|
||||
uncles *set.Set // uncle set
|
||||
@ -63,22 +61,12 @@ type environment struct {
|
||||
ownedAccounts *set.Set
|
||||
lowGasTxs types.Transactions
|
||||
localMinedBlocks *uint64RingBuffer // the most recent block numbers that were mined locally (used to check block inclusion)
|
||||
}
|
||||
|
||||
// env returns a new environment for the current cycle
|
||||
func env(block *types.Block, eth core.Backend) *environment {
|
||||
state := state.New(block.Root(), eth.StateDb())
|
||||
env := &environment{
|
||||
totalUsedGas: new(big.Int),
|
||||
state: state,
|
||||
block: block,
|
||||
ancestors: set.New(),
|
||||
family: set.New(),
|
||||
uncles: set.New(),
|
||||
coinbase: state.GetOrNewStateObject(block.Coinbase()),
|
||||
}
|
||||
block *types.Block // the new block
|
||||
|
||||
return env
|
||||
header *types.Header
|
||||
txs []*types.Transaction
|
||||
receipts []*types.Receipt
|
||||
}
|
||||
|
||||
// worker is the main object which takes care of applying messages to the new state
|
||||
@ -137,14 +125,20 @@ func newWorker(coinbase common.Address, eth core.Backend) *worker {
|
||||
func (self *worker) pendingState() *state.StateDB {
|
||||
self.currentMu.Lock()
|
||||
defer self.currentMu.Unlock()
|
||||
|
||||
return self.current.state
|
||||
}
|
||||
|
||||
func (self *worker) pendingBlock() *types.Block {
|
||||
self.currentMu.Lock()
|
||||
defer self.currentMu.Unlock()
|
||||
|
||||
if atomic.LoadInt32(&self.mining) == 0 {
|
||||
return types.NewBlock(
|
||||
self.current.header,
|
||||
self.current.txs,
|
||||
nil,
|
||||
self.current.receipts,
|
||||
)
|
||||
}
|
||||
return self.current.block
|
||||
}
|
||||
|
||||
@ -206,7 +200,7 @@ out:
|
||||
// Apply transaction to the pending state if we're not mining
|
||||
if atomic.LoadInt32(&self.mining) == 0 {
|
||||
self.mu.Lock()
|
||||
self.commitTransactions(types.Transactions{ev.Tx})
|
||||
self.current.commitTransactions(types.Transactions{ev.Tx}, self.gasPrice, self.proc)
|
||||
self.mu.Unlock()
|
||||
}
|
||||
}
|
||||
@ -239,46 +233,46 @@ func (self *worker) wait() {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := self.chain.InsertChain(types.Blocks{block}); err == nil {
|
||||
for _, uncle := range block.Uncles() {
|
||||
delete(self.possibleUncles, uncle.Hash())
|
||||
}
|
||||
self.mux.Post(core.NewMinedBlockEvent{block})
|
||||
|
||||
var stale, confirm string
|
||||
canonBlock := self.chain.GetBlockByNumber(block.NumberU64())
|
||||
if canonBlock != nil && canonBlock.Hash() != block.Hash() {
|
||||
stale = "stale "
|
||||
} else {
|
||||
confirm = "Wait 5 blocks for confirmation"
|
||||
self.current.localMinedBlocks = newLocalMinedBlock(block.Number().Uint64(), self.current.localMinedBlocks)
|
||||
}
|
||||
|
||||
glog.V(logger.Info).Infof("🔨 Mined %sblock (#%v / %x). %s", stale, block.Number(), block.Hash().Bytes()[:4], confirm)
|
||||
|
||||
jsonlogger.LogJson(&logger.EthMinerNewBlock{
|
||||
BlockHash: block.Hash().Hex(),
|
||||
BlockNumber: block.Number(),
|
||||
ChainHeadHash: block.ParentHeaderHash.Hex(),
|
||||
BlockPrevHash: block.ParentHeaderHash.Hex(),
|
||||
})
|
||||
} else {
|
||||
self.commitNewWork()
|
||||
_, err := self.chain.WriteBlock(block)
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infoln("error writing block to chain", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// check staleness and display confirmation
|
||||
var stale, confirm string
|
||||
canonBlock := self.chain.GetBlockByNumber(block.NumberU64())
|
||||
if canonBlock != nil && canonBlock.Hash() != block.Hash() {
|
||||
stale = "stale "
|
||||
} else {
|
||||
confirm = "Wait 5 blocks for confirmation"
|
||||
self.current.localMinedBlocks = newLocalMinedBlock(block.Number().Uint64(), self.current.localMinedBlocks)
|
||||
}
|
||||
|
||||
glog.V(logger.Info).Infof("🔨 Mined %sblock (#%v / %x). %s", stale, block.Number(), block.Hash().Bytes()[:4], confirm)
|
||||
|
||||
// broadcast before waiting for validation
|
||||
go self.mux.Post(core.NewMinedBlockEvent{block})
|
||||
|
||||
self.commitNewWork()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (self *worker) push() {
|
||||
if atomic.LoadInt32(&self.mining) == 1 {
|
||||
self.current.block.SetRoot(self.current.state.Root())
|
||||
if core.Canary(self.current.state) {
|
||||
glog.Infoln("Toxicity levels rising to deadly levels. Your canary has died. You can go back or continue down the mineshaft --more--")
|
||||
glog.Infoln("You turn back and abort mining")
|
||||
return
|
||||
}
|
||||
|
||||
// push new work to agents
|
||||
for _, agent := range self.agents {
|
||||
atomic.AddInt32(&self.atWork, 1)
|
||||
|
||||
if agent.Work() != nil {
|
||||
agent.Work() <- self.current.block.Copy()
|
||||
agent.Work() <- self.current.block
|
||||
} else {
|
||||
common.Report(fmt.Sprintf("%v %T\n", agent, agent))
|
||||
}
|
||||
@ -286,22 +280,20 @@ func (self *worker) push() {
|
||||
}
|
||||
}
|
||||
|
||||
func (self *worker) makeCurrent() {
|
||||
block := self.chain.NewBlock(self.coinbase)
|
||||
parent := self.chain.GetBlock(block.ParentHash())
|
||||
// TMP fix for build server ...
|
||||
if parent == nil {
|
||||
return
|
||||
// makeCurrent creates a new environment for the current cycle.
|
||||
func (self *worker) makeCurrent(parent *types.Block, header *types.Header) {
|
||||
state := state.New(parent.Root(), self.eth.StateDb())
|
||||
current := &environment{
|
||||
state: state,
|
||||
ancestors: set.New(),
|
||||
family: set.New(),
|
||||
uncles: set.New(),
|
||||
header: header,
|
||||
coinbase: state.GetOrNewStateObject(self.coinbase),
|
||||
}
|
||||
|
||||
if block.Time() <= parent.Time() {
|
||||
block.Header().Time = parent.Header().Time + 1
|
||||
}
|
||||
block.Header().Extra = self.extra
|
||||
|
||||
// when 08 is processed ancestors contain 07 (quick block)
|
||||
current := env(block, self.eth)
|
||||
for _, ancestor := range self.chain.GetAncestors(block, 7) {
|
||||
for _, ancestor := range self.chain.GetBlocksFromHash(parent.Hash(), 7) {
|
||||
for _, uncle := range ancestor.Uncles() {
|
||||
current.family.Add(uncle.Hash())
|
||||
}
|
||||
@ -309,6 +301,7 @@ func (self *worker) makeCurrent() {
|
||||
current.ancestors.Add(ancestor.Hash())
|
||||
}
|
||||
accounts, _ := self.eth.AccountManager().Accounts()
|
||||
|
||||
// Keep track of transactions which return errors so they can be removed
|
||||
current.remove = set.New()
|
||||
current.tcount = 0
|
||||
@ -318,9 +311,6 @@ func (self *worker) makeCurrent() {
|
||||
if self.current != nil {
|
||||
current.localMinedBlocks = self.current.localMinedBlocks
|
||||
}
|
||||
|
||||
current.coinbase.SetGasLimit(core.CalcGasLimit(parent))
|
||||
|
||||
self.current = current
|
||||
}
|
||||
|
||||
@ -352,13 +342,13 @@ func (self *worker) isBlockLocallyMined(deepBlockNum uint64) bool {
|
||||
|
||||
//Does the block at {deepBlockNum} send earnings to my coinbase?
|
||||
var block = self.chain.GetBlockByNumber(deepBlockNum)
|
||||
return block != nil && block.Header().Coinbase == self.coinbase
|
||||
return block != nil && block.Coinbase() == self.coinbase
|
||||
}
|
||||
|
||||
func (self *worker) logLocalMinedBlocks(previous *environment) {
|
||||
if previous != nil && self.current.localMinedBlocks != nil {
|
||||
nextBlockNum := self.current.block.Number().Uint64()
|
||||
for checkBlockNum := previous.block.Number().Uint64(); checkBlockNum < nextBlockNum; checkBlockNum++ {
|
||||
nextBlockNum := self.current.block.NumberU64()
|
||||
for checkBlockNum := previous.block.NumberU64(); checkBlockNum < nextBlockNum; checkBlockNum++ {
|
||||
inspectBlockNum := checkBlockNum - miningLogAtDepth
|
||||
if self.isBlockLocallyMined(inspectBlockNum) {
|
||||
glog.V(logger.Info).Infof("🔨 🔗 Mined %d blocks back: block #%v", miningLogAtDepth, inspectBlockNum)
|
||||
@ -376,18 +366,42 @@ func (self *worker) commitNewWork() {
|
||||
defer self.currentMu.Unlock()
|
||||
|
||||
tstart := time.Now()
|
||||
parent := self.chain.CurrentBlock()
|
||||
tstamp := tstart.Unix()
|
||||
if tstamp <= parent.Time() {
|
||||
tstamp = parent.Time() + 1
|
||||
}
|
||||
// this will ensure we're not going off too far in the future
|
||||
if now := time.Now().Unix(); tstamp > now+4 {
|
||||
wait := time.Duration(tstamp-now) * time.Second
|
||||
glog.V(logger.Info).Infoln("We are too far in the future. Waiting for", wait)
|
||||
time.Sleep(wait)
|
||||
}
|
||||
|
||||
num := parent.Number()
|
||||
header := &types.Header{
|
||||
ParentHash: parent.Hash(),
|
||||
Number: num.Add(num, common.Big1),
|
||||
Difficulty: core.CalcDifficulty(tstamp, parent.Time(), parent.Difficulty()),
|
||||
GasLimit: core.CalcGasLimit(parent),
|
||||
GasUsed: new(big.Int),
|
||||
Coinbase: self.coinbase,
|
||||
Extra: self.extra,
|
||||
Time: uint64(tstamp),
|
||||
}
|
||||
|
||||
previous := self.current
|
||||
self.makeCurrent()
|
||||
self.makeCurrent(parent, header)
|
||||
current := self.current
|
||||
|
||||
// commit transactions for this run.
|
||||
transactions := self.eth.TxPool().GetTransactions()
|
||||
sort.Sort(types.TxByNonce{transactions})
|
||||
|
||||
// commit transactions for this run
|
||||
self.commitTransactions(transactions)
|
||||
current.coinbase.SetGasLimit(header.GasLimit)
|
||||
current.commitTransactions(transactions, self.gasPrice, self.proc)
|
||||
self.eth.TxPool().RemoveTransactions(current.lowGasTxs)
|
||||
|
||||
// compute uncles for the new block.
|
||||
var (
|
||||
uncles []*types.Header
|
||||
badUncles []common.Hash
|
||||
@ -396,88 +410,80 @@ func (self *worker) commitNewWork() {
|
||||
if len(uncles) == 2 {
|
||||
break
|
||||
}
|
||||
|
||||
if err := self.commitUncle(uncle.Header()); err != nil {
|
||||
if glog.V(logger.Ridiculousness) {
|
||||
glog.V(logger.Detail).Infof("Bad uncle found and will be removed (%x)\n", hash[:4])
|
||||
glog.V(logger.Detail).Infoln(uncle)
|
||||
}
|
||||
|
||||
badUncles = append(badUncles, hash)
|
||||
} else {
|
||||
glog.V(logger.Debug).Infof("commiting %x as uncle\n", hash[:4])
|
||||
uncles = append(uncles, uncle.Header())
|
||||
}
|
||||
}
|
||||
for _, hash := range badUncles {
|
||||
delete(self.possibleUncles, hash)
|
||||
}
|
||||
|
||||
// We only care about logging if we're actually mining
|
||||
if atomic.LoadInt32(&self.mining) == 1 {
|
||||
// commit state root after all state transitions.
|
||||
core.AccumulateRewards(self.current.state, header, uncles)
|
||||
current.state.Update()
|
||||
self.current.state.Sync()
|
||||
header.Root = current.state.Root()
|
||||
}
|
||||
|
||||
// create the new block whose nonce will be mined.
|
||||
current.block = types.NewBlock(header, current.txs, uncles, current.receipts)
|
||||
self.current.block.Td = new(big.Int).Set(core.CalcTD(self.current.block, self.chain.GetBlock(self.current.block.ParentHash())))
|
||||
|
||||
// We only care about logging if we're actually mining.
|
||||
if atomic.LoadInt32(&self.mining) == 1 {
|
||||
glog.V(logger.Info).Infof("commit new work on block %v with %d txs & %d uncles. Took %v\n", current.block.Number(), current.tcount, len(uncles), time.Since(tstart))
|
||||
self.logLocalMinedBlocks(previous)
|
||||
}
|
||||
|
||||
for _, hash := range badUncles {
|
||||
delete(self.possibleUncles, hash)
|
||||
}
|
||||
|
||||
self.current.block.SetUncles(uncles)
|
||||
|
||||
core.AccumulateRewards(self.current.state, self.current.block)
|
||||
|
||||
self.current.state.Update()
|
||||
|
||||
self.push()
|
||||
}
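The header construction in commitNewWork above pins the timestamp between two bounds: strictly after the parent, and not more than a few seconds ahead of the wall clock (the code above allows 4 seconds of slack before sleeping). A standalone sketch of that clamping logic, simplified from the code above:

package main

import (
    "fmt"
    "time"
)

// headerTime returns the timestamp to use for the new block and how long the
// worker should wait if that timestamp runs ahead of the local clock.
func headerTime(parentTime, now int64) (tstamp int64, wait time.Duration) {
    tstamp = now
    if tstamp <= parentTime {
        tstamp = parentTime + 1 // must be strictly after the parent block
    }
    if tstamp > now+4 {
        wait = time.Duration(tstamp-now) * time.Second // sleep before sealing
    }
    return tstamp, wait
}

func main() {
    now := time.Now().Unix()
    fmt.Println(headerTime(now-10, now)) // normal case: use the wall clock
    fmt.Println(headerTime(now+7, now))  // parent ahead of us: wait ~8s
}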
var (
|
||||
inclusionReward = new(big.Int).Div(core.BlockReward, big.NewInt(32))
|
||||
_uncleReward = new(big.Int).Mul(core.BlockReward, big.NewInt(15))
|
||||
uncleReward = new(big.Int).Div(_uncleReward, big.NewInt(16))
|
||||
)
|
||||
|
||||
func (self *worker) commitUncle(uncle *types.Header) error {
|
||||
if self.current.uncles.Has(uncle.Hash()) {
|
||||
// Error not unique
|
||||
hash := uncle.Hash()
|
||||
if self.current.uncles.Has(hash) {
|
||||
return core.UncleError("Uncle not unique")
|
||||
}
|
||||
|
||||
if !self.current.ancestors.Has(uncle.ParentHash) {
|
||||
return core.UncleError(fmt.Sprintf("Uncle's parent unknown (%x)", uncle.ParentHash[0:4]))
|
||||
}
|
||||
|
||||
if self.current.family.Has(uncle.Hash()) {
|
||||
return core.UncleError(fmt.Sprintf("Uncle already in family (%x)", uncle.Hash()))
|
||||
if self.current.family.Has(hash) {
|
||||
return core.UncleError(fmt.Sprintf("Uncle already in family (%x)", hash))
|
||||
}
|
||||
self.current.uncles.Add(uncle.Hash())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *worker) commitTransactions(transactions types.Transactions) {
|
||||
current := self.current
|
||||
|
||||
func (env *environment) commitTransactions(transactions types.Transactions, gasPrice *big.Int, proc *core.BlockProcessor) {
|
||||
for _, tx := range transactions {
|
||||
// We can skip err. It has already been validated in the tx pool
|
||||
from, _ := tx.From()
|
||||
|
||||
// Check if it falls within margin. Txs from owned accounts are always processed.
|
||||
if tx.GasPrice().Cmp(self.gasPrice) < 0 && !current.ownedAccounts.Has(from) {
|
||||
if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) {
|
||||
// ignore the transaction and transactor. We ignore the transactor
|
||||
// because nonce will fail after ignoring this transaction so there's
|
||||
// no point
|
||||
current.lowGasTransactors.Add(from)
|
||||
env.lowGasTransactors.Add(from)
|
||||
|
||||
glog.V(logger.Info).Infof("transaction(%x) below gas price (tx=%v ask=%v). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], common.CurrencyToString(tx.GasPrice()), common.CurrencyToString(self.gasPrice), from[:4])
|
||||
glog.V(logger.Info).Infof("transaction(%x) below gas price (tx=%v ask=%v). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], common.CurrencyToString(tx.GasPrice()), common.CurrencyToString(gasPrice), from[:4])
|
||||
}
|
||||
|
||||
// Continue with the next transaction if the transaction sender is included in
|
||||
// the low gas tx set. This will also remove the tx and all sequential transaction
|
||||
// from this transactor
|
||||
if current.lowGasTransactors.Has(from) {
|
||||
if env.lowGasTransactors.Has(from) {
|
||||
// add tx to the low gas set. This will be removed at the end of the run
|
||||
// owned accounts are ignored
|
||||
if !current.ownedAccounts.Has(from) {
|
||||
current.lowGasTxs = append(current.lowGasTxs, tx)
|
||||
if !env.ownedAccounts.Has(from) {
|
||||
env.lowGasTxs = append(env.lowGasTxs, tx)
|
||||
}
|
||||
continue
|
||||
}
|
||||
@ -487,46 +493,41 @@ func (self *worker) commitTransactions(transactions types.Transactions) {
|
||||
// the transaction is processed (that could potentially be included in the block) it
|
||||
// will throw a nonce error because the previous transaction hasn't been processed.
|
||||
// Therefore we need to ignore any transaction after the ignored one.
if current.ignoredTransactors.Has(from) {
|
||||
if env.ignoredTransactors.Has(from) {
|
||||
continue
|
||||
}
|
||||
|
||||
self.current.state.StartRecord(tx.Hash(), common.Hash{}, 0)
|
||||
env.state.StartRecord(tx.Hash(), common.Hash{}, 0)
|
||||
|
||||
err := self.commitTransaction(tx)
|
||||
err := env.commitTransaction(tx, proc)
|
||||
switch {
|
||||
case core.IsNonceErr(err) || core.IsInvalidTxErr(err):
|
||||
current.remove.Add(tx.Hash())
|
||||
env.remove.Add(tx.Hash())
|
||||
|
||||
if glog.V(logger.Detail) {
|
||||
glog.Infof("TX (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err)
|
||||
}
|
||||
case state.IsGasLimitErr(err):
|
||||
from, _ := tx.From()
|
||||
// ignore the transactor so no nonce errors will be thrown for this account
|
||||
// next time the worker is run, they'll be picked up again.
|
||||
current.ignoredTransactors.Add(from)
|
||||
env.ignoredTransactors.Add(from)
|
||||
|
||||
glog.V(logger.Detail).Infof("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4])
|
||||
default:
|
||||
current.tcount++
|
||||
env.tcount++
|
||||
}
|
||||
}
|
||||
|
||||
self.current.block.Header().GasUsed = self.current.totalUsedGas
|
||||
}
|
||||
|
||||
func (self *worker) commitTransaction(tx *types.Transaction) error {
|
||||
snap := self.current.state.Copy()
|
||||
receipt, _, err := self.proc.ApplyTransaction(self.current.coinbase, self.current.state, self.current.block, tx, self.current.totalUsedGas, true)
|
||||
func (env *environment) commitTransaction(tx *types.Transaction, proc *core.BlockProcessor) error {
|
||||
snap := env.state.Copy()
|
||||
receipt, _, err := proc.ApplyTransaction(env.coinbase, env.state, env.header, tx, env.header.GasUsed, true)
|
||||
if err != nil && (core.IsNonceErr(err) || state.IsGasLimitErr(err) || core.IsInvalidTxErr(err)) {
|
||||
self.current.state.Set(snap)
|
||||
env.state.Set(snap)
|
||||
return err
|
||||
}
|
||||
|
||||
self.current.block.AddTransaction(tx)
|
||||
self.current.block.AddReceipt(receipt)
|
||||
|
||||
env.txs = append(env.txs, tx)
|
||||
env.receipts = append(env.receipts, receipt)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -5,12 +5,9 @@ import (
|
||||
"io"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// TODO: put encbufs in a sync.Pool.
|
||||
// Doing that requires zeroing the buffers after use.
|
||||
// encReader will need to drop its buffer when done.
var (
|
||||
// Common encoded values.
|
||||
// These are useful when implementing EncodeRLP.
|
||||
@ -32,46 +29,10 @@ type Encoder interface {
|
||||
EncodeRLP(io.Writer) error
|
||||
}
|
||||
|
||||
// Flat wraps a value (which must encode as a list) so
|
||||
// it encodes as the list's elements.
|
||||
//
|
||||
// Example: suppose you have defined a type
|
||||
//
|
||||
// type foo struct { A, B uint }
|
||||
//
|
||||
// Under normal encoding rules,
|
||||
//
|
||||
// rlp.Encode(foo{1, 2}) --> 0xC20102
|
||||
//
|
||||
// This function can help you achieve the following encoding:
|
||||
//
|
||||
// rlp.Encode(rlp.Flat(foo{1, 2})) --> 0x0102
|
||||
func Flat(val interface{}) Encoder {
|
||||
return flatenc{val}
|
||||
}
|
||||
|
||||
type flatenc struct{ val interface{} }
|
||||
|
||||
func (e flatenc) EncodeRLP(out io.Writer) error {
|
||||
// record current output position
|
||||
var (
|
||||
eb = out.(*encbuf)
|
||||
prevstrsize = len(eb.str)
|
||||
prevnheads = len(eb.lheads)
|
||||
)
|
||||
if err := eb.encode(e.val); err != nil {
|
||||
return err
|
||||
}
|
||||
// check that a new list header has appeared
|
||||
if len(eb.lheads) == prevnheads || eb.lheads[prevnheads].offset == prevstrsize-1 {
|
||||
return fmt.Errorf("rlp.Flat: %T did not encode as list", e.val)
|
||||
}
|
||||
// remove the new list header
|
||||
newhead := eb.lheads[prevnheads]
|
||||
copy(eb.lheads[prevnheads:], eb.lheads[prevnheads+1:])
|
||||
eb.lheads = eb.lheads[:len(eb.lheads)-1]
|
||||
eb.lhsize -= headsize(uint64(newhead.size))
|
||||
return nil
|
||||
// ListSize returns the encoded size of an RLP list with the given
|
||||
// content size.
|
||||
func ListSize(contentSize uint64) uint64 {
|
||||
return uint64(headsize(contentSize)) + contentSize
|
||||
}
|
||||
|
||||
// Encode writes the RLP encoding of val to w. Note that Encode may
|
||||
@ -112,7 +73,9 @@ func Encode(w io.Writer, val interface{}) error {
|
||||
// Avoid copying by writing to the outer encbuf directly.
|
||||
return outer.encode(val)
|
||||
}
|
||||
eb := newencbuf()
|
||||
eb := encbufPool.Get().(*encbuf)
|
||||
eb.reset()
|
||||
defer encbufPool.Put(eb)
|
||||
if err := eb.encode(val); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -122,7 +85,9 @@ func Encode(w io.Writer, val interface{}) error {
|
||||
// EncodeBytes returns the RLP encoding of val.
|
||||
// Please see the documentation of Encode for the encoding rules.
|
||||
func EncodeToBytes(val interface{}) ([]byte, error) {
|
||||
eb := newencbuf()
|
||||
eb := encbufPool.Get().(*encbuf)
|
||||
eb.reset()
|
||||
defer encbufPool.Put(eb)
|
||||
if err := eb.encode(val); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -135,7 +100,8 @@ func EncodeToBytes(val interface{}) ([]byte, error) {
|
||||
//
|
||||
// Please see the documentation of Encode for the encoding rules.
|
||||
func EncodeToReader(val interface{}) (size int, r io.Reader, err error) {
|
||||
eb := newencbuf()
|
||||
eb := encbufPool.Get().(*encbuf)
|
||||
eb.reset()
|
||||
if err := eb.encode(val); err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
@ -182,8 +148,19 @@ func puthead(buf []byte, smalltag, largetag byte, size uint64) int {
|
||||
}
|
||||
}
|
||||
|
||||
func newencbuf() *encbuf {
|
||||
return &encbuf{sizebuf: make([]byte, 9)}
|
||||
// encbufs are pooled.
|
||||
var encbufPool = sync.Pool{
|
||||
New: func() interface{} { return &encbuf{sizebuf: make([]byte, 9)} },
|
||||
}
|
||||
|
||||
func (w *encbuf) reset() {
|
||||
w.lhsize = 0
|
||||
if w.str != nil {
|
||||
w.str = w.str[:0]
|
||||
}
|
||||
if w.lheads != nil {
|
||||
w.lheads = w.lheads[:0]
|
||||
}
|
||||
}
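The switch from newencbuf to a sync.Pool follows the usual Go buffer-pooling pattern: take a buffer from the pool, reset it so its capacity is reused but its contents are not, and hand it back once the encoded bytes have been copied out. A minimal standalone sketch with a plain byte buffer standing in for the rlp encbuf:

package main

import (
    "fmt"
    "sync"
)

type buf struct{ b []byte }

func (w *buf) reset() { w.b = w.b[:0] } // keep capacity, drop old contents

var bufPool = sync.Pool{
    New: func() interface{} { return &buf{b: make([]byte, 0, 64)} },
}

func encode(val string) string {
    eb := bufPool.Get().(*buf)
    eb.reset()
    defer bufPool.Put(eb) // safe: the result is copied out below
    eb.b = append(eb.b, val...)
    return string(eb.b)
}

func main() {
    fmt.Println(encode("hello"), encode("world"))
}

Note that EncodeToReader cannot defer the Put in the same way: its buffer lives on inside the returned reader, which (as the Read change further down shows) only returns it to the pool once it hits EOF.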
// encbuf implements io.Writer so it can be passed into EncodeRLP.
@ -295,6 +272,8 @@ type encReader struct {
|
||||
func (r *encReader) Read(b []byte) (n int, err error) {
|
||||
for {
|
||||
if r.piece = r.next(); r.piece == nil {
|
||||
encbufPool.Put(r.buf)
|
||||
r.buf = nil
|
||||
return n, io.EOF
|
||||
}
|
||||
nn := copy(b[n:], r.piece)
|
||||
@ -313,6 +292,9 @@ func (r *encReader) Read(b []byte) (n int, err error) {
|
||||
// it returns nil at EOF.
|
||||
func (r *encReader) next() []byte {
|
||||
switch {
|
||||
case r.buf == nil:
|
||||
return nil
|
||||
|
||||
case r.piece != nil:
|
||||
// There is still data available for reading.
|
||||
return r.piece
|
||||
|
@ -189,15 +189,6 @@ var encTests = []encTest{
|
||||
{val: &recstruct{5, nil}, output: "C205C0"},
|
||||
{val: &recstruct{5, &recstruct{4, &recstruct{3, nil}}}, output: "C605C404C203C0"},
|
||||
|
||||
// flat
|
||||
{val: Flat(uint(1)), error: "rlp.Flat: uint did not encode as list"},
|
||||
{val: Flat(simplestruct{A: 3, B: "foo"}), output: "0383666F6F"},
|
||||
{
|
||||
// value generates more list headers after the Flat
|
||||
val: []interface{}{"foo", []uint{1, 2}, Flat([]uint{3, 4}), []uint{5, 6}, "bar"},
|
||||
output: "D083666F6FC201020304C2050683626172",
|
||||
},
|
||||
|
||||
// nil
|
||||
{val: (*uint)(nil), output: "80"},
|
||||
{val: (*string)(nil), output: "80"},
|
||||
|
@ -348,14 +348,6 @@ func (self *ethApi) GetBlockByNumber(req *shared.Request) (interface{}, error) {
|
||||
|
||||
block := self.xeth.EthBlockByNumber(args.BlockNumber)
|
||||
br := NewBlockRes(block, args.IncludeTxs)
|
||||
// If request was for "pending", nil nonsensical fields
|
||||
if args.BlockNumber == -2 {
|
||||
br.BlockHash = nil
|
||||
br.BlockNumber = nil
|
||||
br.Miner = nil
|
||||
br.Nonce = nil
|
||||
br.LogsBloom = nil
|
||||
}
|
||||
return br, nil
|
||||
}
|
||||
|
||||
|
@ -270,29 +270,31 @@ func NewBlockRes(block *types.Block, fullTx bool) *BlockRes {
|
||||
res.BlockHash = newHexData(block.Hash())
|
||||
res.ParentHash = newHexData(block.ParentHash())
|
||||
res.Nonce = newHexData(block.Nonce())
|
||||
res.Sha3Uncles = newHexData(block.Header().UncleHash)
|
||||
res.Sha3Uncles = newHexData(block.UncleHash())
|
||||
res.LogsBloom = newHexData(block.Bloom())
|
||||
res.TransactionRoot = newHexData(block.Header().TxHash)
|
||||
res.TransactionRoot = newHexData(block.TxHash())
|
||||
res.StateRoot = newHexData(block.Root())
|
||||
res.Miner = newHexData(block.Header().Coinbase)
|
||||
res.Miner = newHexData(block.Coinbase())
|
||||
res.Difficulty = newHexNum(block.Difficulty())
|
||||
res.TotalDifficulty = newHexNum(block.Td)
|
||||
res.Size = newHexNum(block.Size().Int64())
|
||||
res.ExtraData = newHexData(block.Header().Extra)
|
||||
res.ExtraData = newHexData(block.Extra())
|
||||
res.GasLimit = newHexNum(block.GasLimit())
|
||||
res.GasUsed = newHexNum(block.GasUsed())
|
||||
res.UnixTimestamp = newHexNum(block.Time())
|
||||
|
||||
res.Transactions = make([]*TransactionRes, len(block.Transactions()))
|
||||
for i, tx := range block.Transactions() {
|
||||
txs := block.Transactions()
|
||||
res.Transactions = make([]*TransactionRes, len(txs))
|
||||
for i, tx := range txs {
|
||||
res.Transactions[i] = NewTransactionRes(tx)
|
||||
res.Transactions[i].BlockHash = res.BlockHash
|
||||
res.Transactions[i].BlockNumber = res.BlockNumber
|
||||
res.Transactions[i].TxIndex = newHexNum(i)
|
||||
}
|
||||
|
||||
res.Uncles = make([]*UncleRes, len(block.Uncles()))
|
||||
for i, uncle := range block.Uncles() {
|
||||
uncles := block.Uncles()
|
||||
res.Uncles = make([]*UncleRes, len(uncles))
|
||||
for i, uncle := range uncles {
|
||||
res.Uncles[i] = NewUncleRes(uncle)
|
||||
}
|
||||
|
||||
|
@ -245,7 +245,7 @@ func (t *BlockTest) TryBlocksInsert(chainManager *core.ChainManager) error {
|
||||
if b.BlockHeader == nil {
|
||||
continue // OK - block is supposed to be invalid, continue with next block
|
||||
} else {
|
||||
return fmt.Errorf("Block RLP decoding failed when expected to succeed: ", err)
|
||||
return fmt.Errorf("Block RLP decoding failed when expected to succeed: %v", err)
|
||||
}
|
||||
}
|
||||
// RLP decoding worked, try to insert into chain:
|
||||
@ -254,7 +254,7 @@ func (t *BlockTest) TryBlocksInsert(chainManager *core.ChainManager) error {
|
||||
if b.BlockHeader == nil {
|
||||
continue // OK - block is supposed to be invalid, continue with next block
|
||||
} else {
|
||||
return fmt.Errorf("Block insertion into chain failed: ", err)
|
||||
return fmt.Errorf("Block insertion into chain failed: %v", err)
|
||||
}
|
||||
}
|
||||
if b.BlockHeader == nil {
|
||||
@ -262,7 +262,7 @@ func (t *BlockTest) TryBlocksInsert(chainManager *core.ChainManager) error {
|
||||
}
|
||||
err = t.validateBlockHeader(b.BlockHeader, cb.Header())
|
||||
if err != nil {
|
||||
return fmt.Errorf("Block header validation failed: ", err)
|
||||
return fmt.Errorf("Block header validation failed: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -286,7 +286,7 @@ func (s *BlockTest) validateBlockHeader(h *btHeader, h2 *types.Header) error {
|
||||
|
||||
expectedNonce := mustConvertBytes(h.Nonce)
|
||||
if !bytes.Equal(expectedNonce, h2.Nonce[:]) {
|
||||
return fmt.Errorf("Nonce: expected: %v, decoded: %v", expectedNonce, h2.Nonce[:])
|
||||
return fmt.Errorf("Nonce: expected: %v, decoded: %v", expectedNonce, h2.Nonce)
|
||||
}
|
||||
|
||||
expectedNumber := mustConvertBigInt(h.Number, 16)
|
||||
@ -423,9 +423,8 @@ func mustConvertHeader(in btHeader) *types.Header {
|
||||
GasLimit: mustConvertBigInt(in.GasLimit, 16),
|
||||
Difficulty: mustConvertBigInt(in.Difficulty, 16),
|
||||
Time: mustConvertUint(in.Timestamp, 16),
|
||||
Nonce: types.EncodeNonce(mustConvertUint(in.Nonce, 16)),
|
||||
}
|
||||
// XXX cheats? :-)
|
||||
header.SetNonce(mustConvertUint(in.Nonce, 16))
|
||||
return header
|
||||
}
|
||||
|
||||
|
@ -152,54 +152,53 @@ func verifyTxFields(txTest TransactionTest, decodedTx *types.Transaction) (err e
|
||||
}
|
||||
|
||||
expectedData := mustConvertBytes(txTest.Transaction.Data)
|
||||
if !bytes.Equal(expectedData, decodedTx.Payload) {
|
||||
return fmt.Errorf("Tx input data mismatch: %#v %#v", expectedData, decodedTx.Payload)
|
||||
if !bytes.Equal(expectedData, decodedTx.Data()) {
|
||||
return fmt.Errorf("Tx input data mismatch: %#v %#v", expectedData, decodedTx.Data())
|
||||
}
|
||||
|
||||
expectedGasLimit := mustConvertBigInt(txTest.Transaction.GasLimit, 16)
|
||||
if expectedGasLimit.Cmp(decodedTx.GasLimit) != 0 {
|
||||
return fmt.Errorf("GasLimit mismatch: %v %v", expectedGasLimit, decodedTx.GasLimit)
|
||||
if expectedGasLimit.Cmp(decodedTx.Gas()) != 0 {
|
||||
return fmt.Errorf("GasLimit mismatch: %v %v", expectedGasLimit, decodedTx.Gas())
|
||||
}
|
||||
|
||||
expectedGasPrice := mustConvertBigInt(txTest.Transaction.GasPrice, 16)
|
||||
if expectedGasPrice.Cmp(decodedTx.Price) != 0 {
|
||||
return fmt.Errorf("GasPrice mismatch: %v %v", expectedGasPrice, decodedTx.Price)
|
||||
if expectedGasPrice.Cmp(decodedTx.GasPrice()) != 0 {
|
||||
return fmt.Errorf("GasPrice mismatch: %v %v", expectedGasPrice, decodedTx.GasPrice())
|
||||
}
|
||||
|
||||
expectedNonce := mustConvertUint(txTest.Transaction.Nonce, 16)
|
||||
if expectedNonce != decodedTx.AccountNonce {
|
||||
return fmt.Errorf("Nonce mismatch: %v %v", expectedNonce, decodedTx.AccountNonce)
|
||||
if expectedNonce != decodedTx.Nonce() {
|
||||
return fmt.Errorf("Nonce mismatch: %v %v", expectedNonce, decodedTx.Nonce())
|
||||
}
|
||||
|
||||
expectedR := common.Bytes2Big(mustConvertBytes(txTest.Transaction.R))
|
||||
if expectedR.Cmp(decodedTx.R) != 0 {
|
||||
return fmt.Errorf("R mismatch: %v %v", expectedR, decodedTx.R)
|
||||
v, r, s := decodedTx.SignatureValues()
|
||||
expectedR := mustConvertBigInt(txTest.Transaction.R, 16)
|
||||
if r.Cmp(expectedR) != 0 {
|
||||
return fmt.Errorf("R mismatch: %v %v", expectedR, r)
|
||||
}
|
||||
|
||||
expectedS := common.Bytes2Big(mustConvertBytes(txTest.Transaction.S))
|
||||
if expectedS.Cmp(decodedTx.S) != 0 {
|
||||
return fmt.Errorf("S mismatch: %v %v", expectedS, decodedTx.S)
|
||||
expectedS := mustConvertBigInt(txTest.Transaction.S, 16)
|
||||
if s.Cmp(expectedS) != 0 {
|
||||
return fmt.Errorf("S mismatch: %v %v", expectedS, s)
|
||||
}
|
||||
|
||||
expectedV := mustConvertUint(txTest.Transaction.V, 16)
|
||||
if expectedV != uint64(decodedTx.V) {
|
||||
return fmt.Errorf("V mismatch: %v %v", expectedV, uint64(decodedTx.V))
|
||||
if uint64(v) != expectedV {
|
||||
return fmt.Errorf("V mismatch: %v %v", expectedV, v)
|
||||
}
|
||||
|
||||
expectedTo := mustConvertAddress(txTest.Transaction.To)
|
||||
if decodedTx.Recipient == nil {
|
||||
if decodedTx.To() == nil {
|
||||
if expectedTo != common.BytesToAddress([]byte{}) { // "empty" or "zero" address
|
||||
return fmt.Errorf("To mismatch when recipient is nil (contract creation): %v", expectedTo)
|
||||
}
|
||||
} else {
|
||||
if expectedTo != *decodedTx.Recipient {
|
||||
return fmt.Errorf("To mismatch: %v %v", expectedTo, *decodedTx.Recipient)
|
||||
if expectedTo != *decodedTx.To() {
|
||||
return fmt.Errorf("To mismatch: %v %v", expectedTo, *decodedTx.To())
|
||||
}
|
||||
}
|
||||
|
||||
expectedValue := mustConvertBigInt(txTest.Transaction.Value, 16)
|
||||
if expectedValue.Cmp(decodedTx.Amount) != 0 {
|
||||
return fmt.Errorf("Value mismatch: %v %v", expectedValue, decodedTx.Amount)
|
||||
if expectedValue.Cmp(decodedTx.Value()) != 0 {
|
||||
return fmt.Errorf("Value mismatch: %v %v", expectedValue, decodedTx.Value())
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -1,6 +1,11 @@
|
||||
package trie
|
||||
|
||||
import "github.com/ethereum/go-ethereum/logger/glog"
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/compression/rle"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
)
|
||||
|
||||
type Backend interface {
|
||||
Get([]byte) ([]byte, error)
|
||||
@ -8,12 +13,13 @@ type Backend interface {
|
||||
}
|
||||
|
||||
type Cache struct {
|
||||
batch *leveldb.Batch
|
||||
store map[string][]byte
|
||||
backend Backend
|
||||
}
|
||||
|
||||
func NewCache(backend Backend) *Cache {
|
||||
return &Cache{make(map[string][]byte), backend}
|
||||
return &Cache{new(leveldb.Batch), make(map[string][]byte), backend}
|
||||
}
|
||||
|
||||
func (self *Cache) Get(key []byte) []byte {
|
||||
@ -26,19 +32,23 @@ func (self *Cache) Get(key []byte) []byte {
|
||||
}
|
||||
|
||||
func (self *Cache) Put(key []byte, data []byte) {
|
||||
// write the data to the ldb batch
|
||||
self.batch.Put(key, rle.Compress(data))
|
||||
self.store[string(key)] = data
|
||||
}
|
||||
|
||||
// Flush flushes the trie to the backing layer. If this is a leveldb instance
|
||||
// we'll use a batched write, otherwise we'll use regular put.
|
||||
func (self *Cache) Flush() {
|
||||
for k, v := range self.store {
|
||||
if err := self.backend.Put([]byte(k), v); err != nil {
|
||||
if db, ok := self.backend.(*ethdb.LDBDatabase); ok {
|
||||
if err := db.LDB().Write(self.batch, nil); err != nil {
|
||||
glog.Fatal("db write err:", err)
|
||||
}
|
||||
} else {
|
||||
for k, v := range self.store {
|
||||
self.backend.Put([]byte(k), v)
|
||||
}
|
||||
}
// This will eventually grow too large. We could
// put a limit on storage and push out not-so-popular nodes.
//self.Reset()
}
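The Flush above special-cases the concrete leveldb-backed database so all cached nodes go out in a single batched write, falling back to per-key puts for any other backend. A standalone sketch of that type-assertion-with-fallback shape, with made-up backend types (none of these names exist in go-ethereum):

package main

import "fmt"

type Backend interface {
    Put(key, value []byte) error
}

// memBackend is a trivial Backend with no batching support.
type memBackend map[string][]byte

func (m memBackend) Put(k, v []byte) error { m[string(k)] = v; return nil }

// fastBackend additionally knows how to write many entries at once.
type fastBackend struct{ memBackend }

func (f fastBackend) WriteBatch(kv map[string][]byte) error {
    for k, v := range kv {
        f.memBackend[k] = v
    }
    fmt.Println("flushed", len(kv), "entries in one batch")
    return nil
}

func flush(store map[string][]byte, b Backend) {
    if fb, ok := b.(fastBackend); ok { // concrete backend supports batching
        fb.WriteBatch(store)
        return
    }
    for k, v := range store { // generic fallback: one Put per key
        b.Put([]byte(k), v)
    }
}

func main() {
    flush(map[string][]byte{"a": {1}, "b": {2}}, fastBackend{memBackend{}})
    flush(map[string][]byte{"c": {3}}, memBackend{})
}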
func (self *Cache) Copy() *Cache {
|
||||
|
44
xeth/xeth.go
@ -209,8 +209,8 @@ func (self *XEth) AtStateNum(num int64) *XEth {
|
||||
// - could be removed in favour of mining on testdag (natspec e2e + networking)
|
||||
// + filters
|
||||
func (self *XEth) ApplyTestTxs(statedb *state.StateDB, address common.Address, txc uint64) (uint64, *XEth) {
|
||||
|
||||
block := self.backend.ChainManager().NewBlock(address)
|
||||
chain := self.backend.ChainManager()
|
||||
header := chain.CurrentBlock().Header()
|
||||
coinbase := statedb.GetStateObject(address)
|
||||
coinbase.SetGasLimit(big.NewInt(10000000))
|
||||
txs := self.backend.TxPool().GetQueuedTransactions()
|
||||
@ -218,7 +218,7 @@ func (self *XEth) ApplyTestTxs(statedb *state.StateDB, address common.Address, t
|
||||
for i := 0; i < len(txs); i++ {
|
||||
for _, tx := range txs {
|
||||
if tx.Nonce() == txc {
|
||||
_, _, err := core.ApplyMessage(core.NewEnv(statedb, self.backend.ChainManager(), tx, block), tx, coinbase)
|
||||
_, _, err := core.ApplyMessage(core.NewEnv(statedb, self.backend.ChainManager(), tx, header), tx, coinbase)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -845,8 +845,8 @@ func (self *XEth) Call(fromStr, toStr, valueStr, gasStr, gasPriceStr, dataStr st
|
||||
msg.gasPrice = self.DefaultGasPrice()
|
||||
}
|
||||
|
||||
block := self.CurrentBlock()
|
||||
vmenv := core.NewEnv(statedb, self.backend.ChainManager(), msg, block)
|
||||
header := self.CurrentBlock().Header()
|
||||
vmenv := core.NewEnv(statedb, self.backend.ChainManager(), msg, header)
|
||||
|
||||
res, gas, err := core.ApplyMessage(vmenv, msg, from)
|
||||
return common.ToHex(res), gas.String(), err
|
||||
@ -946,51 +946,45 @@ func (self *XEth) Transact(fromStr, toStr, nonceStr, valueStr, gasStr, gasPriceS
|
||||
|
||||
// TODO: align default values to have the same type, e.g. not depend on
|
||||
// common.Value conversions later on
|
||||
|
||||
var tx *types.Transaction
|
||||
if contractCreation {
|
||||
tx = types.NewContractCreationTx(value, gas, price, data)
|
||||
} else {
|
||||
tx = types.NewTransactionMessage(to, value, gas, price, data)
|
||||
}
|
||||
|
||||
state := self.backend.TxPool().State()
|
||||
|
||||
var nonce uint64
|
||||
if len(nonceStr) != 0 {
|
||||
nonce = common.Big(nonceStr).Uint64()
|
||||
} else {
|
||||
state := self.backend.TxPool().State()
|
||||
nonce = state.GetNonce(from)
|
||||
}
|
||||
tx.SetNonce(nonce)
|
||||
var tx *types.Transaction
|
||||
if contractCreation {
|
||||
tx = types.NewContractCreation(nonce, value, gas, price, data)
|
||||
} else {
|
||||
tx = types.NewTransaction(nonce, to, value, gas, price, data)
|
||||
}
|
||||
|
||||
if err := self.sign(tx, from, false); err != nil {
|
||||
signed, err := self.sign(tx, from, false)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := self.backend.TxPool().Add(tx); err != nil {
|
||||
if err = self.backend.TxPool().Add(signed); err != nil {
|
||||
return "", err
|
||||
}
|
||||
//state.SetNonce(from, nonce+1)
|
||||
|
||||
if contractCreation {
|
||||
addr := core.AddressFromMessage(tx)
|
||||
glog.V(logger.Info).Infof("Tx(%x) created: %x\n", tx.Hash(), addr)
|
||||
|
||||
return core.AddressFromMessage(tx).Hex(), nil
|
||||
return addr.Hex(), nil
|
||||
} else {
|
||||
glog.V(logger.Info).Infof("Tx(%x) to: %x\n", tx.Hash(), tx.To())
|
||||
}
|
||||
return tx.Hash().Hex(), nil
|
||||
}
|
||||
|
||||
func (self *XEth) sign(tx *types.Transaction, from common.Address, didUnlock bool) error {
|
||||
func (self *XEth) sign(tx *types.Transaction, from common.Address, didUnlock bool) (*types.Transaction, error) {
|
||||
hash := tx.Hash()
|
||||
sig, err := self.doSign(from, hash, didUnlock)
|
||||
if err != nil {
|
||||
return err
|
||||
return tx, err
|
||||
}
|
||||
tx.SetSignatureValues(sig)
|
||||
return nil
|
||||
return tx.WithSignature(sig)
|
||||
}
|
||||
|
||||
// callmsg is the message type used for call transactions.