forked from cerc-io/plugeth
Merge tag 'v1.10.9' into develop
Notes: this commit breaks the AppendAncient plugin hook. It adds CaptureEnter() and CaptureExit() as no-ops for interface compliance; these capabilities should be exposed to plugin tracers soon.
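As a rough illustration, no-op hooks of this kind look like the following, assuming the vm.Tracer hook signatures introduced around this release (the pluginTracer receiver type is hypothetical):

    // CaptureEnter and CaptureExit satisfy the tracer interface but discard
    // the inner-call scope until plugin tracers gain real support for it.
    func (t *pluginTracer) CaptureEnter(typ vm.OpCode, from, to common.Address, input []byte, gas uint64, value *big.Int) {
        // no-op: inner call entry is not yet forwarded to plugins
    }

    func (t *pluginTracer) CaptureExit(output []byte, gasUsed uint64, err error) {
        // no-op: inner call exit is not yet forwarded to plugins
    }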
commit 5d4d973cc4

.gitmodules (vendored, 1 line changed)
@@ -1,3 +1,4 @@
 [submodule "tests"]
   path = tests/testdata
   url = https://github.com/ethereum/tests
+  shallow = true
.golangci.yml

@@ -1,7 +1,7 @@
 # This file configures github.com/golangci/golangci-lint.
 
 run:
-  timeout: 3m
+  timeout: 5m
   tests: true
   # default is true. Enables skipping of directories:
   #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
.travis.yml (24 lines changed)
@@ -5,7 +5,7 @@ jobs:
   allow_failures:
     - stage: build
       os: osx
-      go: 1.15.x
+      go: 1.17.x
       env:
         - azure-osx
         - azure-ios
@@ -16,7 +16,7 @@ jobs:
     - stage: lint
       os: linux
       dist: bionic
-      go: 1.16.x
+      go: 1.17.x
       env:
         - lint
       git:
@@ -31,7 +31,7 @@ jobs:
       os: linux
       arch: amd64
       dist: bionic
-      go: 1.16.x
+      go: 1.17.x
       env:
         - docker
       services:
@@ -48,7 +48,7 @@ jobs:
       os: linux
       arch: arm64
       dist: bionic
-      go: 1.16.x
+      go: 1.17.x
       env:
         - docker
       services:
@@ -65,7 +65,7 @@ jobs:
       if: type = push
       os: linux
       dist: bionic
-      go: 1.16.x
+      go: 1.17.x
       env:
         - ubuntu-ppa
         - GO111MODULE=on
@@ -90,7 +90,7 @@ jobs:
       os: linux
       dist: bionic
       sudo: required
-      go: 1.16.x
+      go: 1.17.x
       env:
         - azure-linux
         - GO111MODULE=on
@@ -127,7 +127,7 @@ jobs:
       dist: bionic
       services:
         - docker
-      go: 1.16.x
+      go: 1.17.x
       env:
         - azure-linux-mips
         - GO111MODULE=on
@@ -192,7 +192,7 @@ jobs:
     - stage: build
       if: type = push
       os: osx
-      go: 1.16.x
+      go: 1.17.x
       env:
         - azure-osx
         - azure-ios
@@ -224,7 +224,7 @@ jobs:
       os: linux
       arch: amd64
       dist: bionic
-      go: 1.16.x
+      go: 1.17.x
       env:
         - GO111MODULE=on
       script:
@@ -235,7 +235,7 @@ jobs:
       os: linux
       arch: arm64
       dist: bionic
-      go: 1.16.x
+      go: 1.17.x
       env:
         - GO111MODULE=on
       script:
@@ -244,7 +244,7 @@ jobs:
     - stage: build
       os: linux
       dist: bionic
-      go: 1.15.x
+      go: 1.16.x
       env:
         - GO111MODULE=on
       script:
@@ -255,7 +255,7 @@ jobs:
       if: type = cron
       os: linux
       dist: bionic
-      go: 1.16.x
+      go: 1.17.x
       env:
         - azure-purge
         - GO111MODULE=on
Dockerfile

@@ -4,7 +4,7 @@ ARG VERSION=""
 ARG BUILDNUM=""
 
 # Build Geth in a stock Go builder container
-FROM golang:1.16-alpine as builder
+FROM golang:1.17-alpine as builder
 
 RUN apk add --no-cache gcc musl-dev linux-headers git
 
Dockerfile.alltools

@@ -4,7 +4,7 @@ ARG VERSION=""
 ARG BUILDNUM=""
 
 # Build Geth in a stock Go builder container
-FROM golang:1.16-alpine as builder
+FROM golang:1.17-alpine as builder
 
 RUN apk add --no-cache gcc musl-dev linux-headers git
 
SECURITY.md

@@ -12,6 +12,8 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go
 | ------- | ------- | ----------- |
 | `geth` | 20170425 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2017-04-25_Geth-audit_Truesec.pdf) |
 | `clef` | 20180914 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2018-09-14_Clef-audit_NCC.pdf) |
+| `Discv5` | 20191015 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2019-10-15_Discv5_audit_LeastAuthority.pdf) |
+| `Discv5` | 20200124 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2020-01-24_DiscV5_audit_Cure53.pdf) |
 
 ## Reporting a Vulnerability
 
accounts/abi/arguments.go

@@ -137,7 +137,7 @@ func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{
 	dst := reflect.ValueOf(v).Elem()
 	src := reflect.ValueOf(marshalledValues)
 
-	if dst.Kind() == reflect.Struct && src.Kind() != reflect.Struct {
+	if dst.Kind() == reflect.Struct {
 		return set(dst.Field(0), src)
 	}
 	return set(dst, src)
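The practical effect of this change: a struct destination now always receives the decoded value in its first field, so a method returning a single tuple must be unpacked into a wrapper struct. A minimal sketch of the new calling convention (parsedABI, returnData, and the method name "tuple" are illustrative assumptions):

    // Assumes a method "tuple" returning (uint256 a, int256 b).
    type tupleOut struct {
        A *big.Int
        B *big.Int
    }
    var wrapper struct {
        Result tupleOut // copyAtomic now routes the value into dst.Field(0)
    }
    if err := parsedABI.UnpackIntoInterface(&wrapper, "tuple", returnData); err != nil {
        // handle the error
    }
    // wrapper.Result.A and wrapper.Result.B hold the decoded values.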
accounts/abi/bind/base.go

@@ -431,6 +431,9 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter
 
 // UnpackLog unpacks a retrieved log into the provided output structure.
 func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
+	if log.Topics[0] != c.abi.Events[event].ID {
+		return fmt.Errorf("event signature mismatch")
+	}
 	if len(log.Data) > 0 {
 		if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil {
 			return err
@@ -447,6 +450,9 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log)
 
 // UnpackLogIntoMap unpacks a retrieved log into the provided map.
 func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error {
+	if log.Topics[0] != c.abi.Events[event].ID {
+		return fmt.Errorf("event signature mismatch")
+	}
 	if len(log.Data) > 0 {
 		if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil {
 			return err
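A sketch of what the new guard enforces (the event name and output struct are illustrative): a log whose first topic is not the event's signature hash is now rejected instead of being silently mis-decoded.

    var out struct {
        Name   string
        Sender common.Address
    }
    // c is a *bind.BoundContract, log a types.Log from a filter query.
    err := c.UnpackLog(&out, "received", log)
    // err reports "event signature mismatch" whenever
    // log.Topics[0] != c.abi.Events["received"].ID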
accounts/abi/bind/base_test.go

@@ -110,7 +110,7 @@ const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16
 func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
 	hash := crypto.Keccak256Hash([]byte("testName"))
 	topics := []common.Hash{
-		common.HexToHash("0x0"),
+		crypto.Keccak256Hash([]byte("received(string,address,uint256,bytes)")),
 		hash,
 	}
 	mockLog := newMockLog(topics, common.HexToHash("0x0"))
@@ -135,7 +135,7 @@ func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
 	}
 	hash := crypto.Keccak256Hash(sliceBytes)
 	topics := []common.Hash{
-		common.HexToHash("0x0"),
+		crypto.Keccak256Hash([]byte("received(string[],address,uint256,bytes)")),
 		hash,
 	}
 	mockLog := newMockLog(topics, common.HexToHash("0x0"))
@@ -160,7 +160,7 @@ func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
 	}
 	hash := crypto.Keccak256Hash(arrBytes)
 	topics := []common.Hash{
-		common.HexToHash("0x0"),
+		crypto.Keccak256Hash([]byte("received(address[2],address,uint256,bytes)")),
 		hash,
 	}
 	mockLog := newMockLog(topics, common.HexToHash("0x0"))
@@ -187,7 +187,7 @@ func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
 	var functionTy [24]byte
 	copy(functionTy[:], functionTyBytes[0:24])
 	topics := []common.Hash{
-		common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
+		crypto.Keccak256Hash([]byte("received(function,address,uint256,bytes)")),
 		common.BytesToHash(functionTyBytes),
 	}
 	mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"))
@@ -208,7 +208,7 @@ func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
 	bytes := []byte{1, 2, 3, 4, 5}
 	hash := crypto.Keccak256Hash(bytes)
 	topics := []common.Hash{
-		common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
+		crypto.Keccak256Hash([]byte("received(bytes,address,uint256,bytes)")),
 		hash,
 	}
 	mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"))
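These fixtures previously used placeholder first topics; with the signature check above they must carry the real event signature hash, which is simply the Keccak-256 of the canonical signature string:

    // The first topic of a non-anonymous log is the event signature hash.
    sig := crypto.Keccak256Hash([]byte("received(string,address,uint256,bytes)"))
    // sig must now equal log.Topics[0], or unpacking fails with
    // "event signature mismatch".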
accounts/abi/bind/bind_test.go

@@ -1785,6 +1785,77 @@ var bindTests = []struct {
 		nil,
 		nil,
 	},
+	// Test resolving single struct argument
+	{
+		`NewSingleStructArgument`,
+		`
+		pragma solidity ^0.8.0;
+
+		contract NewSingleStructArgument {
+			struct MyStruct{
+				uint256 a;
+				uint256 b;
+			}
+			event StructEvent(MyStruct s);
+			function TestEvent() public {
+				emit StructEvent(MyStruct({a: 1, b: 2}));
+			}
+		}
+		`,
+		[]string{"608060405234801561001057600080fd5b50610113806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806324ec1d3f14602d575b600080fd5b60336035565b005b7fb4b2ff75e30cb4317eaae16dd8a187dd89978df17565104caa6c2797caae27d460405180604001604052806001815260200160028152506040516078919060ba565b60405180910390a1565b6040820160008201516096600085018260ad565b50602082015160a7602085018260ad565b50505050565b60b48160d3565b82525050565b600060408201905060cd60008301846082565b92915050565b600081905091905056fea26469706673582212208823628796125bf9941ce4eda18da1be3cf2931b231708ab848e1bd7151c0c9a64736f6c63430008070033"},
+		[]string{`[{"anonymous":false,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"indexed":false,"internalType":"struct Test.MyStruct","name":"s","type":"tuple"}],"name":"StructEvent","type":"event"},{"inputs":[],"name":"TestEvent","outputs":[],"stateMutability":"nonpayable","type":"function"}]`},
+		`
+		"math/big"
+
+		"github.com/ethereum/go-ethereum/accounts/abi/bind"
+		"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+		"github.com/ethereum/go-ethereum/core"
+		"github.com/ethereum/go-ethereum/crypto"
+		"github.com/ethereum/go-ethereum/eth/ethconfig"
+		`,
+		`
+		var (
+			key, _  = crypto.GenerateKey()
+			user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
+			sim     = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
+		)
+		defer sim.Close()
+
+		_, _, d, err := DeployNewSingleStructArgument(user, sim)
+		if err != nil {
+			t.Fatalf("Failed to deploy contract %v", err)
+		}
+		sim.Commit()
+
+		_, err = d.TestEvent(user)
+		if err != nil {
+			t.Fatalf("Failed to call contract %v", err)
+		}
+		sim.Commit()
+
+		it, err := d.FilterStructEvent(nil)
+		if err != nil {
+			t.Fatalf("Failed to filter contract event %v", err)
+		}
+		var count int
+		for it.Next() {
+			if it.Event.S.A.Cmp(big.NewInt(1)) != 0 {
+				t.Fatal("Unexpected contract event")
+			}
+			if it.Event.S.B.Cmp(big.NewInt(2)) != 0 {
+				t.Fatal("Unexpected contract event")
+			}
+			count += 1
+		}
+		if count != 1 {
+			t.Fatal("Unexpected contract event number")
+		}
+		`,
+		nil,
+		nil,
+		nil,
+		nil,
+	},
 }
 
 // Tests that packages generated by the binder can be successfully compiled and
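The new test exercises abigen's handling of an event whose sole argument is a struct. Roughly, the generated bindings expose the struct as a named Go type plus a typed event wrapper; the shapes below are illustrative, not the exact generated code:

    type NewSingleStructArgumentMyStruct struct {
        A *big.Int
        B *big.Int
    }

    type NewSingleStructArgumentStructEvent struct {
        S   NewSingleStructArgumentMyStruct
        Raw types.Log // blockchain-specific metadata
    }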
accounts/abi/unpack_test.go

@@ -762,20 +762,24 @@ func TestUnpackTuple(t *testing.T) {
 	buff.Write(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1
 
 	// If the result is single tuple, use struct as return value container directly.
-	v := struct {
+	type v struct {
 		A *big.Int
 		B *big.Int
-	}{new(big.Int), new(big.Int)}
+	}
+	type r struct {
+		Result v
+	}
+	var ret0 = new(r)
+	err = abi.UnpackIntoInterface(ret0, "tuple", buff.Bytes())
 
-	err = abi.UnpackIntoInterface(&v, "tuple", buff.Bytes())
 	if err != nil {
 		t.Error(err)
 	} else {
-		if v.A.Cmp(big.NewInt(1)) != 0 {
-			t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.A)
+		if ret0.Result.A.Cmp(big.NewInt(1)) != 0 {
+			t.Errorf("unexpected value unpacked: want %x, got %x", 1, ret0.Result.A)
 		}
-		if v.B.Cmp(big.NewInt(-1)) != 0 {
-			t.Errorf("unexpected value unpacked: want %x, got %x", -1, v.B)
+		if ret0.Result.B.Cmp(big.NewInt(-1)) != 0 {
+			t.Errorf("unexpected value unpacked: want %x, got %x", -1, ret0.Result.B)
 		}
 	}
 
accounts/keystore/account_cache_test.go

@@ -96,7 +96,7 @@ func TestWatchNoDir(t *testing.T) {
 
 	// Create ks but not the directory that it watches.
 	rand.Seed(time.Now().UnixNano())
-	dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
+	dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int()))
 	ks := NewKeyStore(dir, LightScryptN, LightScryptP)
 
 	list := ks.Accounts()
@@ -322,7 +322,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
 
 	// Create a temporary kesytore to test with
 	rand.Seed(time.Now().UnixNano())
-	dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
+	dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int()))
 	ks := NewKeyStore(dir, LightScryptN, LightScryptP)
 
 	list := ks.Accounts()
accounts/keystore/watch.go

@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
+//go:build (darwin && !ios && cgo) || freebsd || (linux && !arm64) || netbsd || solaris
 // +build darwin,!ios,cgo freebsd linux,!arm64 netbsd solaris
 
 package keystore
accounts/keystore/watch_fallback.go

@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
+//go:build (darwin && !cgo) || ios || (linux && arm64) || windows || (!darwin && !freebsd && !linux && !netbsd && !solaris)
 // +build darwin,!cgo ios linux,arm64 windows !darwin,!freebsd,!linux,!netbsd,!solaris
 
 // This is the fallback implementation of directory watching.
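The added //go:build lines are the build-constraint syntax introduced with Go 1.17; gofmt emits them alongside the legacy // +build lines and keeps the two in sync, which is why both forms now appear in these files.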
accounts/manager.go

@@ -25,6 +25,10 @@ import (
 	"github.com/ethereum/go-ethereum/event"
 )
 
+// managerSubBufferSize determines how many incoming wallet events
+// the manager will buffer in its channel.
+const managerSubBufferSize = 50
+
 // Config contains the settings of the global account manager.
 //
 // TODO(rjl493456442, karalabe, holiman): Get rid of this when account management
@@ -33,18 +37,27 @@ type Config struct {
 	InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed
 }
 
+// newBackendEvent lets the manager know it should
+// track the given backend for wallet updates.
+type newBackendEvent struct {
+	backend   Backend
+	processed chan struct{} // Informs event emitter that backend has been integrated
+}
+
 // Manager is an overarching account manager that can communicate with various
 // backends for signing transactions.
 type Manager struct {
 	config      *Config                    // Global account manager configurations
 	backends    map[reflect.Type][]Backend // Index of backends currently registered
 	updaters    []event.Subscription       // Wallet update subscriptions for all backends
 	updates     chan WalletEvent           // Subscription sink for backend wallet changes
-	wallets     []Wallet                   // Cache of all wallets from all registered backends
+	newBackends chan newBackendEvent       // Incoming backends to be tracked by the manager
+	wallets     []Wallet                   // Cache of all wallets from all registered backends
 
 	feed event.Feed // Wallet feed notifying of arrivals/departures
 
 	quit chan chan error
+	term chan struct{} // Channel is closed upon termination of the update loop
 	lock sync.RWMutex
 }
 
@@ -57,7 +70,7 @@ func NewManager(config *Config, backends ...Backend) *Manager {
 		wallets = merge(wallets, backend.Wallets()...)
 	}
 	// Subscribe to wallet notifications from all backends
-	updates := make(chan WalletEvent, 4*len(backends))
+	updates := make(chan WalletEvent, managerSubBufferSize)
 
 	subs := make([]event.Subscription, len(backends))
 	for i, backend := range backends {
@@ -65,12 +78,14 @@ func NewManager(config *Config, backends ...Backend) *Manager {
 	}
 	// Assemble the account manager and return
 	am := &Manager{
 		config:   config,
 		backends: make(map[reflect.Type][]Backend),
 		updaters: subs,
 		updates:  updates,
-		wallets:  wallets,
-		quit:     make(chan chan error),
+		newBackends: make(chan newBackendEvent),
+		wallets:     wallets,
+		quit:        make(chan chan error),
+		term:        make(chan struct{}),
 	}
 	for _, backend := range backends {
 		kind := reflect.TypeOf(backend)
@@ -93,6 +108,14 @@ func (am *Manager) Config() *Config {
 	return am.config
 }
 
+// AddBackend starts the tracking of an additional backend for wallet updates.
+// cmd/geth assumes once this func returns the backends have been already integrated.
+func (am *Manager) AddBackend(backend Backend) {
+	done := make(chan struct{})
+	am.newBackends <- newBackendEvent{backend, done}
+	<-done
+}
+
 // update is the wallet event loop listening for notifications from the backends
 // and updating the cache of wallets.
 func (am *Manager) update() {
@@ -122,10 +145,22 @@ func (am *Manager) update() {
 
 			// Notify any listeners of the event
 			am.feed.Send(event)
+		case event := <-am.newBackends:
+			am.lock.Lock()
+			// Update caches
+			backend := event.backend
+			am.wallets = merge(am.wallets, backend.Wallets()...)
+			am.updaters = append(am.updaters, backend.Subscribe(am.updates))
+			kind := reflect.TypeOf(backend)
+			am.backends[kind] = append(am.backends[kind], backend)
+			am.lock.Unlock()
+			close(event.processed)
 		case errc := <-am.quit:
 			// Manager terminating, return
 			errc <- nil
+			// Signals event emitters the loop is not receiving values
+			// to prevent them from getting stuck.
+			close(am.term)
 			return
 		}
 	}
@@ -133,6 +168,9 @@ func (am *Manager) update() {
 
 // Backends retrieves the backend(s) with the given type from the account manager.
 func (am *Manager) Backends(kind reflect.Type) []Backend {
+	am.lock.RLock()
+	defer am.lock.RUnlock()
+
 	return am.backends[kind]
 }
 
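A short usage sketch of the new API (the keystore path and scrypt parameters are illustrative): AddBackend registers a backend with a running manager and blocks until the update loop has integrated it.

    am := accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: false})
    // ... later, once a keystore directory is known:
    ks := keystore.NewKeyStore("/tmp/keys", keystore.StandardScryptN, keystore.StandardScryptP)
    am.AddBackend(ks) // returns only after the update loop has merged ks's wallets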
appveyor.yml (60 lines changed)
@@ -1,29 +1,57 @@
-os: Visual Studio 2019
 clone_depth: 5
 version: "{branch}.{build}"
 
+image:
+  - Ubuntu
+  - Visual Studio 2019
+
 environment:
   matrix:
-    # We use gcc from MSYS2 because it is the most recent compiler version available on
-    # AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is
-    # contained in PATH.
     - GETH_ARCH: amd64
-      GETH_CC: C:\msys64\mingw64\bin\gcc.exe
-      PATH: C:\msys64\mingw64\bin;C:\Program Files (x86)\NSIS\;%PATH%
+      GETH_MINGW: 'C:\msys64\mingw64'
     - GETH_ARCH: 386
-      GETH_CC: C:\msys64\mingw32\bin\gcc.exe
-      PATH: C:\msys64\mingw32\bin;C:\Program Files (x86)\NSIS\;%PATH%
+      GETH_MINGW: 'C:\msys64\mingw32'
 
 install:
   - git submodule update --init --depth 1
   - go version
-  - "%GETH_CC% --version"
 
-build_script:
-  - go run build\ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC%
+for:
+  # Linux has its own script without -arch and -cc.
+  # The linux builder also runs lint.
+  - matrix:
+      only:
+        - image: Ubuntu
+    build_script:
+      - go run build/ci.go lint
+      - go run build/ci.go install -dlgo
+    test_script:
+      - go run build/ci.go test -dlgo -coverage
 
-after_build:
-  - go run build\ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
-  - go run build\ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
+  # linux/386 is disabled.
+  - matrix:
+      exclude:
+        - image: Ubuntu
+          GETH_ARCH: 386
 
-test_script:
-  - go run build\ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage
+  # Windows builds for amd64 + 386.
+  - matrix:
+      only:
+        - image: Visual Studio 2019
+    environment:
+      # We use gcc from MSYS2 because it is the most recent compiler version available on
+      # AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is
+      # contained in PATH.
+      GETH_CC: '%GETH_MINGW%\bin\gcc.exe'
+      PATH: '%GETH_MINGW%\bin;C:\Program Files (x86)\NSIS\;%PATH%'
+    build_script:
+      - 'echo %GETH_ARCH%'
+      - 'echo %GETH_CC%'
+      - '%GETH_CC% --version'
+      - go run build/ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC%
+    after_build:
+      # Upload builds. Note that ci.go makes this a no-op PR builds.
+      - go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
+      - go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
+    test_script:
+      - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage
build/checksums.txt

@@ -1,33 +1,37 @@
 # This file contains sha256 checksums of optional build dependencies.
 
-ae4f6b6e2a1677d31817984655a762074b5356da50fb58722b99104870d43503  go1.16.4.src.tar.gz
-18fe94775763db3878717393b6d41371b0b45206055e49b3838328120c977d13  go1.16.4.darwin-amd64.tar.gz
-cb6b972cc42e669f3585c648198cd5b6f6d7a0811d413ad64b50c02ba06ccc3a  go1.16.4.darwin-arm64.tar.gz
-cd1b146ef6e9006f27dd99e9687773e7fef30e8c985b7d41bff33e955a3bb53a  go1.16.4.linux-386.tar.gz
-7154e88f5a8047aad4b80ebace58a059e36e7e2e4eb3b383127a28c711b4ff59  go1.16.4.linux-amd64.tar.gz
-8b18eb05ddda2652d69ab1b1dd1f40dd731799f43c6a58b512ad01ae5b5bba21  go1.16.4.linux-arm64.tar.gz
-a53391a800ddec749ee90d38992babb27b95cfb864027350c737b9aa8e069494  go1.16.4.linux-armv6l.tar.gz
-e75c0b114a09eb5499874162b208931dc260de0fedaeedac8621bf263c974605  go1.16.4.windows-386.zip
-d40139b7ade8a3008e3240a6f86fe8f899a9c465c917e11dac8758af216f5eb0  go1.16.4.windows-amd64.zip
-7cf2bc8a175d6d656861165bfc554f92dc78d2abf5afe5631db3579555d97409  go1.16.4.freebsd-386.tar.gz
-ccdd2b76de1941b60734408fda0d750aaa69330d8a07430eed4c56bdb3502f6f  go1.16.4.freebsd-amd64.tar.gz
-80cfac566e344096a8df8f37bbd21f89e76a6fbe601406565d71a87a665fc125  go1.16.4.linux-ppc64le.tar.gz
-d6431881b3573dc29ecc24fbeab5e5ec25d8c9273aa543769c86a1a3bbac1ddf  go1.16.4.linux-s390x.tar.gz
+3a70e5055509f347c0fb831ca07a2bf3b531068f349b14a3c652e9b5b67beb5d  go1.17.src.tar.gz
+355bd544ce08d7d484d9d7de05a71b5c6f5bc10aa4b316688c2192aeb3dacfd1  go1.17.darwin-amd64.tar.gz
+da4e3e3c194bf9eed081de8842a157120ef44a7a8d7c820201adae7b0e28b20b  go1.17.darwin-arm64.tar.gz
+6819a7a11b8351d5d5768f2fff666abde97577602394f132cb7f85b3a7151f05  go1.17.freebsd-386.tar.gz
+15c184c83d99441d719da201b26256455eee85a808747c404b4183e9aa6c64b4  go1.17.freebsd-amd64.tar.gz
+c19e3227a6ac6329db91d1af77bbf239ccd760a259c16e6b9c932d527ff14848  go1.17.linux-386.tar.gz
+6bf89fc4f5ad763871cf7eac80a2d594492de7a818303283f1366a7f6a30372d  go1.17.linux-amd64.tar.gz
+01a9af009ada22122d3fcb9816049c1d21842524b38ef5d5a0e2ee4b26d7c3e7  go1.17.linux-arm64.tar.gz
+ae89d33f4e4acc222bdb04331933d5ece4ae71039812f6ccd7493cb3e8ddfb4e  go1.17.linux-armv6l.tar.gz
+ee84350114d532bf15f096198c675aafae9ff091dc4cc69eb49e1817ff94dbd7  go1.17.linux-ppc64le.tar.gz
+a50aaecf054f393575f969a9105d5c6864dd91afc5287d772449033fbafcf7e3  go1.17.linux-s390x.tar.gz
+c5afdd2ea4969f2b44637e913b04f7c15265d7beb60924a28063722670a52feb  go1.17.windows-386.zip
+2a18bd65583e221be8b9b7c2fbe3696c40f6e27c2df689bbdcc939d49651d151  go1.17.windows-amd64.zip
+5256f92f643d9022394ddc84de5c74fe8660c2151daaa199b12e60e542d694ae  go1.17.windows-arm64.zip
 
-7e9a47ab540aa3e8472fbf8120d28bed3b9d9cf625b955818e8bc69628d7187c  golangci-lint-1.39.0-darwin-amd64.tar.gz
-574daa2c9c299b01672a6daeb1873b5f12e413cdb6dc0e30f2ff163956778064  golangci-lint-1.39.0-darwin-arm64.tar.gz
-6225f7014987324ab78e9b511f294e3f25be013728283c33918c67c8576d543e  golangci-lint-1.39.0-freebsd-386.tar.gz
-6b3e76e1e5eaf0159411c8e2727f8d533989d3bb19f10e9caa6e0b9619ee267d  golangci-lint-1.39.0-freebsd-amd64.tar.gz
-a301cacfff87ed9b00313d95278533c25a4527a06b040a17d969b4b7e1b8a90d  golangci-lint-1.39.0-freebsd-armv7.tar.gz
-25bfd96a29c3112f508d5e4fc860dbad7afce657233c343acfa20715717d51e7  golangci-lint-1.39.0-freebsd-armv6.tar.gz
-9687e4ff15545cfc722b0e46107a94195166a505023b48a316579af25ad09505  golangci-lint-1.39.0-linux-armv7.tar.gz
-a7fa7ab2bfc99cbe5e5bcbf5684f5a997f920afbbe2f253d2feb1001d5e3c8b3  golangci-lint-1.39.0-linux-armv6.tar.gz
-c8f9634115beddb4ed9129c1f7ecd4c97c99d07aeef33e3707234097eeb51b7b  golangci-lint-1.39.0-linux-mips64le.tar.gz
-d1234c213b74751f1af413302dde0e9a6d4d29aecef034af7abb07dc1b6e887f  golangci-lint-1.39.0-linux-arm64.tar.gz
-df25d9267168323b163147acb823ab0215a8a3bb6898a4a9320afdfedde66817  golangci-lint-1.39.0-linux-386.tar.gz
-1767e75fba357b7651b1a796d38453558f371c60af805505ec99e166908c04b5  golangci-lint-1.39.0-linux-ppc64le.tar.gz
-25fd75bf3186b3d930ecae10185689968fd18fd8fa6f9f555d6beb04348c20f6  golangci-lint-1.39.0-linux-s390x.tar.gz
-3a73aa7468087caa62673c8adea99b4e4dff846dc72707222db85f8679b40cbf  golangci-lint-1.39.0-linux-amd64.tar.gz
-578caceccf81739bda67dbfec52816709d03608c6878888ecdc0e186a094a41b  golangci-lint-1.39.0-linux-mips64.tar.gz
-494b66ba0e32c8ddf6c4f6b1d05729b110900f6017eda943057e43598c17d7a8  golangci-lint-1.39.0-windows-386.zip
-52ec2e13a3cbb47147244dff8cfc35103563deb76e0459133058086fc35fb2c7  golangci-lint-1.39.0-windows-amd64.zip
+d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa  golangci-lint-1.42.0-darwin-amd64.tar.gz
+e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea  golangci-lint-1.42.0-darwin-arm64.tar.gz
+14d912a3fa856830339472fc4dc341933adf15f37bdb7130bbbfcf960ecf4809  golangci-lint-1.42.0-freebsd-386.tar.gz
+337257fccc9baeb5ee1cd7e70c153e9d9f59d3afde46d631659500048afbdf80  golangci-lint-1.42.0-freebsd-amd64.tar.gz
+6debcc266b629359fdd8eef4f4abb05a621604079d27016265afb5b4593b0eff  golangci-lint-1.42.0-freebsd-armv6.tar.gz
+878f0e190169db2ce9dde8cefbd99adc4fe28b90b68686bbfcfcc2085e6d693e  golangci-lint-1.42.0-freebsd-armv7.tar.gz
+42c78e31faf62b225363eff1b1d2aa74f9dbcb75686c8914aa3e90d6af65cece  golangci-lint-1.42.0-linux-386.tar.gz
+6937f62f8e2329e94822dc11c10b871ace5557ae1fcc4ee2f9980cd6aecbc159  golangci-lint-1.42.0-linux-amd64.tar.gz
+2cf8d23d96cd854a537b355dab2962b960b88a06b615232599f066afd233f246  golangci-lint-1.42.0-linux-arm64.tar.gz
+08b003d1ed61367473886defc957af5301066e62338e5d96a319c34dadc4c1d1  golangci-lint-1.42.0-linux-armv6.tar.gz
+c7c00ec4845e806a1f32685f5b150219e180bd6d6a9d584be8d27f0c41d7a1bf  golangci-lint-1.42.0-linux-armv7.tar.gz
+3650fcf29eb3d8ee326d77791a896b15259eb2d5bf77437dc72e7efe5af6bd40  golangci-lint-1.42.0-linux-mips64.tar.gz
+f51ae003fdbca4fef78ba73e2eb736a939c8eaa178cd452234213b489da5a420  golangci-lint-1.42.0-linux-mips64le.tar.gz
+1b0bb7b8b22cc4ea7da44fd5ad5faaf6111d0677e01cc6f961b62a96537de2c6  golangci-lint-1.42.0-linux-ppc64le.tar.gz
+8cb56927eb75e572450efbe0ff0f9cf3f56dc9faa81d9e8d30d6559fc1d06e6d  golangci-lint-1.42.0-linux-riscv64.tar.gz
+5ac41cd31825a176b21505a371a7b307cd9cdf17df0f35bbb3bf1466f9356ccc  golangci-lint-1.42.0-linux-s390x.tar.gz
+e1cebd2af621ac4b64c20937df92c3819264f2174c92f51e196db1e64ae097e0  golangci-lint-1.42.0-windows-386.zip
+7e70fcde8e87a17cae0455df07d257ebc86669f3968d568e12727fa24bbe9883  golangci-lint-1.42.0-windows-amd64.zip
+59da7ce1bda432616bfc28ae663e52c3675adee8d9bf5959fafd657c159576ab  golangci-lint-1.42.0-windows-armv6.zip
+65f62dda937bfcede0326ac77abe947ce1548931e6e13298ca036cb31f224db5  golangci-lint-1.42.0-windows-armv7.zip
build/ci.go (14 lines changed)
@@ -129,19 +129,13 @@ var (
 
 	// Distros for which packages are created.
 	// Note: vivid is unsupported because there is no golang-1.6 package for it.
-	// Note: wily is unsupported because it was officially deprecated on Launchpad.
-	// Note: yakkety is unsupported because it was officially deprecated on Launchpad.
-	// Note: zesty is unsupported because it was officially deprecated on Launchpad.
-	// Note: artful is unsupported because it was officially deprecated on Launchpad.
-	// Note: cosmic is unsupported because it was officially deprecated on Launchpad.
-	// Note: disco is unsupported because it was officially deprecated on Launchpad.
-	// Note: eoan is unsupported because it was officially deprecated on Launchpad.
+	// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
+	//   wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy
 	debDistroGoBoots = map[string]string{
 		"trusty":  "golang-1.11",
 		"xenial":  "golang-go",
 		"bionic":  "golang-go",
 		"focal":   "golang-go",
-		"groovy":  "golang-go",
 		"hirsute": "golang-go",
 	}
 
@@ -153,7 +147,7 @@ var (
 	// This is the version of go that will be downloaded by
 	//
 	//     go run ci.go install -dlgo
-	dlgoVersion = "1.16.4"
+	dlgoVersion = "1.17"
 )
 
 var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -330,7 +324,7 @@ func doLint(cmdline []string) {
 
 // downloadLinter downloads and unpacks golangci-lint.
 func downloadLinter(cachedir string) string {
-	const version = "1.39.0"
+	const version = "1.42.0"
 
 	csdb := build.MustLoadChecksums("build/checksums.txt")
 	base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
|
|||||||
chainConfig *params.ChainConfig
|
chainConfig *params.ChainConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Chain) WriteTo(writer io.Writer) error {
|
|
||||||
for _, block := range c.blocks {
|
|
||||||
if err := rlp.Encode(writer, block); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the length of the chain.
|
// Len returns the length of the chain.
|
||||||
func (c *Chain) Len() int {
|
func (c *Chain) Len() int {
|
||||||
return len(c.blocks)
|
return len(c.blocks)
|
||||||
|
cmd/devp2p/internal/ethtest/helpers.go

@@ -242,9 +242,17 @@ func (s *Suite) createSendAndRecvConns(isEth66 bool) (*Conn, *Conn, error) {
 	return sendConn, recvConn, nil
 }
 
+func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
+	if c.negotiatedProtoVersion == 66 {
+		_, msg := c.readAndServe66(chain, timeout)
+		return msg
+	}
+	return c.readAndServe65(chain, timeout)
+}
+
 // readAndServe serves GetBlockHeaders requests while waiting
 // on another message from the node.
-func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
+func (c *Conn) readAndServe65(chain *Chain, timeout time.Duration) Message {
 	start := time.Now()
 	for time.Since(start) < timeout {
 		c.SetReadDeadline(time.Now().Add(5 * time.Second))
@@ -279,8 +287,8 @@ func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Mess
 	switch msg := msg.(type) {
 	case *Ping:
 		c.Write(&Pong{})
-	case *GetBlockHeaders:
-		headers, err := chain.GetHeaders(*msg)
+	case GetBlockHeaders:
+		headers, err := chain.GetHeaders(msg)
 		if err != nil {
 			return 0, errorf("could not get headers for inbound header request: %v", err)
 		}
cmd/devp2p/internal/ethtest/suite_test.go

@@ -45,7 +45,7 @@ func TestEthSuite(t *testing.T) {
 	if err != nil {
 		t.Fatalf("could not create new test suite: %v", err)
 	}
-	for _, test := range suite.AllEthTests() {
+	for _, test := range suite.Eth66Tests() {
 		t.Run(test.Name, func(t *testing.T) {
 			result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
 			if result[0].Failed {
cmd/evm/README.md

@@ -208,7 +208,7 @@ Example:
 ]
 }
 ```
-When applying this, using a reward of `0x08`
+When applying this, using a reward of `0x80`
 Output:
 ```json
 {
cmd/evm/disasm.go

@@ -46,7 +46,7 @@ func disasmCmd(ctx *cli.Context) error {
 	case ctx.GlobalIsSet(InputFlag.Name):
 		in = ctx.GlobalString(InputFlag.Name)
 	default:
-		return errors.New("Missing filename or --input value")
+		return errors.New("missing filename or --input value")
 	}
 
 	code := strings.TrimSpace(in)
cmd/evm/internal/t8ntool/execution.go

@@ -23,6 +23,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/consensus/misc"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/rawdb"
@@ -46,13 +47,14 @@ type Prestate struct {
 // ExecutionResult contains the execution status after running a state test, any
 // error that might have occurred and a dump of the final state if requested.
 type ExecutionResult struct {
 	StateRoot   common.Hash    `json:"stateRoot"`
 	TxRoot      common.Hash    `json:"txRoot"`
 	ReceiptRoot common.Hash    `json:"receiptRoot"`
 	LogsHash    common.Hash    `json:"logsHash"`
 	Bloom       types.Bloom    `json:"logsBloom" gencodec:"required"`
 	Receipts    types.Receipts `json:"receipts"`
 	Rejected    []*rejectedTx  `json:"rejected,omitempty"`
+	Difficulty  *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
 }
 
 type ommer struct {
@@ -62,23 +64,28 @@ type ommer struct {
 
 //go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go
 type stEnv struct {
 	Coinbase         common.Address                      `json:"currentCoinbase" gencodec:"required"`
-	Difficulty       *big.Int                            `json:"currentDifficulty" gencodec:"required"`
-	GasLimit         uint64                              `json:"currentGasLimit" gencodec:"required"`
-	Number           uint64                              `json:"currentNumber" gencodec:"required"`
-	Timestamp        uint64                              `json:"currentTimestamp" gencodec:"required"`
-	BlockHashes      map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
-	Ommers           []ommer                             `json:"ommers,omitempty"`
-	BaseFee          *big.Int                            `json:"currentBaseFee,omitempty"`
+	Difficulty       *big.Int                            `json:"currentDifficulty"`
+	ParentDifficulty *big.Int                            `json:"parentDifficulty"`
+	GasLimit         uint64                              `json:"currentGasLimit" gencodec:"required"`
+	Number           uint64                              `json:"currentNumber" gencodec:"required"`
+	Timestamp        uint64                              `json:"currentTimestamp" gencodec:"required"`
+	ParentTimestamp  uint64                              `json:"parentTimestamp,omitempty"`
+	BlockHashes      map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+	Ommers           []ommer                             `json:"ommers,omitempty"`
+	BaseFee          *big.Int                            `json:"currentBaseFee,omitempty"`
+	ParentUncleHash  common.Hash                         `json:"parentUncleHash"`
 }
 
 type stEnvMarshaling struct {
 	Coinbase         common.UnprefixedAddress
 	Difficulty       *math.HexOrDecimal256
+	ParentDifficulty *math.HexOrDecimal256
 	GasLimit         math.HexOrDecimal64
 	Number           math.HexOrDecimal64
 	Timestamp        math.HexOrDecimal64
+	ParentTimestamp  math.HexOrDecimal64
 	BaseFee          *math.HexOrDecimal256
 }
 
 type rejectedTx struct {
@@ -247,6 +254,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 		LogsHash: rlpHash(statedb.Logs()),
 		Receipts: receipts,
 		Rejected: rejectedTxs,
+		Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty),
 	}
 	return statedb, execRs, nil
 }
@@ -274,3 +282,23 @@ func rlpHash(x interface{}) (h common.Hash) {
 	hw.Sum(h[:0])
 	return h
 }
+
+// calcDifficulty is based on ethash.CalcDifficulty. This method is used in case
+// the caller does not provide an explicit difficulty, but instead provides only
+// parent timestamp + difficulty.
+// Note: this method only works for ethash engine.
+func calcDifficulty(config *params.ChainConfig, number, currentTime, parentTime uint64,
+	parentDifficulty *big.Int, parentUncleHash common.Hash) *big.Int {
+	uncleHash := parentUncleHash
+	if uncleHash == (common.Hash{}) {
+		uncleHash = types.EmptyUncleHash
+	}
+	parent := &types.Header{
+		ParentHash: common.Hash{},
+		UncleHash:  uncleHash,
+		Difficulty: parentDifficulty,
+		Number:     new(big.Int).SetUint64(number - 1),
+		Time:       parentTime,
+	}
+	return ethash.CalcDifficulty(config, currentTime, parent)
+}
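A small illustration of the fallback this enables (all values are made up): when the environment omits currentDifficulty but supplies the parent's timestamp and difficulty, t8n can derive the difficulty through the same ethash rules that calcDifficulty wraps.

    parent := &types.Header{
        Number:     big.NewInt(13),
        Time:       1438270115,
        Difficulty: big.NewInt(0x020000),
        UncleHash:  types.EmptyUncleHash,
    }
    // ethash.CalcDifficulty is the public function calcDifficulty delegates to.
    diff := ethash.CalcDifficulty(params.MainnetChainConfig, 1438270128, parent)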
cmd/evm/internal/t8ntool/flags.go

@@ -30,7 +30,7 @@ var (
 		Name:  "trace",
 		Usage: "Output full trace logs to files <txhash>.jsonl",
 	}
-	TraceDisableMemoryFlag = cli.BoolFlag{
+	TraceDisableMemoryFlag = cli.BoolTFlag{
 		Name:  "trace.nomemory",
 		Usage: "Disable full memory dump in traces",
 	}
@@ -38,7 +38,7 @@ var (
 		Name:  "trace.nostack",
 		Usage: "Disable stack output in traces",
 	}
-	TraceDisableReturnDataFlag = cli.BoolFlag{
+	TraceDisableReturnDataFlag = cli.BoolTFlag{
 		Name:  "trace.noreturndata",
 		Usage: "Disable return data output in traces",
 	}
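cli.BoolTFlag is urfave/cli v1's boolean flag that defaults to true, so memory and return-data dumps are now disabled by default. A sketch of how the switched flags are read (variable names are illustrative):

    // BoolT values default to true and are read with BoolT:
    disableMemory := ctx.BoolT(TraceDisableMemoryFlag.Name) // true unless --trace.nomemory=false
    disableRetData := ctx.BoolT(TraceDisableReturnDataFlag.Name)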
cmd/evm/internal/t8ntool/gen_stenv.go

@@ -16,38 +16,47 @@ var _ = (*stEnvMarshaling)(nil)
 // MarshalJSON marshals as JSON.
 func (s stEnv) MarshalJSON() ([]byte, error) {
 	type stEnv struct {
 		Coinbase         common.UnprefixedAddress            `json:"currentCoinbase" gencodec:"required"`
-		Difficulty       *math.HexOrDecimal256               `json:"currentDifficulty" gencodec:"required"`
-		GasLimit         math.HexOrDecimal64                 `json:"currentGasLimit" gencodec:"required"`
-		Number           math.HexOrDecimal64                 `json:"currentNumber" gencodec:"required"`
-		Timestamp        math.HexOrDecimal64                 `json:"currentTimestamp" gencodec:"required"`
-		BlockHashes      map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
-		Ommers           []ommer                             `json:"ommers,omitempty"`
-		BaseFee          *math.HexOrDecimal256               `json:"currentBaseFee,omitempty"`
+		Difficulty       *math.HexOrDecimal256               `json:"currentDifficulty"`
+		ParentDifficulty *math.HexOrDecimal256               `json:"parentDifficulty"`
+		GasLimit         math.HexOrDecimal64                 `json:"currentGasLimit" gencodec:"required"`
+		Number           math.HexOrDecimal64                 `json:"currentNumber" gencodec:"required"`
+		Timestamp        math.HexOrDecimal64                 `json:"currentTimestamp" gencodec:"required"`
+		ParentTimestamp  math.HexOrDecimal64                 `json:"parentTimestamp,omitempty"`
+		BlockHashes      map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+		Ommers           []ommer                             `json:"ommers,omitempty"`
+		BaseFee          *math.HexOrDecimal256               `json:"currentBaseFee,omitempty"`
+		ParentUncleHash  common.Hash                         `json:"parentUncleHash"`
 	}
 	var enc stEnv
 	enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
 	enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
+	enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
 	enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
 	enc.Number = math.HexOrDecimal64(s.Number)
 	enc.Timestamp = math.HexOrDecimal64(s.Timestamp)
+	enc.ParentTimestamp = math.HexOrDecimal64(s.ParentTimestamp)
 	enc.BlockHashes = s.BlockHashes
 	enc.Ommers = s.Ommers
 	enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee)
+	enc.ParentUncleHash = s.ParentUncleHash
 	return json.Marshal(&enc)
 }
 
 // UnmarshalJSON unmarshals from JSON.
 func (s *stEnv) UnmarshalJSON(input []byte) error {
 	type stEnv struct {
 		Coinbase         *common.UnprefixedAddress           `json:"currentCoinbase" gencodec:"required"`
-		Difficulty       *math.HexOrDecimal256               `json:"currentDifficulty" gencodec:"required"`
-		GasLimit         *math.HexOrDecimal64                `json:"currentGasLimit" gencodec:"required"`
-		Number           *math.HexOrDecimal64                `json:"currentNumber" gencodec:"required"`
-		Timestamp        *math.HexOrDecimal64                `json:"currentTimestamp" gencodec:"required"`
-		BlockHashes      map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
-		Ommers           []ommer                             `json:"ommers,omitempty"`
-		BaseFee          *math.HexOrDecimal256               `json:"currentBaseFee,omitempty"`
+		Difficulty       *math.HexOrDecimal256               `json:"currentDifficulty"`
+		ParentDifficulty *math.HexOrDecimal256               `json:"parentDifficulty"`
+		GasLimit         *math.HexOrDecimal64                `json:"currentGasLimit" gencodec:"required"`
+		Number           *math.HexOrDecimal64                `json:"currentNumber" gencodec:"required"`
+		Timestamp        *math.HexOrDecimal64                `json:"currentTimestamp" gencodec:"required"`
+		ParentTimestamp  *math.HexOrDecimal64                `json:"parentTimestamp,omitempty"`
+		BlockHashes      map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+		Ommers           []ommer                             `json:"ommers,omitempty"`
+		BaseFee          *math.HexOrDecimal256               `json:"currentBaseFee,omitempty"`
+		ParentUncleHash  *common.Hash                        `json:"parentUncleHash"`
 	}
 	var dec stEnv
 	if err := json.Unmarshal(input, &dec); err != nil {
@@ -57,10 +66,12 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
 		return errors.New("missing required field 'currentCoinbase' for stEnv")
 	}
 	s.Coinbase = common.Address(*dec.Coinbase)
-	if dec.Difficulty == nil {
-		return errors.New("missing required field 'currentDifficulty' for stEnv")
+	if dec.Difficulty != nil {
+		s.Difficulty = (*big.Int)(dec.Difficulty)
+	}
+	if dec.ParentDifficulty != nil {
+		s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
 	}
-	s.Difficulty = (*big.Int)(dec.Difficulty)
 	if dec.GasLimit == nil {
 		return errors.New("missing required field 'currentGasLimit' for stEnv")
 	}
@@ -73,6 +84,9 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
 		return errors.New("missing required field 'currentTimestamp' for stEnv")
 	}
 	s.Timestamp = uint64(*dec.Timestamp)
+	if dec.ParentTimestamp != nil {
+		s.ParentTimestamp = uint64(*dec.ParentTimestamp)
+	}
 	if dec.BlockHashes != nil {
 		s.BlockHashes = dec.BlockHashes
 	}
@@ -82,5 +96,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
 	if dec.BaseFee != nil {
 		s.BaseFee = (*big.Int)(dec.BaseFee)
 	}
+	if dec.ParentUncleHash != nil {
+		s.ParentUncleHash = *dec.ParentUncleHash
+	}
 	return nil
 }
cmd/evm/internal/t8ntool/transaction.go (new file, 136 lines)
@@ -0,0 +1,136 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package t8ntool
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math/big"
+	"os"
+	"strings"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/tests"
+	"gopkg.in/urfave/cli.v1"
+)
+
+type result struct {
+	Error   error
+	Address common.Address
+	Hash    common.Hash
+}
+
+// MarshalJSON marshals as JSON with a hash.
+func (r *result) MarshalJSON() ([]byte, error) {
+	type xx struct {
+		Error   string          `json:"error,omitempty"`
+		Address *common.Address `json:"address,omitempty"`
+		Hash    *common.Hash    `json:"hash,omitempty"`
+	}
+	var out xx
+	if r.Error != nil {
+		out.Error = r.Error.Error()
+	}
+	if r.Address != (common.Address{}) {
+		out.Address = &r.Address
+	}
+	if r.Hash != (common.Hash{}) {
+		out.Hash = &r.Hash
+	}
+	return json.Marshal(out)
+}
+
+func Transaction(ctx *cli.Context) error {
+	// Configure the go-ethereum logger
+	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
+	glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
+	log.Root().SetHandler(glogger)
+
+	var (
+		err error
+	)
+	// We need to load the transactions. May be either in stdin input or in files.
+	// Check if anything needs to be read from stdin
+	var (
+		txStr       = ctx.String(InputTxsFlag.Name)
+		inputData   = &input{}
+		chainConfig *params.ChainConfig
+	)
+	// Construct the chainconfig
+	if cConf, _, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
+		return NewError(ErrorVMConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
+	} else {
+		chainConfig = cConf
+	}
+	// Set the chain id
+	chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name))
+	var body hexutil.Bytes
+	if txStr == stdinSelector {
+		decoder := json.NewDecoder(os.Stdin)
+		if err := decoder.Decode(inputData); err != nil {
+			return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err))
+		}
+		// Decode the body of already signed transactions
+		body = common.FromHex(inputData.TxRlp)
+	} else {
|
// Read input from file
|
||||||
|
inFile, err := os.Open(txStr)
|
||||||
|
if err != nil {
|
||||||
|
return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err))
|
||||||
|
}
|
||||||
|
defer inFile.Close()
|
||||||
|
decoder := json.NewDecoder(inFile)
|
||||||
|
if strings.HasSuffix(txStr, ".rlp") {
|
||||||
|
if err := decoder.Decode(&body); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return NewError(ErrorIO, errors.New("only rlp supported"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
signer := types.MakeSigner(chainConfig, new(big.Int))
|
||||||
|
// We now have the transactions in 'body', which is supposed to be an
|
||||||
|
// rlp list of transactions
|
||||||
|
it, err := rlp.NewListIterator([]byte(body))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var results []result
|
||||||
|
for it.Next() {
|
||||||
|
var tx types.Transaction
|
||||||
|
err := rlp.DecodeBytes(it.Value(), &tx)
|
||||||
|
if err != nil {
|
||||||
|
results = append(results, result{Error: err})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
sender, err := types.Sender(signer, &tx)
|
||||||
|
if err != nil {
|
||||||
|
results = append(results, result{Error: err})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
results = append(results, result{Address: sender, Hash: tx.Hash()})
|
||||||
|
}
|
||||||
|
out, err := json.MarshalIndent(results, "", " ")
|
||||||
|
fmt.Println(string(out))
|
||||||
|
return err
|
||||||
|
}
|
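Assuming the new command is wired up under the `t9n` alias (see the `cmd/evm/main.go` hunk below), a plausible invocation against the testdata added later in this commit would be:

```
$ evm t9n --state.fork London --input.txs ./testdata/15/signed_txs.rlp
[
  {
    "address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
    "hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476"
  },
  ...
]
```

The output shape follows the `result` marshaling above; the sample values come from `testdata/15/exp2.json` further down.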
@@ -65,10 +65,15 @@ func (n *NumberedError) Error() string {
 	return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error())
 }

-func (n *NumberedError) Code() int {
+func (n *NumberedError) ExitCode() int {
 	return n.errorCode
 }

+// compile-time conformance test
+var (
+	_ cli.ExitCoder = (*NumberedError)(nil)
+)
+
 type input struct {
 	Alloc core.GenesisAlloc `json:"alloc,omitempty"`
 	Env   *stEnv            `json:"env,omitempty"`
@@ -76,7 +81,7 @@ type input struct {
 	TxRlp string `json:"txsRlp,omitempty"`
 }

-func Main(ctx *cli.Context) error {
+func Transition(ctx *cli.Context) error {
 	// Configure the go-ethereum logger
 	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
 	glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
@@ -102,10 +107,10 @@ func Main(ctx *cli.Context) error {
 	if ctx.Bool(TraceFlag.Name) {
 		// Configure the EVM logger
 		logConfig := &vm.LogConfig{
 			DisableStack:      ctx.Bool(TraceDisableStackFlag.Name),
-			DisableMemory:     ctx.Bool(TraceDisableMemoryFlag.Name),
-			DisableReturnData: ctx.Bool(TraceDisableReturnDataFlag.Name),
+			EnableMemory:      !ctx.Bool(TraceDisableMemoryFlag.Name),
+			EnableReturnData:  !ctx.Bool(TraceDisableReturnDataFlag.Name),
 			Debug:             true,
 		}
 		var prevFile *os.File
 		// This one closes the last file
@@ -252,6 +257,20 @@ func Main(ctx *cli.Context) error {
 			return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
 		}
 	}
+	if env := prestate.Env; env.Difficulty == nil {
+		// If difficulty was not provided by caller, we need to calculate it.
+		switch {
+		case env.ParentDifficulty == nil:
+			return NewError(ErrorVMConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
+		case env.Number == 0:
+			return NewError(ErrorVMConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
+		case env.Timestamp <= env.ParentTimestamp:
+			return NewError(ErrorVMConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
+				env.Timestamp, env.ParentTimestamp))
+		}
+		prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp,
+			env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash)
+	}
 	// Run the test and aggregate the result
 	s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
 	if err != nil {
@@ -395,7 +414,7 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a
 		return err
 	}
 	if len(stdOutObject) > 0 {
 		b, err := json.MarshalIndent(stdOutObject, "", "  ")
 		if err != nil {
 			return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
 		}
@@ -403,7 +422,7 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a
 		os.Stdout.Write([]byte("\n"))
 	}
 	if len(stdErrObject) > 0 {
 		b, err := json.MarshalIndent(stdErrObject, "", "  ")
 		if err != nil {
 			return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
 		}
@@ -113,7 +113,7 @@ var (
 		Name:  "receiver",
 		Usage: "The transaction receiver (execution context)",
 	}
-	DisableMemoryFlag = cli.BoolFlag{
+	DisableMemoryFlag = cli.BoolTFlag{
 		Name:  "nomemory",
 		Usage: "disable memory output",
 	}
@@ -125,9 +125,9 @@ var (
 		Name:  "nostorage",
 		Usage: "disable storage output",
 	}
-	DisableReturnDataFlag = cli.BoolFlag{
+	DisableReturnDataFlag = cli.BoolTFlag{
 		Name:  "noreturndata",
-		Usage: "disable return data output",
+		Usage: "enable return data output",
 	}
 )
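Note the switch from `cli.BoolFlag` to `cli.BoolTFlag`: in urfave/cli.v1 a `BoolTFlag` defaults to true, so memory and return-data capture are now off unless the user opts back in explicitly. A sketch of how the old, verbose trace might be requested (flag placement assumed, since these are global flags on the `evm` app):

```
$ evm --code 6040 --debug --nomemory=false --noreturndata=false run
```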
@@ -135,7 +135,7 @@ var stateTransitionCommand = cli.Command{
 	Name:    "transition",
 	Aliases: []string{"t8n"},
 	Usage:   "executes a full state transition",
-	Action:  t8ntool.Main,
+	Action:  t8ntool.Transition,
 	Flags: []cli.Flag{
 		t8ntool.TraceFlag,
 		t8ntool.TraceDisableMemoryFlag,
@@ -154,6 +154,18 @@ var stateTransitionCommand = cli.Command{
 		t8ntool.VerbosityFlag,
 	},
 }
+
+var transactionCommand = cli.Command{
+	Name:    "transaction",
+	Aliases: []string{"t9n"},
+	Usage:   "performs transaction validation",
+	Action:  t8ntool.Transaction,
+	Flags: []cli.Flag{
+		t8ntool.InputTxsFlag,
+		t8ntool.ChainIDFlag,
+		t8ntool.ForknameFlag,
+		t8ntool.VerbosityFlag,
+	},
+}

 func init() {
 	app.Flags = []cli.Flag{
@@ -187,6 +199,7 @@ func init() {
 		runCommand,
 		stateTestCommand,
 		stateTransitionCommand,
+		transactionCommand,
 	}
 	cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
 }
@@ -195,7 +208,7 @@ func main() {
 	if err := app.Run(os.Args); err != nil {
 		code := 1
 		if ec, ok := err.(*t8ntool.NumberedError); ok {
-			code = ec.Code()
+			code = ec.ExitCode()
 		}
 		fmt.Fprintln(os.Stderr, err)
 		os.Exit(code)
@@ -108,11 +108,11 @@ func runCmd(ctx *cli.Context) error {
 	glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
 	log.Root().SetHandler(glogger)
 	logconfig := &vm.LogConfig{
-		DisableMemory:     ctx.GlobalBool(DisableMemoryFlag.Name),
+		EnableMemory:      !ctx.GlobalBool(DisableMemoryFlag.Name),
 		DisableStack:      ctx.GlobalBool(DisableStackFlag.Name),
 		DisableStorage:    ctx.GlobalBool(DisableStorageFlag.Name),
-		DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name),
+		EnableReturnData:  !ctx.GlobalBool(DisableReturnDataFlag.Name),
 		Debug:             ctx.GlobalBool(DebugFlag.Name),
 	}

 	var (
@@ -59,10 +59,10 @@ func stateTestCmd(ctx *cli.Context) error {
 	// Configure the EVM logger
 	config := &vm.LogConfig{
-		DisableMemory:     ctx.GlobalBool(DisableMemoryFlag.Name),
+		EnableMemory:      !ctx.GlobalBool(DisableMemoryFlag.Name),
 		DisableStack:      ctx.GlobalBool(DisableStackFlag.Name),
 		DisableStorage:    ctx.GlobalBool(DisableStorageFlag.Name),
-		DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name),
+		EnableReturnData:  !ctx.GlobalBool(DisableReturnDataFlag.Name),
 	}
 	var (
 		tracer vm.Tracer
292 cmd/evm/t8n_test.go Normal file
@@ -0,0 +1,292 @@
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"reflect"
	"strings"
	"testing"

	"github.com/docker/docker/pkg/reexec"
	"github.com/ethereum/go-ethereum/internal/cmdtest"
)

func TestMain(m *testing.M) {
	// Run the app if we've been exec'd as "evm-test" in tt.Run below.
	reexec.Register("evm-test", func() {
		if err := app.Run(os.Args); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		os.Exit(0)
	})
	// check if we have been reexec'd
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}

type testT8n struct {
	*cmdtest.TestCmd
}

type t8nInput struct {
	inAlloc  string
	inTxs    string
	inEnv    string
	stFork   string
	stReward string
}

func (args *t8nInput) get(base string) []string {
	var out []string
	if opt := args.inAlloc; opt != "" {
		out = append(out, "--input.alloc")
		out = append(out, fmt.Sprintf("%v/%v", base, opt))
	}
	if opt := args.inTxs; opt != "" {
		out = append(out, "--input.txs")
		out = append(out, fmt.Sprintf("%v/%v", base, opt))
	}
	if opt := args.inEnv; opt != "" {
		out = append(out, "--input.env")
		out = append(out, fmt.Sprintf("%v/%v", base, opt))
	}
	if opt := args.stFork; opt != "" {
		out = append(out, "--state.fork", opt)
	}
	if opt := args.stReward; opt != "" {
		out = append(out, "--state.reward", opt)
	}
	return out
}

type t8nOutput struct {
	alloc  bool
	result bool
	body   bool
}

func (args *t8nOutput) get() (out []string) {
	if args.body {
		out = append(out, "--output.body", "stdout")
	} else {
		out = append(out, "--output.body", "") // empty means ignore
	}
	if args.result {
		out = append(out, "--output.result", "stdout")
	} else {
		out = append(out, "--output.result", "")
	}
	if args.alloc {
		out = append(out, "--output.alloc", "stdout")
	} else {
		out = append(out, "--output.alloc", "")
	}
	return out
}

func TestT8n(t *testing.T) {
	tt := new(testT8n)
	tt.TestCmd = cmdtest.NewTestCmd(t, tt)
	for i, tc := range []struct {
		base        string
		input       t8nInput
		output      t8nOutput
		expExitCode int
		expOut      string
	}{
		{ // Test exit (3) on bad config
			base: "./testdata/1",
			input: t8nInput{
				"alloc.json", "txs.json", "env.json", "Frontier+1346", "",
			},
			output:      t8nOutput{alloc: true, result: true},
			expExitCode: 3,
		},
		{
			base: "./testdata/1",
			input: t8nInput{
				"alloc.json", "txs.json", "env.json", "Byzantium", "",
			},
			output: t8nOutput{alloc: true, result: true},
			expOut: "exp.json",
		},
		{ // blockhash test
			base: "./testdata/3",
			input: t8nInput{
				"alloc.json", "txs.json", "env.json", "Berlin", "",
			},
			output: t8nOutput{alloc: true, result: true},
			expOut: "exp.json",
		},
		{ // missing blockhash test
			base: "./testdata/4",
			input: t8nInput{
				"alloc.json", "txs.json", "env.json", "Berlin", "",
			},
			output:      t8nOutput{alloc: true, result: true},
			expExitCode: 4,
		},
		{ // Ommer test
			base: "./testdata/5",
			input: t8nInput{
				"alloc.json", "txs.json", "env.json", "Byzantium", "0x80",
			},
			output: t8nOutput{alloc: true, result: true},
			expOut: "exp.json",
		},
		{ // Sign json transactions
			base: "./testdata/13",
			input: t8nInput{
				"alloc.json", "txs.json", "env.json", "London", "",
			},
			output: t8nOutput{body: true},
			expOut: "exp.json",
		},
		{ // Already signed transactions
			base: "./testdata/13",
			input: t8nInput{
				"alloc.json", "signed_txs.rlp", "env.json", "London", "",
			},
			output: t8nOutput{result: true},
			expOut: "exp2.json",
		},
		{ // Difficulty calculation - no uncles
			base: "./testdata/14",
			input: t8nInput{
				"alloc.json", "txs.json", "env.json", "London", "",
			},
			output: t8nOutput{result: true},
			expOut: "exp.json",
		},
		{ // Difficulty calculation - with uncles
			base: "./testdata/14",
			input: t8nInput{
				"alloc.json", "txs.json", "env.uncles.json", "London", "",
			},
			output: t8nOutput{result: true},
			expOut: "exp2.json",
		},
	} {
		args := []string{"t8n"}
		args = append(args, tc.output.get()...)
		args = append(args, tc.input.get(tc.base)...)
		tt.Run("evm-test", args...)
		tt.Logf("args: %v\n", strings.Join(args, " "))
		// Compare the expected output, if provided
		if tc.expOut != "" {
			want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
			if err != nil {
				t.Fatalf("test %d: could not read expected output: %v", i, err)
			}
			have := tt.Output()
			ok, err := cmpJson(have, want)
			switch {
			case err != nil:
				t.Fatalf("test %d, json parsing failed: %v", i, err)
			case !ok:
				t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
			}
		}
		tt.WaitExit()
		if have, want := tt.ExitStatus(), tc.expExitCode; have != want {
			t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want)
		}
	}
}

type t9nInput struct {
	inTxs  string
	stFork string
}

func (args *t9nInput) get(base string) []string {
	var out []string
	if opt := args.inTxs; opt != "" {
		out = append(out, "--input.txs")
		out = append(out, fmt.Sprintf("%v/%v", base, opt))
	}
	if opt := args.stFork; opt != "" {
		out = append(out, "--state.fork", opt)
	}
	return out
}

func TestT9n(t *testing.T) {
	tt := new(testT8n)
	tt.TestCmd = cmdtest.NewTestCmd(t, tt)
	for i, tc := range []struct {
		base        string
		input       t9nInput
		expExitCode int
		expOut      string
	}{
		{ // London txs on homestead
			base: "./testdata/15",
			input: t9nInput{
				inTxs:  "signed_txs.rlp",
				stFork: "Homestead",
			},
			expOut: "exp.json",
		},
		{ // London txs on London
			base: "./testdata/15",
			input: t9nInput{
				inTxs:  "signed_txs.rlp",
				stFork: "London",
			},
			expOut: "exp2.json",
		},
		{ // An RLP list (a blockheader really)
			base: "./testdata/15",
			input: t9nInput{
				inTxs:  "blockheader.rlp",
				stFork: "London",
			},
			expOut: "exp3.json",
		},
	} {
		args := []string{"t9n"}
		args = append(args, tc.input.get(tc.base)...)

		tt.Run("evm-test", args...)
		tt.Logf("args:\n go run . %v\n", strings.Join(args, " "))
		// Compare the expected output, if provided
		if tc.expOut != "" {
			want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
			if err != nil {
				t.Fatalf("test %d: could not read expected output: %v", i, err)
			}
			have := tt.Output()
			ok, err := cmpJson(have, want)
			switch {
			case err != nil:
				t.Logf(string(have))
				t.Fatalf("test %d, json parsing failed: %v", i, err)
			case !ok:
				t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
			}
		}
		tt.WaitExit()
		if have, want := tt.ExitStatus(), tc.expExitCode; have != want {
			t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want)
		}
	}
}

// cmpJson compares the JSON in two byte slices.
func cmpJson(a, b []byte) (bool, error) {
	var j, j2 interface{}
	if err := json.Unmarshal(a, &j); err != nil {
		return false, err
	}
	if err := json.Unmarshal(b, &j2); err != nil {
		return false, err
	}
	return reflect.DeepEqual(j2, j), nil
}
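Since the harness registers the binary under `evm-test` via reexec, the whole suite runs as ordinary Go tests; for instance:

```
$ go test ./cmd/evm -run 'TestT8n|TestT9n' -v
```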
43 cmd/evm/testdata/1/exp.json vendored Normal file
@@ -0,0 +1,43 @@
{
  "alloc": {
    "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": {
      "balance": "0xfeed1a9d",
      "nonce": "0x1"
    },
    "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
      "balance": "0x5ffd4878be161d74",
      "nonce": "0xac"
    },
    "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
      "balance": "0xa410"
    }
  },
  "result": {
    "stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
    "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
    "receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
    "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "receipts": [
      {
        "root": "0x",
        "status": "0x1",
        "cumulativeGasUsed": "0x5208",
        "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "logs": null,
        "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
        "contractAddress": "0x0000000000000000000000000000000000000000",
        "gasUsed": "0x5208",
        "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
        "transactionIndex": "0x0"
      }
    ],
    "rejected": [
      {
        "index": 1,
        "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
      }
    ],
    "currentDifficulty": "0x20000"
  }
}
3 cmd/evm/testdata/13/exp.json vendored Normal file
@@ -0,0 +1,3 @@
{
  "body": "0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
}
38 cmd/evm/testdata/13/exp2.json vendored Normal file
@@ -0,0 +1,38 @@
{
  "result": {
    "stateRoot": "0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61",
    "txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d",
    "receiptRoot": "0xa532a08aa9f62431d6fe5d924951b8efb86ed3c54d06fee77788c3767dd13420",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
    "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "receipts": [
      {
        "type": "0x2",
        "root": "0x",
        "status": "0x0",
        "cumulativeGasUsed": "0x84d0",
        "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "logs": null,
        "transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
        "contractAddress": "0x0000000000000000000000000000000000000000",
        "gasUsed": "0x84d0",
        "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
        "transactionIndex": "0x0"
      },
      {
        "type": "0x2",
        "root": "0x",
        "status": "0x0",
        "cumulativeGasUsed": "0x109a0",
        "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "logs": null,
        "transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
        "contractAddress": "0x0000000000000000000000000000000000000000",
        "gasUsed": "0x84d0",
        "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
        "transactionIndex": "0x1"
      }
    ],
    "currentDifficulty": "0x20000"
  }
}
1 cmd/evm/testdata/13/signed_txs.rlp vendored Normal file
@@ -0,0 +1 @@
"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
12 cmd/evm/testdata/14/alloc.json vendored Normal file
@@ -0,0 +1,12 @@
{
  "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
    "balance": "0x5ffd4878be161d74",
    "code": "0x",
    "nonce": "0xac",
    "storage": {}
  },
  "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{
    "balance": "0xfeedbead",
    "nonce" : "0x00"
  }
}
9 cmd/evm/testdata/14/env.json vendored Normal file
@@ -0,0 +1,9 @@
{
  "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
  "currentGasLimit": "0x750a163df65e8a",
  "currentBaseFee": "0x500",
  "currentNumber": "12800000",
  "currentTimestamp": "100015",
  "parentTimestamp" : "99999",
  "parentDifficulty" : "0x2000000000000"
}
10 cmd/evm/testdata/14/env.uncles.json vendored Normal file
@@ -0,0 +1,10 @@
{
  "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
  "currentGasLimit": "0x750a163df65e8a",
  "currentBaseFee": "0x500",
  "currentNumber": "12800000",
  "currentTimestamp": "100035",
  "parentTimestamp" : "99999",
  "parentDifficulty" : "0x2000000000000",
  "parentUncleHash" : "0x000000000000000000000000000000000000000000000000000000000000beef"
}
11 cmd/evm/testdata/14/exp.json vendored Normal file
@@ -0,0 +1,11 @@
{
  "result": {
    "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
    "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
    "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "currentDifficulty": "0x2000020000000",
    "receipts": []
  }
}
11 cmd/evm/testdata/14/exp2.json vendored Normal file
@@ -0,0 +1,11 @@
{
  "result": {
    "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
    "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
    "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "receipts": [],
    "currentDifficulty": "0x1ff8020000000"
  }
}
41 cmd/evm/testdata/14/readme.md vendored Normal file
@@ -0,0 +1,41 @@
## Difficulty calculation

This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller.

Calculating it (with an empty set of txs) using `London` rules (and no provided unclehash for the parent block):
```
[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=London
INFO [08-30|20:43:09.352] Trie dumping started                     root=6f0588..7f4bdc
INFO [08-30|20:43:09.352] Trie dumping complete                    accounts=2 elapsed="82.533µs"
INFO [08-30|20:43:09.352] Wrote file                               file=alloc.json
{
  "result": {
    "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
    "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
    "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "receipts": [],
    "currentDifficulty": "0x2000020000000"
  }
}
```
Same thing, but this time providing a non-empty (and non-`emptyKeccak`) unclehash, which leads to a slightly different result:
```
[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.uncles.json --output.result=stdout --state.fork=London
INFO [08-30|20:44:33.102] Trie dumping started                     root=6f0588..7f4bdc
INFO [08-30|20:44:33.102] Trie dumping complete                    accounts=2 elapsed="72.91µs"
INFO [08-30|20:44:33.102] Wrote file                               file=alloc.json
{
  "result": {
    "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
    "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
    "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "receipts": [],
    "currentDifficulty": "0x1ff8020000000"
  }
}
```
1 cmd/evm/testdata/14/txs.json vendored Normal file
@@ -0,0 +1 @@
[]
1 cmd/evm/testdata/15/blockheader.rlp vendored Normal file
@@ -0,0 +1 @@
"0xf901f0a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007b0101020383010203a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
8 cmd/evm/testdata/15/exp.json vendored Normal file
@@ -0,0 +1,8 @@
[
  {
    "error": "transaction type not supported"
  },
  {
    "error": "transaction type not supported"
  }
]
10 cmd/evm/testdata/15/exp2.json vendored Normal file
@@ -0,0 +1,10 @@
[
  {
    "address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
    "hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476"
  },
  {
    "address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
    "hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a"
  }
]
47 cmd/evm/testdata/15/exp3.json vendored Normal file
@@ -0,0 +1,47 @@
[
  {
    "error": "transaction type not supported"
  },
  {
    "error": "transaction type not supported"
  },
  {
    "error": "transaction type not supported"
  },
  {
    "error": "transaction type not supported"
  },
  {
    "error": "transaction type not supported"
  },
  {
    "error": "transaction type not supported"
  },
  {
    "error": "transaction type not supported"
  },
  {
    "error": "rlp: expected List"
  },
  {
    "error": "rlp: expected List"
  },
  {
    "error": "rlp: expected List"
  },
  {
    "error": "rlp: expected List"
  },
  {
    "error": "rlp: expected List"
  },
  {
    "error": "rlp: expected input list for types.AccessListTx"
  },
  {
    "error": "transaction type not supported"
  },
  {
    "error": "transaction type not supported"
  }
]
1 cmd/evm/testdata/15/signed_txs.rlp vendored Normal file
@@ -0,0 +1 @@
"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
4 cmd/evm/testdata/15/signed_txs.rlp.json vendored Normal file
@@ -0,0 +1,4 @@
{
  "txsRlp" : "0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
}
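The `.json` wrapper matches the stdin path of `Transaction` (a JSON object carrying a `txsRlp` field), so the same transactions can be validated without an input file; a plausible invocation:

```
$ evm t9n --state.fork London --input.txs stdin < testdata/15/signed_txs.rlp.json
```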
37 cmd/evm/testdata/3/exp.json vendored Normal file
@@ -0,0 +1,37 @@
{
  "alloc": {
    "0x095e7baea6a6c7c4c2dfeb977efac326af552d87": {
      "code": "0x600140",
      "balance": "0xde0b6b3a76586a0"
    },
    "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": {
      "balance": "0x521f"
    },
    "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
      "balance": "0xde0b6b3a7622741",
      "nonce": "0x1"
    }
  },
  "result": {
    "stateRoot": "0xb7341da3f9f762a6884eaa186c32942734c146b609efee11c4b0214c44857ea1",
    "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b",
    "receiptRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
    "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "receipts": [
      {
        "root": "0x",
        "status": "0x1",
        "cumulativeGasUsed": "0x521f",
        "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "logs": null,
        "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
        "contractAddress": "0x0000000000000000000000000000000000000000",
        "gasUsed": "0x521f",
        "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
        "transactionIndex": "0x0"
      }
    ],
    "currentDifficulty": "0x20000"
  }
}
22 cmd/evm/testdata/5/exp.json vendored Normal file
@@ -0,0 +1,22 @@
{
  "alloc": {
    "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": {
      "balance": "0x88"
    },
    "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": {
      "balance": "0x70"
    },
    "0xcccccccccccccccccccccccccccccccccccccccc": {
      "balance": "0x60"
    }
  },
  "result": {
    "stateRoot": "0xa7312add33811645c6aa65d928a1a4f49d65d448801912c069a0aa8fe9c1f393",
    "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
    "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    "receipts": [],
    "currentDifficulty": "0x20000"
  }
}
@@ -268,11 +268,16 @@ func accountCreate(ctx *cli.Context) error {
 		}
 	}
 	utils.SetNodeConfig(ctx, &cfg.Node)
-	scryptN, scryptP, keydir, err := cfg.Node.AccountConfig()
+	keydir, err := cfg.Node.KeyDirConfig()
 	if err != nil {
 		utils.Fatalf("Failed to read configuration: %v", err)
 	}
+	scryptN := keystore.StandardScryptN
+	scryptP := keystore.StandardScryptP
+	if cfg.Node.UseLightweightKDF {
+		scryptN = keystore.LightScryptN
+		scryptP = keystore.LightScryptP
+	}

 	password := utils.GetPassPhraseWithList("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
@@ -27,6 +27,10 @@ import (
 	"gopkg.in/urfave/cli.v1"

+	"github.com/ethereum/go-ethereum/accounts/external"
+	"github.com/ethereum/go-ethereum/accounts/keystore"
+	"github.com/ethereum/go-ethereum/accounts/scwallet"
+	"github.com/ethereum/go-ethereum/accounts/usbwallet"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/eth/catalyst"
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
@@ -135,6 +139,11 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
 	if err != nil {
 		utils.Fatalf("Failed to create the protocol stack: %v", err)
 	}
+	// Node doesn't by default populate account manager backends
+	if err := setAccountManagerBackends(stack); err != nil {
+		utils.Fatalf("Failed to set account manager backends: %v", err)
+	}
+
 	utils.SetEthConfig(ctx, stack, &cfg.Eth)
 	if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) {
 		cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name)
@@ -257,3 +266,62 @@ func deprecated(field string) bool {
 		return false
 	}
 }
+
+func setAccountManagerBackends(stack *node.Node) error {
+	conf := stack.Config()
+	am := stack.AccountManager()
+	keydir := stack.KeyStoreDir()
+	scryptN := keystore.StandardScryptN
+	scryptP := keystore.StandardScryptP
+	if conf.UseLightweightKDF {
+		scryptN = keystore.LightScryptN
+		scryptP = keystore.LightScryptP
+	}
+
+	// Assemble the supported backends
+	if len(conf.ExternalSigner) > 0 {
+		log.Info("Using external signer", "url", conf.ExternalSigner)
+		if extapi, err := external.NewExternalBackend(conf.ExternalSigner); err == nil {
+			am.AddBackend(extapi)
+			return nil
+		} else {
+			return fmt.Errorf("error connecting to external signer: %v", err)
+		}
+	}
+
+	// For now, we're using EITHER external signer OR local signers.
+	// If/when we implement some form of lockfile for USB and keystore wallets,
+	// we can have both, but it's very confusing for the user to see the same
+	// accounts in both externally and locally, plus very racey.
+	am.AddBackend(keystore.NewKeyStore(keydir, scryptN, scryptP))
+	if conf.USB {
+		// Start a USB hub for Ledger hardware wallets
+		if ledgerhub, err := usbwallet.NewLedgerHub(); err != nil {
+			log.Warn(fmt.Sprintf("Failed to start Ledger hub, disabling: %v", err))
+		} else {
+			am.AddBackend(ledgerhub)
+		}
+		// Start a USB hub for Trezor hardware wallets (HID version)
+		if trezorhub, err := usbwallet.NewTrezorHubWithHID(); err != nil {
+			log.Warn(fmt.Sprintf("Failed to start HID Trezor hub, disabling: %v", err))
+		} else {
+			am.AddBackend(trezorhub)
+		}
+		// Start a USB hub for Trezor hardware wallets (WebUSB version)
+		if trezorhub, err := usbwallet.NewTrezorHubWithWebUSB(); err != nil {
+			log.Warn(fmt.Sprintf("Failed to start WebUSB Trezor hub, disabling: %v", err))
+		} else {
+			am.AddBackend(trezorhub)
+		}
+	}
+	if len(conf.SmartCardDaemonPath) > 0 {
+		// Start a smart card hub
+		if schub, err := scwallet.NewHub(conf.SmartCardDaemonPath, scwallet.Scheme, keydir); err != nil {
+			log.Warn(fmt.Sprintf("Failed to start smart card hub, disabling: %v", err))
+		} else {
+			am.AddBackend(schub)
+		}
+	}
+
+	return nil
+}
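With the backends assembled here rather than inside `node.Node`, signer selection is driven purely by config: an external signer short-circuits before any local keystore or USB hub is added. For example, routing all signing through clef (using the `--signer` flag from cmd/utils; the ipc path is assumed):

```
$ geth --signer ~/.clef/clef.ipc
```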
@@ -75,7 +75,7 @@ at block: 0 ({{niltime}})
 datadir: {{.Datadir}}
 modules: {{apis}}

-To exit, press ctrl-d
+To exit, press ctrl-d or type exit
 > {{.InputLine "exit"}}
 `)
 	geth.ExpectExit()
@@ -149,7 +149,7 @@ at block: 0 ({{niltime}}){{if ipc}}
 datadir: {{datadir}}{{end}}
 modules: {{apis}}

-To exit, press ctrl-d
+To exit, press ctrl-d or type exit
 > {{.InputLine "exit" }}
 `)
 	attach.ExpectExit()
@@ -127,7 +127,7 @@ var (
 		utils.MinerEtherbaseFlag,
 		utils.MinerExtraDataFlag,
 		utils.MinerRecommitIntervalFlag,
-		utils.MinerNoVerfiyFlag,
+		utils.MinerNoVerifyFlag,
 		utils.NATFlag,
 		utils.NoDiscoverFlag,
 		utils.DiscoveryV5Flag,
@@ -161,12 +161,6 @@ var (
 		utils.HTTPPortFlag,
 		utils.HTTPCORSDomainFlag,
 		utils.HTTPVirtualHostsFlag,
-		utils.LegacyRPCEnabledFlag,
-		utils.LegacyRPCListenAddrFlag,
-		utils.LegacyRPCPortFlag,
-		utils.LegacyRPCCORSDomainFlag,
-		utils.LegacyRPCVirtualHostsFlag,
-		utils.LegacyRPCApiFlag,
 		utils.GraphQLEnabledFlag,
 		utils.GraphQLCORSDomainFlag,
 		utils.GraphQLVirtualHostsFlag,
@ -29,6 +29,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/state/pruner"
|
"github.com/ethereum/go-ethereum/core/state/pruner"
|
||||||
"github.com/ethereum/go-ethereum/core/state/snapshot"
|
"github.com/ethereum/go-ethereum/core/state/snapshot"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
@ -232,7 +233,7 @@ func verifyState(ctx *cli.Context) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := snaptree.Verify(root); err != nil {
|
if err := snaptree.Verify(root); err != nil {
|
||||||
log.Error("Failed to verfiy state", "root", root, "err", err)
|
log.Error("Failed to verify state", "root", root, "err", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
log.Info("Verified the state", "root", root)
|
log.Info("Verified the state", "root", root)
|
||||||
@ -287,7 +288,7 @@ func traverseState(ctx *cli.Context) error {
|
|||||||
accIter := trie.NewIterator(t.NodeIterator(nil))
|
accIter := trie.NewIterator(t.NodeIterator(nil))
|
||||||
for accIter.Next() {
|
for accIter.Next() {
|
||||||
accounts += 1
|
accounts += 1
|
||||||
var acc state.Account
|
var acc types.StateAccount
|
||||||
if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil {
|
if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil {
|
||||||
log.Error("Invalid account encountered during traversal", "err", err)
|
log.Error("Invalid account encountered during traversal", "err", err)
|
||||||
return err
|
return err
|
||||||
@ -393,7 +394,7 @@ func traverseRawState(ctx *cli.Context) error {
|
|||||||
// dig into the storage trie further.
|
// dig into the storage trie further.
|
||||||
if accIter.Leaf() {
|
if accIter.Leaf() {
|
||||||
accounts += 1
|
accounts += 1
|
||||||
var acc state.Account
|
var acc types.StateAccount
|
||||||
if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil {
|
if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil {
|
||||||
log.Error("Invalid account encountered during traversal", "err", err)
|
log.Error("Invalid account encountered during traversal", "err", err)
|
||||||
return errors.New("invalid account")
|
return errors.New("invalid account")
|
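The two hunks above migrate the snapshot tooling from state.Account to types.StateAccount; only the struct's home package changed, so callers decoding trie leaves update the type and keep the same RLP call. A minimal sketch under that assumption (the helper below is illustrative, not part of this commit):

package sketch

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// decodeAccount decodes an RLP-encoded account leaf (e.g. accIter.Value
// from a trie iterator) into the relocated struct.
func decodeAccount(blob []byte) (*types.StateAccount, error) {
	var acc types.StateAccount // previously state.Account
	if err := rlp.DecodeBytes(blob, &acc); err != nil {
		return nil, err
	}
	return &acc, nil
}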
@@ -185,7 +185,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 utils.MinerEtherbaseFlag,
 utils.MinerExtraDataFlag,
 utils.MinerRecommitIntervalFlag,
-utils.MinerNoVerfiyFlag,
+utils.MinerNoVerifyFlag,
 },
 },
 {
@@ -218,13 +218,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 Name: "ALIASED (deprecated)",
 Flags: []cli.Flag{
 utils.NoUSBFlag,
-utils.LegacyRPCEnabledFlag,
-utils.LegacyRPCListenAddrFlag,
-utils.LegacyRPCPortFlag,
-utils.LegacyRPCCORSDomainFlag,
-utils.LegacyRPCVirtualHostsFlag,
-utils.LegacyRPCApiFlag,
-utils.LegacyMinerGasTargetFlag,
 },
 },
 {
@@ -35,8 +35,8 @@ FROM puppeth/blockscout:latest
 ADD genesis.json /genesis.json
 RUN \
 echo 'geth --cache 512 init /genesis.json' > explorer.sh && \
-echo $'geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,shh,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" --exitwhensynced' >> explorer.sh && \
-echo $'exec geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,shh,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" &' >> explorer.sh && \
+echo $'geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" --exitwhensynced' >> explorer.sh && \
+echo $'exec geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" &' >> explorer.sh && \
 echo '/usr/local/bin/docker-entrypoint.sh postgres &' >> explorer.sh && \
 echo 'sleep 5' >> explorer.sh && \
 echo 'mix do ecto.drop --force, ecto.create, ecto.migrate' >> explorer.sh && \
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
+//go:build !windows && !openbsd
 // +build !windows,!openbsd
 
 package utils
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
+//go:build openbsd
 // +build openbsd
 
 package utils
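Both files gain the Go 1.17 //go:build constraint line next to the legacy // +build comment, matching the toolchain bump elsewhere in this merge. A minimal illustration of the paired syntax (file contents hypothetical):

//go:build !windows && !openbsd
// +build !windows,!openbsd

// The two lines must agree: the new form uses boolean operators (&&, ||, !),
// while the old form encodes AND as a comma and OR as a space. Once the
// //go:build line exists, gofmt keeps the legacy line in sync automatically.
package utils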
@@ -460,7 +460,7 @@ var (
 Usage: "Time interval to recreate the block being mined",
 Value: ethconfig.Defaults.Miner.Recommit,
 }
-MinerNoVerfiyFlag = cli.BoolFlag{
+MinerNoVerifyFlag = cli.BoolFlag{
 Name: "miner.noverify",
 Usage: "Disable remote sealing verification",
 }
@@ -920,14 +920,6 @@ func SplitAndTrim(input string) (ret []string) {
 // setHTTP creates the HTTP RPC listener interface string from the set
 // command line flags, returning empty if the HTTP endpoint is disabled.
 func setHTTP(ctx *cli.Context, cfg *node.Config) {
-if ctx.GlobalBool(LegacyRPCEnabledFlag.Name) && cfg.HTTPHost == "" {
-log.Warn("The flag --rpc is deprecated and will be removed June 2021, please use --http")
-cfg.HTTPHost = "127.0.0.1"
-if ctx.GlobalIsSet(LegacyRPCListenAddrFlag.Name) {
-cfg.HTTPHost = ctx.GlobalString(LegacyRPCListenAddrFlag.Name)
-log.Warn("The flag --rpcaddr is deprecated and will be removed June 2021, please use --http.addr")
-}
-}
 if ctx.GlobalBool(HTTPEnabledFlag.Name) && cfg.HTTPHost == "" {
 cfg.HTTPHost = "127.0.0.1"
 if ctx.GlobalIsSet(HTTPListenAddrFlag.Name) {
@@ -935,34 +927,18 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
 }
 }
 
-if ctx.GlobalIsSet(LegacyRPCPortFlag.Name) {
-cfg.HTTPPort = ctx.GlobalInt(LegacyRPCPortFlag.Name)
-log.Warn("The flag --rpcport is deprecated and will be removed June 2021, please use --http.port")
-}
 if ctx.GlobalIsSet(HTTPPortFlag.Name) {
 cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name)
 }
 
-if ctx.GlobalIsSet(LegacyRPCCORSDomainFlag.Name) {
-cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(LegacyRPCCORSDomainFlag.Name))
-log.Warn("The flag --rpccorsdomain is deprecated and will be removed June 2021, please use --http.corsdomain")
-}
 if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
 cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
 }
 
-if ctx.GlobalIsSet(LegacyRPCApiFlag.Name) {
-cfg.HTTPModules = SplitAndTrim(ctx.GlobalString(LegacyRPCApiFlag.Name))
-log.Warn("The flag --rpcapi is deprecated and will be removed June 2021, please use --http.api")
-}
 if ctx.GlobalIsSet(HTTPApiFlag.Name) {
 cfg.HTTPModules = SplitAndTrim(ctx.GlobalString(HTTPApiFlag.Name))
 }
 
-if ctx.GlobalIsSet(LegacyRPCVirtualHostsFlag.Name) {
-cfg.HTTPVirtualHosts = SplitAndTrim(ctx.GlobalString(LegacyRPCVirtualHostsFlag.Name))
-log.Warn("The flag --rpcvhosts is deprecated and will be removed June 2021, please use --http.vhosts")
-}
 if ctx.GlobalIsSet(HTTPVirtualHostsFlag.Name) {
 cfg.HTTPVirtualHosts = SplitAndTrim(ctx.GlobalString(HTTPVirtualHostsFlag.Name))
 }
@@ -1398,8 +1374,8 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
 if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) {
 cfg.Recommit = ctx.GlobalDuration(MinerRecommitIntervalFlag.Name)
 }
-if ctx.GlobalIsSet(MinerNoVerfiyFlag.Name) {
-cfg.Noverify = ctx.GlobalBool(MinerNoVerfiyFlag.Name)
+if ctx.GlobalIsSet(MinerNoVerifyFlag.Name) {
+cfg.Noverify = ctx.GlobalBool(MinerNoVerifyFlag.Name)
 }
 if ctx.GlobalIsSet(LegacyMinerGasTargetFlag.Name) {
 log.Warn("The generic --miner.gastarget flag is deprecated and will be removed in the future!")
@@ -18,10 +18,8 @@ package utils
 
 import (
 "fmt"
-"strings"
 
 "github.com/ethereum/go-ethereum/eth/ethconfig"
-"github.com/ethereum/go-ethereum/node"
 "gopkg.in/urfave/cli.v1"
 )
 
@@ -45,35 +43,6 @@ var (
 Name: "nousb",
 Usage: "Disables monitoring for and managing USB hardware wallets (deprecated)",
 }
-LegacyRPCEnabledFlag = cli.BoolFlag{
-Name: "rpc",
-Usage: "Enable the HTTP-RPC server (deprecated and will be removed June 2021, use --http)",
-}
-LegacyRPCListenAddrFlag = cli.StringFlag{
-Name: "rpcaddr",
-Usage: "HTTP-RPC server listening interface (deprecated and will be removed June 2021, use --http.addr)",
-Value: node.DefaultHTTPHost,
-}
-LegacyRPCPortFlag = cli.IntFlag{
-Name: "rpcport",
-Usage: "HTTP-RPC server listening port (deprecated and will be removed June 2021, use --http.port)",
-Value: node.DefaultHTTPPort,
-}
-LegacyRPCCORSDomainFlag = cli.StringFlag{
-Name: "rpccorsdomain",
-Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced) (deprecated and will be removed June 2021, use --http.corsdomain)",
-Value: "",
-}
-LegacyRPCVirtualHostsFlag = cli.StringFlag{
-Name: "rpcvhosts",
-Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (deprecated and will be removed June 2021, use --http.vhosts)",
-Value: strings.Join(node.DefaultConfig.HTTPVirtualHosts, ","),
-}
-LegacyRPCApiFlag = cli.StringFlag{
-Name: "rpcapi",
-Usage: "API's offered over the HTTP-RPC interface (deprecated and will be removed June 2021, use --http.api)",
-Value: "",
-}
 // (Deprecated July 2021, shown in aliased flags section)
 LegacyMinerGasTargetFlag = cli.Uint64Flag{
 Name: "miner.gastarget",
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
+//go:build freebsd || dragonfly
 // +build freebsd dragonfly
 
 package fdlimit
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
+//go:build linux || netbsd || openbsd || solaris
 // +build linux netbsd openbsd solaris
 
 package fdlimit
@@ -86,7 +86,7 @@ func (h Hash) String() string {
 }
 
 // Format implements fmt.Formatter.
-// Hash supports the %v, %s, %v, %x, %X and %d format verbs.
+// Hash supports the %v, %s, %q, %x, %X and %d format verbs.
 func (h Hash) Format(s fmt.State, c rune) {
 hexb := make([]byte, 2+len(h)*2)
 copy(hexb, "0x")
@@ -270,7 +270,7 @@ func (a Address) hex() []byte {
 }
 
 // Format implements fmt.Formatter.
-// Address supports the %v, %s, %v, %x, %X and %d format verbs.
+// Address supports the %v, %s, %q, %x, %X and %d format verbs.
 func (a Address) Format(s fmt.State, c rune) {
 switch c {
 case 'v', 's':
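The corrected comments document that Hash and Address implement fmt.Formatter with a dedicated %q verb rather than listing %v twice. A small sketch of the observable difference (values arbitrary):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	h := common.HexToHash("0x1234")
	fmt.Printf("%s\n", h) // full 0x-prefixed hex
	fmt.Printf("%q\n", h) // the same hex wrapped in double quotes
	fmt.Printf("%x\n", h) // hex without the 0x prefix
}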
@@ -324,7 +324,7 @@ func (c *Console) Welcome() {
 sort.Strings(modules)
 message += " modules: " + strings.Join(modules, " ") + "\n"
 }
-message += "\nTo exit, press ctrl-d"
+message += "\nTo exit, press ctrl-d or type exit"
 fmt.Fprintln(c.printer, message)
 }
 
@@ -207,8 +207,7 @@ type BlockChain struct {
 processor Processor // Block transaction processor interface
 vmConfig vm.Config
 
 shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.
-terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
 }
 
 // NewBlockChain returns a fully initialised block chain using information
@@ -1085,38 +1084,6 @@ const (
 SideStatTy
 )
 
-// truncateAncient rewinds the blockchain to the specified header and deletes all
-// data in the ancient store that exceeds the specified header.
-func (bc *BlockChain) truncateAncient(head uint64) error {
-frozen, err := bc.db.Ancients()
-if err != nil {
-return err
-}
-// Short circuit if there is no data to truncate in ancient store.
-if frozen <= head+1 {
-return nil
-}
-// Truncate all the data in the freezer beyond the specified head
-if err := bc.db.TruncateAncients(head + 1); err != nil {
-return err
-}
-// Clear out any stale content from the caches
-bc.hc.headerCache.Purge()
-bc.hc.tdCache.Purge()
-bc.hc.numberCache.Purge()
-
-// Clear out any stale content from the caches
-bc.bodyCache.Purge()
-bc.bodyRLPCache.Purge()
-bc.receiptsCache.Purge()
-bc.blockCache.Purge()
-bc.txLookupCache.Purge()
-bc.futureBlocks.Purge()
-
-log.Info("Rewind ancient data", "number", head)
-return nil
-}
-
 // numberHash is just a container for a number and a hash, to represent a block
 type numberHash struct {
 number uint64
@@ -1155,12 +1122,14 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 var (
 stats = struct{ processed, ignored int32 }{}
 start = time.Now()
-size = 0
+size = int64(0)
 )
 
 // updateHead updates the head fast sync block if the inserted blocks are better
 // and returns an indicator whether the inserted blocks are canonical.
 updateHead := func(head *types.Block) bool {
 bc.chainmu.Lock()
+defer bc.chainmu.Unlock()
 
 // Rewind may have occurred, skip in that case.
 if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
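updateHead now pairs the lock with a deferred unlock instead of calling Unlock on each return path (the explicit unlocks disappear in the next hunk). A generic sketch of why the deferred form is preferable:

package sketch

import "sync"

// withLock runs fn while holding mu. The deferred unlock fires on every
// return path and on panic, so no branch can leak the mutex the way a
// manually placed Unlock on each early return can.
func withLock(mu *sync.Mutex, fn func() bool) bool {
	mu.Lock()
	defer mu.Unlock()
	return fn()
}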
@@ -1169,68 +1138,63 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
 bc.currentFastBlock.Store(head)
 headFastBlockGauge.Update(int64(head.NumberU64()))
-bc.chainmu.Unlock()
 return true
 }
 }
-bc.chainmu.Unlock()
 return false
 }
 
 // writeAncient writes blockchain and corresponding receipt chain into ancient store.
 //
 // this function only accepts canonical chain data. All side chain will be reverted
 // eventually.
 writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
-var (
-previous = bc.CurrentFastBlock()
-batch = bc.db.NewBatch()
-)
-// If any error occurs before updating the head or we are inserting a side chain,
-// all the data written this time wll be rolled back.
-defer func() {
-if previous != nil {
-if err := bc.truncateAncient(previous.NumberU64()); err != nil {
-log.Crit("Truncate ancient store failed", "err", err)
-}
-}
-}()
-var deleted []*numberHash
-for i, block := range blockChain {
-// Short circuit insertion if shutting down or processing failed
-if bc.insertStopped() {
-return 0, errInsertionInterrupted
-}
-// Short circuit insertion if it is required(used in testing only)
-if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) {
-return i, errors.New("insertion is terminated for testing purpose")
-}
-// Short circuit if the owner header is unknown
-if !bc.HasHeader(block.Hash(), block.NumberU64()) {
-return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
-}
-if block.NumberU64() == 1 {
-// Make sure to write the genesis into the freezer
-if frozen, _ := bc.db.Ancients(); frozen == 0 {
-h := rawdb.ReadCanonicalHash(bc.db, 0)
-b := rawdb.ReadBlock(bc.db, h, 0)
-size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, 0, bc.chainConfig), rawdb.ReadTd(bc.db, h, 0))
-log.Info("Wrote genesis to ancients")
-}
-}
-// Flush data into ancient database.
-size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
+first := blockChain[0]
+last := blockChain[len(blockChain)-1]
 
-// Write tx indices if any condition is satisfied:
-// * If user requires to reserve all tx indices(txlookuplimit=0)
-// * If all ancient tx indices are required to be reserved(txlookuplimit is even higher than ancientlimit)
-// * If block number is large enough to be regarded as a recent block
-// It means blocks below the ancientLimit-txlookupLimit won't be indexed.
-//
-// But if the `TxIndexTail` is not nil, e.g. Geth is initialized with
-// an external ancient database, during the setup, blockchain will start
-// a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients)
-// range. In this case, all tx indices of newly imported blocks should be
-// generated.
+// Ensure genesis is in ancients.
+if first.NumberU64() == 1 {
+if frozen, _ := bc.db.Ancients(); frozen == 0 {
+b := bc.genesisBlock
+td := bc.genesisBlock.Difficulty()
+writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{b}, []types.Receipts{nil}, td)
+size += writeSize
+if err != nil {
+log.Error("Error writing genesis to ancients", "err", err)
+return 0, err
+}
+log.Info("Wrote genesis to ancients")
+}
+}
+// Before writing the blocks to the ancients, we need to ensure that
+// they correspond to the what the headerchain 'expects'.
+// We only check the last block/header, since it's a contiguous chain.
+if !bc.HasHeader(last.Hash(), last.NumberU64()) {
+return 0, fmt.Errorf("containing header #%d [%x..] unknown", last.Number(), last.Hash().Bytes()[:4])
+}
+
+// Write all chain data to ancients.
+td := bc.GetTd(first.Hash(), first.NumberU64())
+writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td)
+size += writeSize
+if err != nil {
+log.Error("Error importing chain data to ancients", "err", err)
+return 0, err
+}
+
+// Write tx indices if any condition is satisfied:
+// * If user requires to reserve all tx indices(txlookuplimit=0)
+// * If all ancient tx indices are required to be reserved(txlookuplimit is even higher than ancientlimit)
+// * If block number is large enough to be regarded as a recent block
+// It means blocks below the ancientLimit-txlookupLimit won't be indexed.
+//
+// But if the `TxIndexTail` is not nil, e.g. Geth is initialized with
+// an external ancient database, during the setup, blockchain will start
+// a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients)
+// range. In this case, all tx indices of newly imported blocks should be
+// generated.
+var batch = bc.db.NewBatch()
+for _, block := range blockChain {
 if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
 rawdb.WriteTxLookupEntriesByBlock(batch, block)
 } else if rawdb.ReadTxIndexTail(bc.db) != nil {
@@ -1238,51 +1202,50 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 }
 stats.processed++
 }
 
 // Flush all tx-lookup index data.
-size += batch.ValueSize()
+size += int64(batch.ValueSize())
 if err := batch.Write(); err != nil {
+// The tx index data could not be written.
+// Roll back the ancient store update.
+fastBlock := bc.CurrentFastBlock().NumberU64()
+if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
+log.Error("Can't truncate ancient store after failed insert", "err", err)
+}
 return 0, err
 }
-batch.Reset()
 
 // Sync the ancient store explicitly to ensure all data has been flushed to disk.
 if err := bc.db.Sync(); err != nil {
 return 0, err
 }
 
+// Update the current fast block because all block data is now present in DB.
+previousFastBlock := bc.CurrentFastBlock().NumberU64()
 if !updateHead(blockChain[len(blockChain)-1]) {
-return 0, errors.New("side blocks can't be accepted as the ancient chain data")
-}
-previous = nil // disable rollback explicitly
-
-// Wipe out canonical block data.
-for _, nh := range deleted {
-rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
-rawdb.DeleteCanonicalHash(batch, nh.number)
-}
-for _, block := range blockChain {
-// Always keep genesis block in active database.
-if block.NumberU64() != 0 {
-rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
-rawdb.DeleteCanonicalHash(batch, block.NumberU64())
-}
+// We end up here if the header chain has reorg'ed, and the blocks/receipts
+// don't match the canonical chain.
+if err := bc.db.TruncateAncients(previousFastBlock + 1); err != nil {
+log.Error("Can't truncate ancient store after failed insert", "err", err)
+}
 }
+return 0, errSideChainReceipts
 }
 
-if err := batch.Write(); err != nil {
-return 0, err
-}
+// Delete block data from the main database.
 batch.Reset()
-// Wipe out side chain too.
-for _, nh := range deleted {
-for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
-rawdb.DeleteBlock(batch, hash, nh.number)
-}
-}
+canonHashes := make(map[common.Hash]struct{})
 for _, block := range blockChain {
-// Always keep genesis block in active database.
-if block.NumberU64() != 0 {
-for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
-rawdb.DeleteBlock(batch, hash, block.NumberU64())
-}
-}
+canonHashes[block.Hash()] = struct{}{}
+if block.NumberU64() == 0 {
+continue
+}
+rawdb.DeleteCanonicalHash(batch, block.NumberU64())
+rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
+}
+// Delete side chain hash-to-number mappings.
+for _, nh := range rawdb.ReadAllHashesInRange(bc.db, first.NumberU64(), last.NumberU64()) {
+if _, canon := canonHashes[nh.Hash]; !canon {
+rawdb.DeleteHeader(batch, nh.Hash, nh.Number)
+}
 }
 if err := batch.Write(); err != nil {
@@ -1290,6 +1253,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 }
 return 0, nil
 }
 
 // writeLive writes blockchain and corresponding receipt chain into active store.
 writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
 skipPresenceCheck := false
@@ -1327,7 +1291,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 if err := batch.Write(); err != nil {
 return 0, err
 }
-size += batch.ValueSize()
+size += int64(batch.ValueSize())
 batch.Reset()
 }
 stats.processed++
@@ -1336,7 +1300,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 // we can ensure all components of body is completed(body, receipts,
 // tx indexes)
 if batch.ValueSize() > 0 {
-size += batch.ValueSize()
+size += int64(batch.ValueSize())
 if err := batch.Write(); err != nil {
 return 0, err
 }
@@ -1344,6 +1308,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 updateHead(blockChain[len(blockChain)-1])
 return 0, nil
 }
 
 // Write downloaded chain data and corresponding receipt chain data
 if len(ancientBlocks) > 0 {
 if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
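The rework above drops the deferred truncateAncient rollback in favour of explicit truncation at the two points that can fail after freezer data was already written: the tx-index batch write and the head update. A condensed sketch of that discipline (the wrapper is illustrative, not the literal method):

package sketch

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
)

// rollbackOnError truncates the freezer back to the pre-insert head when a
// follow-up write fails, mirroring the new inline error paths above.
// head is the block number that was current before the insert began.
func rollbackOnError(db ethdb.Database, head uint64, write func() error) error {
	if err := write(); err != nil {
		// Items 0..head stay; everything appended by the failed insert goes.
		if terr := db.TruncateAncients(head + 1); terr != nil {
			log.Error("Can't truncate ancient store after failed insert", "err", terr)
		}
		return err
	}
	return nil
}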
@@ -670,6 +670,7 @@ func TestFastVsFullChains(t *testing.T) {
 if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(len(blocks)/2)); err != nil {
 t.Fatalf("failed to insert receipt %d: %v", n, err)
 }
 
 // Iterate over all chain data components, and cross reference
 for i := 0; i < len(blocks); i++ {
 num, hash := blocks[i].NumberU64(), blocks[i].Hash()
@@ -693,10 +694,27 @@ func TestFastVsFullChains(t *testing.T) {
 } else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) {
 t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles())
 }
-if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) {
+// Check receipts.
+freceipts := rawdb.ReadReceipts(fastDb, hash, num, fast.Config())
+anreceipts := rawdb.ReadReceipts(ancientDb, hash, num, fast.Config())
+areceipts := rawdb.ReadReceipts(archiveDb, hash, num, fast.Config())
+if types.DeriveSha(freceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) {
 t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts)
 }
 
+// Check that hash-to-number mappings are present in all databases.
+if m := rawdb.ReadHeaderNumber(fastDb, hash); m == nil || *m != num {
+t.Errorf("block #%d [%x]: wrong hash-to-number mapping in fastdb: %v", num, hash, m)
+}
+if m := rawdb.ReadHeaderNumber(ancientDb, hash); m == nil || *m != num {
+t.Errorf("block #%d [%x]: wrong hash-to-number mapping in ancientdb: %v", num, hash, m)
+}
+if m := rawdb.ReadHeaderNumber(archiveDb, hash); m == nil || *m != num {
+t.Errorf("block #%d [%x]: wrong hash-to-number mapping in archivedb: %v", num, hash, m)
+}
 }
 
 // Check that the canonical chains are the same between the databases
 for i := 0; i < len(blocks)+1; i++ {
 if fhash, ahash := rawdb.ReadCanonicalHash(fastDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
|
||||||
@ -1639,20 +1657,34 @@ func TestBlockchainRecovery(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIncompleteAncientReceiptChainInsertion(t *testing.T) {
|
// This test checks that InsertReceiptChain will roll back correctly when attempting to insert a side chain.
|
||||||
// Configure and generate a sample block chain
|
func TestInsertReceiptChainRollback(t *testing.T) {
|
||||||
var (
|
// Generate forked chain. The returned BlockChain object is used to process the side chain blocks.
|
||||||
gendb = rawdb.NewMemoryDatabase()
|
tmpChain, sideblocks, canonblocks, err := getLongAndShortChains()
|
||||||
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
if err != nil {
|
||||||
address = crypto.PubkeyToAddress(key.PublicKey)
|
t.Fatal(err)
|
||||||
funds = big.NewInt(1000000000)
|
}
|
||||||
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
|
defer tmpChain.Stop()
|
||||||
genesis = gspec.MustCommit(gendb)
|
// Get the side chain receipts.
|
||||||
)
|
if _, err := tmpChain.InsertChain(sideblocks); err != nil {
|
||||||
height := uint64(1024)
|
t.Fatal("processing side chain failed:", err)
|
||||||
blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil)
|
}
|
||||||
|
t.Log("sidechain head:", tmpChain.CurrentBlock().Number(), tmpChain.CurrentBlock().Hash())
|
||||||
|
sidechainReceipts := make([]types.Receipts, len(sideblocks))
|
||||||
|
for i, block := range sideblocks {
|
||||||
|
sidechainReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
|
||||||
|
}
|
||||||
|
// Get the canon chain receipts.
|
||||||
|
if _, err := tmpChain.InsertChain(canonblocks); err != nil {
|
||||||
|
t.Fatal("processing canon chain failed:", err)
|
||||||
|
}
|
||||||
|
t.Log("canon head:", tmpChain.CurrentBlock().Number(), tmpChain.CurrentBlock().Hash())
|
||||||
|
canonReceipts := make([]types.Receipts, len(canonblocks))
|
||||||
|
for i, block := range canonblocks {
|
||||||
|
canonReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
|
||||||
|
}
|
||||||
|
|
||||||
// Import the chain as a ancient-first node and ensure all pointers are updated
|
// Set up a BlockChain that uses the ancient store.
|
||||||
frdir, err := ioutil.TempDir("", "")
|
frdir, err := ioutil.TempDir("", "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to create temp freezer dir: %v", err)
|
t.Fatalf("failed to create temp freezer dir: %v", err)
|
||||||
@ -1662,38 +1694,43 @@ func TestIncompleteAncientReceiptChainInsertion(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||||
}
|
}
|
||||||
|
gspec := Genesis{Config: params.AllEthashProtocolChanges}
|
||||||
gspec.MustCommit(ancientDb)
|
gspec.MustCommit(ancientDb)
|
||||||
ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
|
ancientChain, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
|
||||||
defer ancient.Stop()
|
defer ancientChain.Stop()
|
||||||
|
|
||||||
headers := make([]*types.Header, len(blocks))
|
// Import the canonical header chain.
|
||||||
for i, block := range blocks {
|
canonHeaders := make([]*types.Header, len(canonblocks))
|
||||||
headers[i] = block.Header()
|
for i, block := range canonblocks {
|
||||||
|
canonHeaders[i] = block.Header()
|
||||||
}
|
}
|
||||||
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
|
if _, err = ancientChain.InsertHeaderChain(canonHeaders, 1); err != nil {
|
||||||
t.Fatalf("failed to insert header %d: %v", n, err)
|
t.Fatal("can't import canon headers:", err)
|
||||||
}
|
}
|
||||||
// Abort ancient receipt chain insertion deliberately
|
|
||||||
ancient.terminateInsert = func(hash common.Hash, number uint64) bool {
|
// Try to insert blocks/receipts of the side chain.
|
||||||
return number == blocks[len(blocks)/2].NumberU64()
|
_, err = ancientChain.InsertReceiptChain(sideblocks, sidechainReceipts, uint64(len(sideblocks)))
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("expected error from InsertReceiptChain.")
|
||||||
}
|
}
|
||||||
previousFastBlock := ancient.CurrentFastBlock()
|
if ancientChain.CurrentFastBlock().NumberU64() != 0 {
|
||||||
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err == nil {
|
t.Fatalf("failed to rollback ancient data, want %d, have %d", 0, ancientChain.CurrentFastBlock().NumberU64())
|
||||||
t.Fatalf("failed to insert receipt %d: %v", n, err)
|
|
||||||
}
|
}
|
||||||
if ancient.CurrentFastBlock().NumberU64() != previousFastBlock.NumberU64() {
|
if frozen, err := ancientChain.db.Ancients(); err != nil || frozen != 1 {
|
||||||
t.Fatalf("failed to rollback ancient data, want %d, have %d", previousFastBlock.NumberU64(), ancient.CurrentFastBlock().NumberU64())
|
t.Fatalf("failed to truncate ancient data, frozen index is %d", frozen)
|
||||||
}
|
}
|
||||||
if frozen, err := ancient.db.Ancients(); err != nil || frozen != 1 {
|
|
||||||
t.Fatalf("failed to truncate ancient data")
|
// Insert blocks/receipts of the canonical chain.
|
||||||
|
_, err = ancientChain.InsertReceiptChain(canonblocks, canonReceipts, uint64(len(canonblocks)))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("can't import canon chain receipts: %v", err)
|
||||||
}
|
}
|
||||||
ancient.terminateInsert = nil
|
if ancientChain.CurrentFastBlock().NumberU64() != canonblocks[len(canonblocks)-1].NumberU64() {
|
||||||
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
|
|
||||||
t.Fatalf("failed to insert receipt %d: %v", n, err)
|
|
||||||
}
|
|
||||||
if ancient.CurrentFastBlock().NumberU64() != blocks[len(blocks)-1].NumberU64() {
|
|
||||||
t.Fatalf("failed to insert ancient recept chain after rollback")
|
t.Fatalf("failed to insert ancient recept chain after rollback")
|
||||||
}
|
}
|
||||||
|
if frozen, _ := ancientChain.db.Ancients(); frozen != uint64(len(canonblocks))+1 {
|
||||||
|
t.Fatalf("wrong ancients count %d", frozen)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests that importing a very large side fork, which is larger than the canon chain,
|
// Tests that importing a very large side fork, which is larger than the canon chain,
|
||||||
@ -1958,9 +1995,8 @@ func testInsertKnownChainData(t *testing.T, typ string) {
|
|||||||
asserter(t, blocks2[len(blocks2)-1])
|
asserter(t, blocks2[len(blocks2)-1])
|
||||||
}
|
}
|
||||||
|
|
||||||
// getLongAndShortChains returns two chains,
|
// getLongAndShortChains returns two chains: A is longer, B is heavier.
|
||||||
// A is longer, B is heavier
|
func getLongAndShortChains() (bc *BlockChain, longChain []*types.Block, heavyChain []*types.Block, err error) {
|
||||||
func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, error) {
|
|
||||||
// Generate a canonical chain to act as the main dataset
|
// Generate a canonical chain to act as the main dataset
|
||||||
engine := ethash.NewFaker()
|
engine := ethash.NewFaker()
|
||||||
db := rawdb.NewMemoryDatabase()
|
db := rawdb.NewMemoryDatabase()
|
||||||
@ -1968,7 +2004,7 @@ func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, error
|
|||||||
|
|
||||||
// Generate and import the canonical chain,
|
// Generate and import the canonical chain,
|
||||||
// Offset the time, to keep the difficulty low
|
// Offset the time, to keep the difficulty low
|
||||||
longChain, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 80, func(i int, b *BlockGen) {
|
longChain, _ = GenerateChain(params.TestChainConfig, genesis, engine, db, 80, func(i int, b *BlockGen) {
|
||||||
b.SetCoinbase(common.Address{1})
|
b.SetCoinbase(common.Address{1})
|
||||||
})
|
})
|
||||||
diskdb := rawdb.NewMemoryDatabase()
|
diskdb := rawdb.NewMemoryDatabase()
|
||||||
@ -1982,10 +2018,13 @@ func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, error
|
|||||||
// Generate fork chain, make it shorter than canon, with common ancestor pretty early
|
// Generate fork chain, make it shorter than canon, with common ancestor pretty early
|
||||||
parentIndex := 3
|
parentIndex := 3
|
||||||
parent := longChain[parentIndex]
|
parent := longChain[parentIndex]
|
||||||
heavyChain, _ := GenerateChain(params.TestChainConfig, parent, engine, db, 75, func(i int, b *BlockGen) {
|
heavyChainExt, _ := GenerateChain(params.TestChainConfig, parent, engine, db, 75, func(i int, b *BlockGen) {
|
||||||
b.SetCoinbase(common.Address{2})
|
b.SetCoinbase(common.Address{2})
|
||||||
b.OffsetTime(-9)
|
b.OffsetTime(-9)
|
||||||
})
|
})
|
||||||
|
heavyChain = append(heavyChain, longChain[:parentIndex+1]...)
|
||||||
|
heavyChain = append(heavyChain, heavyChainExt...)
|
||||||
|
|
||||||
// Verify that the test is sane
|
// Verify that the test is sane
|
||||||
var (
|
var (
|
||||||
longerTd = new(big.Int)
|
longerTd = new(big.Int)
|
||||||
@@ -510,8 +510,9 @@ type MatcherSession struct {
 closer sync.Once // Sync object to ensure we only ever close once
 quit chan struct{} // Quit channel to request pipeline termination
 
 ctx context.Context // Context used by the light client to abort filtering
-err atomic.Value // Global error to track retrieval failures deep in the chain
+err error // Global error to track retrieval failures deep in the chain
+errLock sync.Mutex
 
 pend sync.WaitGroup
 }
@@ -529,10 +530,10 @@ func (s *MatcherSession) Close() {
 
 // Error returns any failure encountered during the matching session.
 func (s *MatcherSession) Error() error {
-if err := s.err.Load(); err != nil {
-return err.(error)
-}
-return nil
+s.errLock.Lock()
+defer s.errLock.Unlock()
+return s.err
 }
 
 // allocateRetrieval assigns a bloom bit index to a client process that can either
@@ -630,7 +631,9 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
 
 result := <-request
 if result.Error != nil {
-s.err.Store(result.Error)
+s.errLock.Lock()
+s.err = result.Error
+s.errLock.Unlock()
 s.Close()
 }
 s.deliverSections(result.Bit, result.Sections, result.Bitsets)
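The session error moves from atomic.Value to a plain field behind a sync.Mutex: no Load()/type-assertion dance, and a nil value can be stored, which atomic.Value forbids. The pattern in isolation:

package sketch

import "sync"

// errorBox records an error reported by concurrent workers and hands it
// back to a later reader, mirroring the mutex-guarded field above.
type errorBox struct {
	mu  sync.Mutex
	err error
}

func (b *errorBox) set(err error) {
	b.mu.Lock()
	b.err = err
	b.mu.Unlock()
}

func (b *errorBox) get() error {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.err
}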
@@ -31,6 +31,8 @@ var (
 
 // ErrNoGenesis is returned when there is no Genesis Block.
 ErrNoGenesis = errors.New("genesis not found in chain")
+
+errSideChainReceipts = errors.New("side blocks can't be accepted as ancient chain data")
 )
 
 // List of evm-call-message pre-checking errors. All state transition messages will
@@ -310,7 +310,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
 func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
 block := g.ToBlock(db)
 if block.Number().Sign() != 0 {
-return nil, fmt.Errorf("can't commit genesis block with number > 0")
+return nil, errors.New("can't commit genesis block with number > 0")
 }
 config := g.Config
 if config == nil {
@@ -319,6 +319,9 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
 if err := config.CheckConfigForkOrder(); err != nil {
 return nil, err
 }
+if config.Clique != nil && len(block.Extra()) == 0 {
+return nil, errors.New("can't start clique chain without signers")
+}
 rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty)
 rawdb.WriteBlock(db, block)
 rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
@@ -39,6 +39,22 @@ func TestDefaultGenesisBlock(t *testing.T) {
 if block.Hash() != params.RopstenGenesisHash {
 t.Errorf("wrong ropsten genesis hash, got %v, want %v", block.Hash(), params.RopstenGenesisHash)
 }
+block = DefaultRinkebyGenesisBlock().ToBlock(nil)
+if block.Hash() != params.RinkebyGenesisHash {
+t.Errorf("wrong rinkeby genesis hash, got %v, want %v", block.Hash(), params.RinkebyGenesisHash)
+}
+block = DefaultGoerliGenesisBlock().ToBlock(nil)
+if block.Hash() != params.GoerliGenesisHash {
+t.Errorf("wrong goerli genesis hash, got %v, want %v", block.Hash(), params.GoerliGenesisHash)
+}
+}
+
+func TestInvalidCliqueConfig(t *testing.T) {
+block := DefaultGoerliGenesisBlock()
+block.ExtraData = []byte{}
+if _, err := block.Commit(nil); err == nil {
+t.Fatal("Expected error on invalid clique config")
+}
 }
 
 func TestSetupGenesis(t *testing.T) {
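Commit now refuses a clique genesis whose extra-data is empty, since clique derives its signer set from that field: 32 vanity bytes, then the 20-byte signer addresses, then 65 bytes reserved for the seal. A sketch of constructing a genesis that passes the new check (signer value hypothetical):

package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/params"
)

// cliqueGenesis embeds a single authorised signer in the extra-data,
// using the layout clique expects: vanity + signers + seal.
func cliqueGenesis(signer common.Address) *core.Genesis {
	extra := make([]byte, 32+common.AddressLength+65)
	copy(extra[32:], signer.Bytes())
	return &core.Genesis{
		Config:    params.AllCliqueProtocolChanges,
		ExtraData: extra,
	}
}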
|
@ -14,6 +14,7 @@
|
|||||||
// You should have received a copy of the GNU Lesser General Public License
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//go:build none
|
||||||
// +build none
|
// +build none
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -19,6 +19,8 @@ package rawdb
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
@ -81,6 +83,37 @@ func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
|
|||||||
return hashes
|
return hashes
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type NumberHash struct {
|
||||||
|
Number uint64
|
||||||
|
Hash common.Hash
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadAllHashes retrieves all the hashes assigned to blocks at a certain heights,
|
||||||
|
// both canonical and reorged forks included.
|
||||||
|
// This method considers both limits to be _inclusive_.
|
||||||
|
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
|
||||||
|
var (
|
||||||
|
start = encodeBlockNumber(first)
|
||||||
|
keyLength = len(headerPrefix) + 8 + 32
|
||||||
|
hashes = make([]*NumberHash, 0, 1+last-first)
|
||||||
|
it = db.NewIterator(headerPrefix, start)
|
||||||
|
)
|
||||||
|
defer it.Release()
|
||||||
|
for it.Next() {
|
||||||
|
key := it.Key()
|
||||||
|
if len(key) != keyLength {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8])
|
||||||
|
if num > last {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
hash := common.BytesToHash(key[len(key)-32:])
|
||||||
|
hashes = append(hashes, &NumberHash{num, hash})
|
||||||
|
}
|
||||||
|
return hashes
|
||||||
|
}
|
||||||
|
|
||||||
// ReadAllCanonicalHashes retrieves all canonical number and hash mappings at the
|
// ReadAllCanonicalHashes retrieves all canonical number and hash mappings at the
|
||||||
// certain chain range. If the accumulated entries reaches the given threshold,
|
// certain chain range. If the accumulated entries reaches the given threshold,
|
||||||
// abort the iteration and return the semi-finish result.
|
// abort the iteration and return the semi-finish result.
|
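ReadAllHashesInRange walks the header-key space once, so callers can sweep a whole block range for both canonical and reorged hashes; the blockchain hunk earlier uses it to delete stale side-chain headers. A consumer sketch (the helper is illustrative):

package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// sideChainEntries returns every (number, hash) pair in [first, last] whose
// hash is not in the canonical set, i.e. leftovers from reorged forks.
func sideChainEntries(db ethdb.Iteratee, first, last uint64, canon map[common.Hash]struct{}) []*rawdb.NumberHash {
	var side []*rawdb.NumberHash
	for _, nh := range rawdb.ReadAllHashesInRange(db, first, last) {
		if _, ok := canon[nh.Hash]; !ok {
			side = append(side, nh)
		}
	}
	return side
}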
@@ -631,6 +664,86 @@ func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
 }
 }
 
+// storedReceiptRLP is the storage encoding of a receipt.
+// Re-definition in core/types/receipt.go.
+type storedReceiptRLP struct {
+PostStateOrStatus []byte
+CumulativeGasUsed uint64
+Logs []*types.LogForStorage
+}
+
+// ReceiptLogs is a barebone version of ReceiptForStorage which only keeps
+// the list of logs. When decoding a stored receipt into this object we
+// avoid creating the bloom filter.
+type receiptLogs struct {
+Logs []*types.Log
+}
+
+// DecodeRLP implements rlp.Decoder.
+func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
+var stored storedReceiptRLP
+if err := s.Decode(&stored); err != nil {
+return err
+}
+r.Logs = make([]*types.Log, len(stored.Logs))
+for i, log := range stored.Logs {
+r.Logs[i] = (*types.Log)(log)
+}
+return nil
+}
+
+// DeriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
+func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
+logIndex := uint(0)
+if len(txs) != len(receipts) {
+return errors.New("transaction and receipt count mismatch")
+}
+for i := 0; i < len(receipts); i++ {
+txHash := txs[i].Hash()
+// The derived log fields can simply be set from the block and transaction
+for j := 0; j < len(receipts[i].Logs); j++ {
+receipts[i].Logs[j].BlockNumber = number
+receipts[i].Logs[j].BlockHash = hash
+receipts[i].Logs[j].TxHash = txHash
+receipts[i].Logs[j].TxIndex = uint(i)
+receipts[i].Logs[j].Index = logIndex
+logIndex++
+}
+}
+return nil
+}
+
+// ReadLogs retrieves the logs for all transactions in a block. The log fields
+// are populated with metadata. In case the receipts or the block body
+// are not found, a nil is returned.
+func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
+// Retrieve the flattened receipt slice
+data := ReadReceiptsRLP(db, hash, number)
+if len(data) == 0 {
+return nil
+}
+receipts := []*receiptLogs{}
+if err := rlp.DecodeBytes(data, &receipts); err != nil {
+log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
+return nil
+}
+
+body := ReadBody(db, hash, number)
+if body == nil {
+log.Error("Missing body but have receipt", "hash", hash, "number", number)
+return nil
+}
+if err := deriveLogFields(receipts, hash, number, body.Transactions); err != nil {
+log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
+return nil
+}
+logs := make([][]*types.Log, len(receipts))
+for i, receipt := range receipts {
+logs[i] = receipt.Logs
+}
+return logs
+}
+
 // ReadBlock retrieves an entire block corresponding to the hash, assembling it
 // back from the stored header and body. If either the header or body could not
 // be retrieved nil is returned.
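ReadLogs decodes only the log portion of stored receipts, skipping bloom-filter reconstruction, which makes it cheaper than ReadReceipts for log-only queries. A minimal consumer sketch (the helper is illustrative):

package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

// blockLogs flattens the per-transaction slices returned by ReadLogs into a
// single slice. It returns nil when the receipts or body are missing.
func blockLogs(db ethdb.Reader, hash common.Hash, number uint64) []*types.Log {
	var flat []*types.Log
	for _, txLogs := range rawdb.ReadLogs(db, hash, number) {
		flat = append(flat, txLogs...)
	}
	return flat
}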
@@ -656,34 +769,48 @@ func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
 }
 
 // WriteAncientBlock writes entire block data into ancient store and returns the total written size.
-func WriteAncientBlock(db ethdb.AncientWriter, block *types.Block, receipts types.Receipts, td *big.Int) int {
-// Encode all block components to RLP format.
-headerBlob, err := rlp.EncodeToBytes(block.Header())
-if err != nil {
-log.Crit("Failed to RLP encode block header", "err", err)
-}
-bodyBlob, err := rlp.EncodeToBytes(block.Body())
-if err != nil {
-log.Crit("Failed to RLP encode body", "err", err)
-}
-storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
-for i, receipt := range receipts {
-storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
-}
-receiptBlob, err := rlp.EncodeToBytes(storageReceipts)
-if err != nil {
-log.Crit("Failed to RLP encode block receipts", "err", err)
-}
-tdBlob, err := rlp.EncodeToBytes(td)
-if err != nil {
-log.Crit("Failed to RLP encode block total difficulty", "err", err)
-}
-// Write all blob to flatten files.
-err = db.AppendAncient(block.NumberU64(), block.Hash().Bytes(), headerBlob, bodyBlob, receiptBlob, tdBlob)
-if err != nil {
-log.Crit("Failed to write block data to ancient store", "err", err)
-}
-return len(headerBlob) + len(bodyBlob) + len(receiptBlob) + len(tdBlob) + common.HashLength
+func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
+var (
+tdSum = new(big.Int).Set(td)
+stReceipts []*types.ReceiptForStorage
+)
+return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+for i, block := range blocks {
+// Convert receipts to storage format and sum up total difficulty.
+stReceipts = stReceipts[:0]
+for _, receipt := range receipts[i] {
+stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt))
+}
+header := block.Header()
+if i > 0 {
+tdSum.Add(tdSum, header.Difficulty)
+}
+if err := writeAncientBlock(op, block, header, stReceipts, tdSum); err != nil {
+return err
+}
+}
+return nil
+})
+}
+
+func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error {
+num := block.NumberU64()
+if err := op.AppendRaw(freezerHashTable, num, block.Hash().Bytes()); err != nil {
+return fmt.Errorf("can't add block %d hash: %v", num, err)
+}
+if err := op.Append(freezerHeaderTable, num, header); err != nil {
+return fmt.Errorf("can't append block header %d: %v", num, err)
+}
+if err := op.Append(freezerBodiesTable, num, block.Body()); err != nil {
+return fmt.Errorf("can't append block body %d: %v", num, err)
+}
+if err := op.Append(freezerReceiptTable, num, receipts); err != nil {
+return fmt.Errorf("can't append block %d receipts: %v", num, err)
+}
+if err := op.Append(freezerDifficultyTable, num, td); err != nil {
+return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
+}
+return nil
 }
 
 // DeleteBlock removes all block data associated with a hash.
||||||
|
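A usage note on the changed API, as a hypothetical sketch (not part of the diff): callers now hand a whole batch of consecutive blocks to one call, and the td argument is the total difficulty of the first block only, since the loop above adds the later blocks' difficulties onto tdSum itself:

package rawdb

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

// importAncients is a hypothetical wrapper: one WriteAncientBlocks call spans
// the whole batch, so a failure mid-batch rolls back every table together.
func importAncients(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, firstTd *big.Int) (int64, error) {
	return WriteAncientBlocks(db, blocks, receipts, firstTd)
}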
@@ -29,6 +29,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 	"golang.org/x/crypto/sha3"
@@ -438,7 +439,7 @@ func TestAncientStorage(t *testing.T) {
 	if err != nil {
 		t.Fatalf("failed to create temp freezer dir: %v", err)
 	}
-	defer os.Remove(frdir)
+	defer os.RemoveAll(frdir)
 
 	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
 	if err != nil {
@@ -467,8 +468,10 @@ func TestAncientStorage(t *testing.T) {
 	if blob := ReadTdRLP(db, hash, number); len(blob) > 0 {
 		t.Fatalf("non existent td returned")
 	}
+
 	// Write and verify the header in the database
-	WriteAncientBlock(db, block, nil, big.NewInt(100))
+	WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{nil}, big.NewInt(100))
+
 	if blob := ReadHeaderRLP(db, hash, number); len(blob) == 0 {
 		t.Fatalf("no header returned")
 	}
@@ -481,6 +484,7 @@ func TestAncientStorage(t *testing.T) {
 	if blob := ReadTdRLP(db, hash, number); len(blob) == 0 {
 		t.Fatalf("no td returned")
 	}
+
 	// Use a fake hash for data retrieval, nothing should be returned.
 	fakeHash := common.BytesToHash([]byte{0x01, 0x02, 0x03})
 	if blob := ReadHeaderRLP(db, fakeHash, number); len(blob) != 0 {
@@ -528,3 +532,354 @@ func TestCanonicalHashIteration(t *testing.T) {
 		}
 	}
 }
+
+func TestHashesInRange(t *testing.T) {
+	mkHeader := func(number, seq int) *types.Header {
+		h := types.Header{
+			Difficulty: new(big.Int),
+			Number:     big.NewInt(int64(number)),
+			GasLimit:   uint64(seq),
+		}
+		return &h
+	}
+	db := NewMemoryDatabase()
+	// For each number, write N versions of that particular number
+	total := 0
+	for i := 0; i < 15; i++ {
+		for ii := 0; ii < i; ii++ {
+			WriteHeader(db, mkHeader(i, ii))
+			total++
+		}
+	}
+	if have, want := len(ReadAllHashesInRange(db, 10, 10)), 10; have != want {
+		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+	}
+	if have, want := len(ReadAllHashesInRange(db, 10, 9)), 0; have != want {
+		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+	}
+	if have, want := len(ReadAllHashesInRange(db, 0, 100)), total; have != want {
+		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+	}
+	if have, want := len(ReadAllHashesInRange(db, 9, 10)), 9+10; have != want {
+		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+	}
+	if have, want := len(ReadAllHashes(db, 10)), 10; have != want {
+		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+	}
+	if have, want := len(ReadAllHashes(db, 16)), 0; have != want {
+		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+	}
+	if have, want := len(ReadAllHashes(db, 1)), 1; have != want {
+		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+	}
+}
+
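The expected counts in the assertions above follow from the write loop: block number i is written i times, so a range query over [from, to] should see the sum of those per-number counts. A small sketch of the arithmetic (hypothetical helper, not part of the test):

package rawdb

// expectedHashes mirrors the test's write pattern: number i has i versions.
func expectedHashes(from, to int) int {
	total := 0
	for i := from; i <= to; i++ {
		total += i
	}
	return total // expectedHashes(9, 10) == 19, matching the 9+10 assertion
}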
+// This measures the write speed of the WriteAncientBlocks operation.
+func BenchmarkWriteAncientBlocks(b *testing.B) {
+	// Open freezer database.
+	frdir, err := ioutil.TempDir("", "")
+	if err != nil {
+		b.Fatalf("failed to create temp freezer dir: %v", err)
+	}
+	defer os.RemoveAll(frdir)
+	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
+	if err != nil {
+		b.Fatalf("failed to create database with ancient backend")
+	}
+
+	// Create the data to insert. The blocks must have consecutive numbers, so we create
+	// all of them ahead of time. However, there is no need to create receipts
+	// individually for each block, just make one batch here and reuse it for all writes.
+	const batchSize = 128
+	const blockTxs = 20
+	allBlocks := makeTestBlocks(b.N, blockTxs)
+	batchReceipts := makeTestReceipts(batchSize, blockTxs)
+	b.ResetTimer()
+
+	// The benchmark loop writes batches of blocks, but note that the total block count is
+	// b.N. This means the resulting ns/op measurement is the time it takes to write a
+	// single block and its associated data.
+	var td = big.NewInt(55)
+	var totalSize int64
+	for i := 0; i < b.N; i += batchSize {
+		length := batchSize
+		if i+batchSize > b.N {
+			length = b.N - i
+		}
+
+		blocks := allBlocks[i : i+length]
+		receipts := batchReceipts[:length]
+		writeSize, err := WriteAncientBlocks(db, blocks, receipts, td)
+		if err != nil {
+			b.Fatal(err)
+		}
+		totalSize += writeSize
+	}
+
+	// Enable MB/s reporting.
+	b.SetBytes(totalSize / int64(b.N))
+}
+
+// makeTestBlocks creates fake blocks for the ancient write benchmark.
+func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block {
+	key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+	signer := types.LatestSignerForChainID(big.NewInt(8))
+
+	// Create transactions.
+	txs := make([]*types.Transaction, txsPerBlock)
+	for i := 0; i < len(txs); i++ {
+		var err error
+		to := common.Address{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
+		txs[i], err = types.SignNewTx(key, signer, &types.LegacyTx{
+			Nonce:    2,
+			GasPrice: big.NewInt(30000),
+			Gas:      0x45454545,
+			To:       &to,
+		})
+		if err != nil {
+			panic(err)
+		}
+	}
+
+	// Create the blocks.
+	blocks := make([]*types.Block, nblock)
+	for i := 0; i < nblock; i++ {
+		header := &types.Header{
+			Number: big.NewInt(int64(i)),
+			Extra:  []byte("test block"),
+		}
+		blocks[i] = types.NewBlockWithHeader(header).WithBody(txs, nil)
+		blocks[i].Hash() // pre-cache the block hash
+	}
+	return blocks
+}
+
+// makeTestReceipts creates fake receipts for the ancient write benchmark.
+func makeTestReceipts(n int, nPerBlock int) []types.Receipts {
+	receipts := make([]*types.Receipt, nPerBlock)
+	for i := 0; i < len(receipts); i++ {
+		receipts[i] = &types.Receipt{
+			Status:            types.ReceiptStatusSuccessful,
+			CumulativeGasUsed: 0x888888888,
+			Logs:              make([]*types.Log, 5),
+		}
+	}
+	allReceipts := make([]types.Receipts, n)
+	for i := 0; i < n; i++ {
+		allReceipts[i] = receipts
+	}
+	return allReceipts
+}
+
+type fullLogRLP struct {
+	Address     common.Address
+	Topics      []common.Hash
+	Data        []byte
+	BlockNumber uint64
+	TxHash      common.Hash
+	TxIndex     uint
+	BlockHash   common.Hash
+	Index       uint
+}
+
+func newFullLogRLP(l *types.Log) *fullLogRLP {
+	return &fullLogRLP{
+		Address:     l.Address,
+		Topics:      l.Topics,
+		Data:        l.Data,
+		BlockNumber: l.BlockNumber,
+		TxHash:      l.TxHash,
+		TxIndex:     l.TxIndex,
+		BlockHash:   l.BlockHash,
+		Index:       l.Index,
+	}
+}
+
+// Tests that logs associated with a single block can be retrieved.
+func TestReadLogs(t *testing.T) {
+	db := NewMemoryDatabase()
+
+	// Create a live block since we need metadata to reconstruct the receipt
+	tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
+	tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)
+
+	body := &types.Body{Transactions: types.Transactions{tx1, tx2}}
+
+	// Create the two receipts to manage afterwards
+	receipt1 := &types.Receipt{
+		Status:            types.ReceiptStatusFailed,
+		CumulativeGasUsed: 1,
+		Logs: []*types.Log{
+			{Address: common.BytesToAddress([]byte{0x11})},
+			{Address: common.BytesToAddress([]byte{0x01, 0x11})},
+		},
+		TxHash:          tx1.Hash(),
+		ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
+		GasUsed:         111111,
+	}
+	receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})
+
+	receipt2 := &types.Receipt{
+		PostState:         common.Hash{2}.Bytes(),
+		CumulativeGasUsed: 2,
+		Logs: []*types.Log{
+			{Address: common.BytesToAddress([]byte{0x22})},
+			{Address: common.BytesToAddress([]byte{0x02, 0x22})},
+		},
+		TxHash:          tx2.Hash(),
+		ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
+		GasUsed:         222222,
+	}
+	receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
+	receipts := []*types.Receipt{receipt1, receipt2}
+
+	hash := common.BytesToHash([]byte{0x03, 0x14})
+	// Check that no receipt entries are in a pristine database
+	if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 {
+		t.Fatalf("non existent receipts returned: %v", rs)
+	}
+	// Insert the body that corresponds to the receipts
+	WriteBody(db, hash, 0, body)
+
+	// Insert the receipt slice into the database and check presence
+	WriteReceipts(db, hash, 0, receipts)
+
+	logs := ReadLogs(db, hash, 0)
+	if len(logs) == 0 {
+		t.Fatalf("no logs returned")
+	}
+	if have, want := len(logs), 2; have != want {
+		t.Fatalf("unexpected number of logs returned, have %d want %d", have, want)
+	}
+	if have, want := len(logs[0]), 2; have != want {
+		t.Fatalf("unexpected number of logs[0] returned, have %d want %d", have, want)
+	}
+	if have, want := len(logs[1]), 2; have != want {
+		t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want)
+	}
+
+	// Fill in log fields so we can compare their rlp encoding
+	if err := types.Receipts(receipts).DeriveFields(params.TestChainConfig, hash, 0, body.Transactions); err != nil {
+		t.Fatal(err)
+	}
+	for i, pr := range receipts {
+		for j, pl := range pr.Logs {
+			rlpHave, err := rlp.EncodeToBytes(newFullLogRLP(logs[i][j]))
+			if err != nil {
+				t.Fatal(err)
+			}
+			rlpWant, err := rlp.EncodeToBytes(newFullLogRLP(pl))
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !bytes.Equal(rlpHave, rlpWant) {
+				t.Fatalf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
+			}
+		}
+	}
+}
+
+func TestDeriveLogFields(t *testing.T) {
+	// Create a few transactions to have receipts for
+	to2 := common.HexToAddress("0x2")
+	to3 := common.HexToAddress("0x3")
+	txs := types.Transactions{
+		types.NewTx(&types.LegacyTx{
+			Nonce:    1,
+			Value:    big.NewInt(1),
+			Gas:      1,
+			GasPrice: big.NewInt(1),
+		}),
+		types.NewTx(&types.LegacyTx{
+			To:       &to2,
+			Nonce:    2,
+			Value:    big.NewInt(2),
+			Gas:      2,
+			GasPrice: big.NewInt(2),
+		}),
+		types.NewTx(&types.AccessListTx{
+			To:       &to3,
+			Nonce:    3,
+			Value:    big.NewInt(3),
+			Gas:      3,
+			GasPrice: big.NewInt(3),
+		}),
+	}
+	// Create the corresponding receipts
+	receipts := []*receiptLogs{
+		{
+			Logs: []*types.Log{
+				{Address: common.BytesToAddress([]byte{0x11})},
+				{Address: common.BytesToAddress([]byte{0x01, 0x11})},
+			},
+		},
+		{
+			Logs: []*types.Log{
+				{Address: common.BytesToAddress([]byte{0x22})},
+				{Address: common.BytesToAddress([]byte{0x02, 0x22})},
+			},
+		},
+		{
+			Logs: []*types.Log{
+				{Address: common.BytesToAddress([]byte{0x33})},
+				{Address: common.BytesToAddress([]byte{0x03, 0x33})},
+			},
+		},
+	}
+
+	// Derive log metadata fields
+	number := big.NewInt(1)
+	hash := common.BytesToHash([]byte{0x03, 0x14})
+	if err := deriveLogFields(receipts, hash, number.Uint64(), txs); err != nil {
+		t.Fatal(err)
+	}
+
+	// Iterate over all the computed fields and check that they're correct
+	logIndex := uint(0)
+	for i := range receipts {
+		for j := range receipts[i].Logs {
+			if receipts[i].Logs[j].BlockNumber != number.Uint64() {
+				t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64())
+			}
+			if receipts[i].Logs[j].BlockHash != hash {
+				t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String())
+			}
+			if receipts[i].Logs[j].TxHash != txs[i].Hash() {
+				t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
+			}
+			if receipts[i].Logs[j].TxIndex != uint(i) {
+				t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
+			}
+			if receipts[i].Logs[j].Index != logIndex {
+				t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex)
+			}
+			logIndex++
+		}
+	}
+}
+
+func BenchmarkDecodeRLPLogs(b *testing.B) {
+	// Encoded receipts from block 0x14ee094309fbe8f70b65f45ebcc08fb33f126942d97464aad5eb91cfd1e2d269
+	buf, err := ioutil.ReadFile("testdata/stored_receipts.bin")
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.Run("ReceiptForStorage", func(b *testing.B) {
+		b.ReportAllocs()
+		var r []*types.ReceiptForStorage
+		for i := 0; i < b.N; i++ {
+			if err := rlp.DecodeBytes(buf, &r); err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+	b.Run("rlpLogs", func(b *testing.B) {
+		b.ReportAllocs()
+		var r []*receiptLogs
+		for i := 0; i < b.N; i++ {
+			if err := rlp.DecodeBytes(buf, &r); err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+}
@@ -104,9 +104,9 @@ func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
 	return 0, errNotSupported
 }
 
-// AppendAncient returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
-	return errNotSupported
+// ModifyAncients is not supported.
+func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
+	return 0, errNotSupported
 }
 
 // TruncateAncients returns an error as we don't have a backing chain freezer.
@@ -122,9 +122,7 @@ func (db *nofreezedb) Sync() error {
 // NewDatabase creates a high level database on top of a given key-value data
 // store without a freezer moving immutable chain segments into cold storage.
 func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
-	return &nofreezedb{
-		KeyValueStore: db,
-	}
+	return &nofreezedb{KeyValueStore: db}
 }
 
 // NewDatabaseWithFreezer creates a high level database on top of a given key-
@@ -132,7 +130,7 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
 // storage.
 func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
 	// Create the idle freezer instance
-	frdb, err := newFreezer(freezer, namespace, readonly)
+	frdb, err := newFreezer(freezer, namespace, readonly, freezerTableSize, FreezerNoSnappy)
 	if err != nil {
 		return nil, err
 	}
@@ -61,6 +61,9 @@ const (
 	// freezerBatchLimit is the maximum number of blocks to freeze in one batch
 	// before doing an fsync and deleting it from the key-value store.
 	freezerBatchLimit = 30000
+
+	// freezerTableSize defines the maximum size of freezer data files.
+	freezerTableSize = 2 * 1000 * 1000 * 1000
 )
 
 // freezer is an memory mapped append-only database to store immutable chain data
@@ -77,6 +80,10 @@ type freezer struct {
 	frozen    uint64 // Number of blocks already frozen
 	threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
 
+	// This lock synchronizes writers and the truncate operation.
+	writeLock  sync.Mutex
+	writeBatch *freezerBatch
+
 	readonly     bool
 	tables       map[string]*freezerTable // Data tables for storing everything
 	instanceLock fileutil.Releaser        // File-system lock to prevent double opens
@@ -90,7 +97,10 @@ type freezer struct {
 
 // newFreezer creates a chain freezer that moves ancient chain data into
 // append-only flat file containers.
-func newFreezer(datadir string, namespace string, readonly bool) (*freezer, error) {
+//
+// The 'tables' argument defines the data tables. If the value of a map
+// entry is true, snappy compression is disabled for the table.
+func newFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*freezer, error) {
 	// Create the initial freezer object
 	var (
 		readMeter = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
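To make the new signature concrete, a hypothetical sketch (not part of the commit) of opening a freezer with a custom table set; per the comment above, a map value of true disables snappy compression for that table, and the chain freezer passes FreezerNoSnappy for the standard tables:

package rawdb

// openCustomFreezer is a hypothetical example of the new newFreezer signature.
func openCustomFreezer(datadir string) (*freezer, error) {
	tables := map[string]bool{
		"blobs":  false, // snappy compression enabled
		"hashes": true,  // already high-entropy data, skip compression
	}
	return newFreezer(datadir, "custom/", false, freezerTableSize, tables)
}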
@@ -119,8 +129,10 @@ func newFreezer(datadir string, namespace string, readonly bool) (*freezer, erro
 		trigger: make(chan chan struct{}),
 		quit:    make(chan struct{}),
 	}
-	for name, disableSnappy := range FreezerNoSnappy {
-		table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, disableSnappy)
+
+	// Create the tables.
+	for name, disableSnappy := range tables {
+		table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy)
 		if err != nil {
 			for _, table := range freezer.tables {
 				table.Close()
@@ -130,6 +142,8 @@ func newFreezer(datadir string, namespace string, readonly bool) (*freezer, erro
 		}
 		freezer.tables[name] = table
 	}
+
+	// Truncate all tables to common length.
 	if err := freezer.repair(); err != nil {
 		for _, table := range freezer.tables {
 			table.Close()
@@ -137,12 +151,19 @@ func newFreezer(datadir string, namespace string, readonly bool) (*freezer, erro
 		lock.Release()
 		return nil, err
 	}
+
+	// Create the write batch.
+	freezer.writeBatch = newFreezerBatch(freezer)
+
 	log.Info("Opened ancient database", "database", datadir, "readonly", readonly)
 	return freezer, nil
 }
 
 // Close terminates the chain freezer, unmapping all the data files.
 func (f *freezer) Close() error {
+	f.writeLock.Lock()
+	defer f.writeLock.Unlock()
+
 	var errs []error
 	f.closeOnce.Do(func() {
 		close(f.quit)
@@ -199,61 +220,49 @@ func (f *freezer) Ancients() (uint64, error) {
 
 // AncientSize returns the ancient size of the specified category.
 func (f *freezer) AncientSize(kind string) (uint64, error) {
+	// This needs the write lock to avoid data races on table fields.
+	// Speed doesn't matter here, AncientSize is for debugging.
+	f.writeLock.Lock()
+	defer f.writeLock.Unlock()
+
 	if table := f.tables[kind]; table != nil {
 		return table.size()
 	}
 	return 0, errUnknownTable
 }
 
-// AppendAncient injects all binary blobs belong to block at the end of the
-// append-only immutable table files.
-//
-// Notably, this function is lock free but kind of thread-safe. All out-of-order
-// injection will be rejected. But if two injections with same number happen at
-// the same time, we can get into the trouble.
-func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, td []byte) (err error) {
+// ModifyAncients runs the given write operation.
+func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
 	if f.readonly {
-		return errReadOnly
+		return 0, errReadOnly
 	}
-	// Ensure the binary blobs we are appending is continuous with freezer.
-	if atomic.LoadUint64(&f.frozen) != number {
-		return errOutOrderInsertion
-	}
-	// Rollback all inserted data if any insertion below failed to ensure
-	// the tables won't out of sync.
+	f.writeLock.Lock()
+	defer f.writeLock.Unlock()
+
+	// Roll back all tables to the starting position in case of error.
+	prevItem := f.frozen
 	defer func() {
 		if err != nil {
-			rerr := f.repair()
-			if rerr != nil {
-				log.Crit("Failed to repair freezer", "err", rerr)
+			// The write operation has failed. Go back to the previous item position.
+			for name, table := range f.tables {
+				err := table.truncate(prevItem)
+				if err != nil {
+					log.Error("Freezer table roll-back failed", "table", name, "index", prevItem, "err", err)
+				}
 			}
-			log.Info("Append ancient failed", "number", number, "err", err)
 		}
 	}()
-	pluginAppendAncient(number, hash, header, body, receipts, td)
-	// Inject all the components into the relevant data tables
-	if err := f.tables[freezerHashTable].Append(f.frozen, hash[:]); err != nil {
-		log.Error("Failed to append ancient hash", "number", f.frozen, "hash", hash, "err", err)
-		return err
-	}
-	if err := f.tables[freezerHeaderTable].Append(f.frozen, header); err != nil {
-		log.Error("Failed to append ancient header", "number", f.frozen, "hash", hash, "err", err)
-		return err
-	}
-	if err := f.tables[freezerBodiesTable].Append(f.frozen, body); err != nil {
-		log.Error("Failed to append ancient body", "number", f.frozen, "hash", hash, "err", err)
-		return err
-	}
-	if err := f.tables[freezerReceiptTable].Append(f.frozen, receipts); err != nil {
-		log.Error("Failed to append ancient receipts", "number", f.frozen, "hash", hash, "err", err)
-		return err
-	}
-	if err := f.tables[freezerDifficultyTable].Append(f.frozen, td); err != nil {
-		log.Error("Failed to append ancient difficulty", "number", f.frozen, "hash", hash, "err", err)
-		return err
-	}
-	atomic.AddUint64(&f.frozen, 1) // Only modify atomically
-	return nil
+
+	f.writeBatch.reset()
+	if err := fn(f.writeBatch); err != nil {
+		return 0, err
+	}
+	item, writeSize, err := f.writeBatch.commit()
+	if err != nil {
+		return 0, err
+	}
+	atomic.StoreUint64(&f.frozen, item)
+	return writeSize, nil
 }
 
 // TruncateAncients discards any recent data above the provided threshold number.
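The rollback defer gives ModifyAncients transactional behavior: if the supplied function or the final commit fails, every table is truncated back to prevItem, so a half-written batch is never observable. A hypothetical sketch of relying on that guarantee (assuming an empty freezer, so item 0 is the next slot):

package rawdb

import (
	"errors"

	"github.com/ethereum/go-ethereum/ethdb"
)

// abortedWrite is a hypothetical example: the AppendRaw below is rolled back
// because the operation returns an error, leaving the freezer unchanged.
func abortedWrite(db ethdb.AncientWriter) error {
	_, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		if err := op.AppendRaw(freezerHashTable, 0, []byte{0x01}); err != nil {
			return err
		}
		return errors.New("abort: nothing is persisted")
	})
	return err
}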
@@ -261,6 +270,9 @@ func (f *freezer) TruncateAncients(items uint64) error {
 	if f.readonly {
 		return errReadOnly
 	}
+	f.writeLock.Lock()
+	defer f.writeLock.Unlock()
+
 	if atomic.LoadUint64(&f.frozen) <= items {
 		return nil
 	}
@@ -287,6 +299,24 @@ func (f *freezer) Sync() error {
 	return nil
 }
 
+// repair truncates all data tables to the same length.
+func (f *freezer) repair() error {
+	min := uint64(math.MaxUint64)
+	for _, table := range f.tables {
+		items := atomic.LoadUint64(&table.items)
+		if min > items {
+			min = items
+		}
+	}
+	for _, table := range f.tables {
+		if err := table.truncate(min); err != nil {
+			return err
+		}
+	}
+	atomic.StoreUint64(&f.frozen, min)
+	return nil
+}
+
 // freeze is a background thread that periodically checks the blockchain for any
 // import progress and moves ancient data from the fast database into the freezer.
 //
@@ -353,54 +383,28 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
 			backoff = true
 			continue
 		}
 
 		// Seems we have data ready to be frozen, process in usable batches
-		limit := *number - threshold
-		if limit-f.frozen > freezerBatchLimit {
-			limit = f.frozen + freezerBatchLimit
-		}
 		var (
 			start    = time.Now()
-			first    = f.frozen
-			ancients = make([]common.Hash, 0, limit-f.frozen)
+			first, _ = f.Ancients()
+			limit    = *number - threshold
 		)
-		for f.frozen <= limit {
-			// Retrieves all the components of the canonical block
-			hash := ReadCanonicalHash(nfdb, f.frozen)
-			if hash == (common.Hash{}) {
-				log.Error("Canonical hash missing, can't freeze", "number", f.frozen)
-				break
-			}
-			header := ReadHeaderRLP(nfdb, hash, f.frozen)
-			if len(header) == 0 {
-				log.Error("Block header missing, can't freeze", "number", f.frozen, "hash", hash)
-				break
-			}
-			body := ReadBodyRLP(nfdb, hash, f.frozen)
-			if len(body) == 0 {
-				log.Error("Block body missing, can't freeze", "number", f.frozen, "hash", hash)
-				break
-			}
-			receipts := ReadReceiptsRLP(nfdb, hash, f.frozen)
-			if len(receipts) == 0 {
-				log.Error("Block receipts missing, can't freeze", "number", f.frozen, "hash", hash)
-				break
-			}
-			td := ReadTdRLP(nfdb, hash, f.frozen)
-			if len(td) == 0 {
-				log.Error("Total difficulty missing, can't freeze", "number", f.frozen, "hash", hash)
-				break
-			}
-			log.Trace("Deep froze ancient block", "number", f.frozen, "hash", hash)
-			// Inject all the components into the relevant data tables
-			if err := f.AppendAncient(f.frozen, hash[:], header, body, receipts, td); err != nil {
-				break
-			}
-			ancients = append(ancients, hash)
+		if limit-first > freezerBatchLimit {
+			limit = first + freezerBatchLimit
 		}
+		ancients, err := f.freezeRange(nfdb, first, limit)
+		if err != nil {
+			log.Error("Error in block freeze operation", "err", err)
+			backoff = true
+			continue
+		}
+
 		// Batch of blocks have been frozen, flush them before wiping from leveldb
 		if err := f.Sync(); err != nil {
 			log.Crit("Failed to flush frozen tables", "err", err)
 		}
+
 		// Wipe out all data from the active database
 		batch := db.NewBatch()
 		for i := 0; i < len(ancients); i++ {
@@ -465,6 +469,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
 				log.Crit("Failed to delete dangling side blocks", "err", err)
 			}
 		}
+
 		// Log something friendly for the user
 		context := []interface{}{
 			"blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1,
@@ -481,20 +486,54 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
 	}
 }
 
-// repair truncates all data tables to the same length.
-func (f *freezer) repair() error {
-	min := uint64(math.MaxUint64)
-	for _, table := range f.tables {
-		items := atomic.LoadUint64(&table.items)
-		if min > items {
-			min = items
-		}
-	}
-	for _, table := range f.tables {
-		if err := table.truncate(min); err != nil {
-			return err
-		}
-	}
-	atomic.StoreUint64(&f.frozen, min)
-	return nil
-}
+func (f *freezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []common.Hash, err error) {
+	hashes = make([]common.Hash, 0, limit-number)
+
+	_, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+		for ; number <= limit; number++ {
+			// Retrieve all the components of the canonical block.
+			hash := ReadCanonicalHash(nfdb, number)
+			if hash == (common.Hash{}) {
+				return fmt.Errorf("canonical hash missing, can't freeze block %d", number)
+			}
+			header := ReadHeaderRLP(nfdb, hash, number)
+			if len(header) == 0 {
+				return fmt.Errorf("block header missing, can't freeze block %d", number)
+			}
+			body := ReadBodyRLP(nfdb, hash, number)
+			if len(body) == 0 {
+				return fmt.Errorf("block body missing, can't freeze block %d", number)
+			}
+			receipts := ReadReceiptsRLP(nfdb, hash, number)
+			if len(receipts) == 0 {
+				return fmt.Errorf("block receipts missing, can't freeze block %d", number)
+			}
+			td := ReadTdRLP(nfdb, hash, number)
+			if len(td) == 0 {
+				return fmt.Errorf("total difficulty missing, can't freeze block %d", number)
+			}
+
+			// Write to the batch.
+			if err := op.AppendRaw(freezerHashTable, number, hash[:]); err != nil {
+				return fmt.Errorf("can't write hash to freezer: %v", err)
+			}
+			if err := op.AppendRaw(freezerHeaderTable, number, header); err != nil {
+				return fmt.Errorf("can't write header to freezer: %v", err)
+			}
+			if err := op.AppendRaw(freezerBodiesTable, number, body); err != nil {
+				return fmt.Errorf("can't write body to freezer: %v", err)
+			}
+			if err := op.AppendRaw(freezerReceiptTable, number, receipts); err != nil {
+				return fmt.Errorf("can't write receipts to freezer: %v", err)
+			}
+			if err := op.AppendRaw(freezerDifficultyTable, number, td); err != nil {
+				return fmt.Errorf("can't write td to freezer: %v", err)
+			}
+
+			hashes = append(hashes, hash)
+		}
+		return nil
+	})
+
+	return hashes, err
+}
core/rawdb/freezer_batch.go (new file, 248 lines)
@@ -0,0 +1,248 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/golang/snappy"
+)
+
+// This is the maximum amount of data that will be buffered in memory
+// for a single freezer table batch.
+const freezerBatchBufferLimit = 2 * 1024 * 1024
+
+// freezerBatch is a write operation of multiple items on a freezer.
+type freezerBatch struct {
+	tables map[string]*freezerTableBatch
+}
+
+func newFreezerBatch(f *freezer) *freezerBatch {
+	batch := &freezerBatch{tables: make(map[string]*freezerTableBatch, len(f.tables))}
+	for kind, table := range f.tables {
+		batch.tables[kind] = table.newBatch()
+	}
+	return batch
+}
+
+// Append adds an RLP-encoded item of the given kind.
+func (batch *freezerBatch) Append(kind string, num uint64, item interface{}) error {
+	return batch.tables[kind].Append(num, item)
+}
+
+// AppendRaw adds an item of the given kind.
+func (batch *freezerBatch) AppendRaw(kind string, num uint64, item []byte) error {
+	return batch.tables[kind].AppendRaw(num, item)
+}
+
+// reset initializes the batch.
+func (batch *freezerBatch) reset() {
+	for _, tb := range batch.tables {
+		tb.reset()
+	}
+}
+
+// commit is called at the end of a write operation and
+// writes all remaining data to tables.
+func (batch *freezerBatch) commit() (item uint64, writeSize int64, err error) {
+	// Check that count agrees on all batches.
+	item = uint64(math.MaxUint64)
+	for name, tb := range batch.tables {
+		if item < math.MaxUint64 && tb.curItem != item {
+			return 0, 0, fmt.Errorf("table %s is at item %d, want %d", name, tb.curItem, item)
+		}
+		item = tb.curItem
+	}
+
+	// Commit all table batches.
+	for _, tb := range batch.tables {
+		if err := tb.commit(); err != nil {
+			return 0, 0, err
+		}
+		writeSize += tb.totalBytes
+	}
+	return item, writeSize, nil
+}
+
+// freezerTableBatch is a batch for a freezer table.
+type freezerTableBatch struct {
+	t *freezerTable
+
+	sb          *snappyBuffer
+	encBuffer   writeBuffer
+	dataBuffer  []byte
+	indexBuffer []byte
+	curItem     uint64 // expected index of next append
+	totalBytes  int64  // counts written bytes since reset
+}
+
+// newBatch creates a new batch for the freezer table.
+func (t *freezerTable) newBatch() *freezerTableBatch {
+	batch := &freezerTableBatch{t: t}
+	if !t.noCompression {
+		batch.sb = new(snappyBuffer)
+	}
+	batch.reset()
+	return batch
+}
+
+// reset clears the batch for reuse.
+func (batch *freezerTableBatch) reset() {
+	batch.dataBuffer = batch.dataBuffer[:0]
+	batch.indexBuffer = batch.indexBuffer[:0]
+	batch.curItem = atomic.LoadUint64(&batch.t.items)
+	batch.totalBytes = 0
+}
+
+// Append rlp-encodes and adds data at the end of the freezer table. The item number is a
+// precautionary parameter to ensure data correctness, but the table will reject already
+// existing data.
+func (batch *freezerTableBatch) Append(item uint64, data interface{}) error {
+	if item != batch.curItem {
+		return errOutOrderInsertion
+	}
+
+	// Encode the item.
+	batch.encBuffer.Reset()
+	if err := rlp.Encode(&batch.encBuffer, data); err != nil {
+		return err
+	}
+	encItem := batch.encBuffer.data
+	if batch.sb != nil {
+		encItem = batch.sb.compress(encItem)
+	}
+	return batch.appendItem(encItem)
+}
+
+// AppendRaw injects a binary blob at the end of the freezer table. The item number is a
+// precautionary parameter to ensure data correctness, but the table will reject already
+// existing data.
+func (batch *freezerTableBatch) AppendRaw(item uint64, blob []byte) error {
+	if item != batch.curItem {
+		return errOutOrderInsertion
+	}
+
+	encItem := blob
+	if batch.sb != nil {
+		encItem = batch.sb.compress(blob)
+	}
+	return batch.appendItem(encItem)
+}
+
+func (batch *freezerTableBatch) appendItem(data []byte) error {
+	// Check if item fits into current data file.
+	itemSize := int64(len(data))
+	itemOffset := batch.t.headBytes + int64(len(batch.dataBuffer))
+	if itemOffset+itemSize > int64(batch.t.maxFileSize) {
+		// It doesn't fit, go to next file first.
+		if err := batch.commit(); err != nil {
+			return err
+		}
+		if err := batch.t.advanceHead(); err != nil {
+			return err
+		}
+		itemOffset = 0
+	}
+
+	// Put data to buffer.
+	batch.dataBuffer = append(batch.dataBuffer, data...)
+	batch.totalBytes += itemSize
+
+	// Put index entry to buffer.
+	entry := indexEntry{filenum: batch.t.headId, offset: uint32(itemOffset + itemSize)}
+	batch.indexBuffer = entry.append(batch.indexBuffer)
+	batch.curItem++
+
+	return batch.maybeCommit()
+}
+
+// maybeCommit writes the buffered data if the buffer is full enough.
+func (batch *freezerTableBatch) maybeCommit() error {
+	if len(batch.dataBuffer) > freezerBatchBufferLimit {
+		return batch.commit()
+	}
+	return nil
+}
+
+// commit writes the batched items to the backing freezerTable.
+func (batch *freezerTableBatch) commit() error {
+	// Write data.
+	_, err := batch.t.head.Write(batch.dataBuffer)
+	if err != nil {
+		return err
+	}
+	dataSize := int64(len(batch.dataBuffer))
+	batch.dataBuffer = batch.dataBuffer[:0]
+
+	// Write index.
+	_, err = batch.t.index.Write(batch.indexBuffer)
+	if err != nil {
+		return err
+	}
+	indexSize := int64(len(batch.indexBuffer))
+	batch.indexBuffer = batch.indexBuffer[:0]
+
+	// Update headBytes of table.
+	batch.t.headBytes += dataSize
+	atomic.StoreUint64(&batch.t.items, batch.curItem)
+
+	// Update metrics.
+	batch.t.sizeGauge.Inc(dataSize + indexSize)
+	batch.t.writeMeter.Mark(dataSize + indexSize)
+	return nil
+}
+
+// snappyBuffer writes snappy in block format, and can be reused. It is
+// reset when WriteTo is called.
+type snappyBuffer struct {
+	dst []byte
+}
+
+// compress snappy-compresses the data.
+func (s *snappyBuffer) compress(data []byte) []byte {
+	// The snappy library does not care what the capacity of the buffer is,
+	// but only checks the length. If the length is too small, it will
+	// allocate a brand new buffer.
+	// To avoid that, we check the required size here, and grow the size of the
+	// buffer to utilize the full capacity.
+	if n := snappy.MaxEncodedLen(len(data)); len(s.dst) < n {
+		if cap(s.dst) < n {
+			s.dst = make([]byte, n)
+		}
+		s.dst = s.dst[:n]
+	}
+
+	s.dst = snappy.Encode(s.dst, data)
+	return s.dst
+}
+
+// writeBuffer implements io.Writer for a byte slice.
+type writeBuffer struct {
+	data []byte
+}
+
+func (wb *writeBuffer) Write(data []byte) (int, error) {
+	wb.data = append(wb.data, data...)
+	return len(data), nil
+}
+
+func (wb *writeBuffer) Reset() {
+	wb.data = wb.data[:0]
+}
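A hypothetical sketch (not part of the new file) of driving a single table batch directly; in the real flow this happens through freezerBatch inside ModifyAncients, which also holds the freezer's write lock:

package rawdb

// fillTable is a hypothetical example of the per-table batch API. Appends must
// arrive strictly in order, starting at the table's current item count.
func fillTable(t *freezerTable, blobs [][]byte) error {
	batch := t.newBatch()
	next := batch.curItem
	for _, blob := range blobs {
		if err := batch.AppendRaw(next, blob); err != nil {
			return err // out-of-order items fail with errOutOrderInsertion
		}
		next++
	}
	// maybeCommit flushes roughly every 2MB; commit flushes the remainder.
	return batch.commit()
}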
@@ -17,6 +17,7 @@
 package rawdb
 
 import (
+	"bytes"
 	"encoding/binary"
 	"errors"
 	"fmt"
@@ -55,19 +56,20 @@ type indexEntry struct {
 
 const indexEntrySize = 6
 
-// unmarshallBinary deserializes binary b into the rawIndex entry.
+// unmarshalBinary deserializes binary b into the rawIndex entry.
 func (i *indexEntry) unmarshalBinary(b []byte) error {
 	i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
 	i.offset = binary.BigEndian.Uint32(b[2:6])
 	return nil
 }
 
-// marshallBinary serializes the rawIndex entry into binary.
-func (i *indexEntry) marshallBinary() []byte {
-	b := make([]byte, indexEntrySize)
-	binary.BigEndian.PutUint16(b[:2], uint16(i.filenum))
-	binary.BigEndian.PutUint32(b[2:6], i.offset)
-	return b
+// append adds the encoded entry to the end of b.
+func (i *indexEntry) append(b []byte) []byte {
+	offset := len(b)
+	out := append(b, make([]byte, indexEntrySize)...)
+	binary.BigEndian.PutUint16(out[offset:], uint16(i.filenum))
+	binary.BigEndian.PutUint32(out[offset+2:], i.offset)
+	return out
 }
 
 // bounds returns the start- and end- offsets, and the file number of where to
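The switch from marshallBinary to append is an allocation fix: instead of allocating a fresh 6-byte slice per entry, callers such as the freezer table batch accumulate entries into one reusable buffer. A hypothetical sketch:

package rawdb

// encodeIndex is a hypothetical example: many entries, one buffer.
func encodeIndex(entries []indexEntry) []byte {
	buf := make([]byte, 0, len(entries)*indexEntrySize)
	for _, e := range entries {
		buf = e.append(buf) // no per-entry allocation
	}
	return buf
}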
@@ -107,7 +109,7 @@ type freezerTable struct {
 	// to count how many historic items have gone missing.
 	itemOffset uint32 // Offset (number of discarded items)
 
-	headBytes  uint32        // Number of bytes written to the head file
+	headBytes  int64         // Number of bytes written to the head file
 	readMeter  metrics.Meter // Meter for measuring the effective amount of data read
 	writeMeter metrics.Meter // Meter for measuring the effective amount of data written
 	sizeGauge  metrics.Gauge // Gauge for tracking the combined size of all freezer tables
@@ -118,12 +120,7 @@ type freezerTable struct {
 
 // NewFreezerTable opens the given path as a freezer table.
 func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
-	return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, disableSnappy)
-}
-
-// newTable opens a freezer table with default settings - 2G files
-func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, disableSnappy bool) (*freezerTable, error) {
-	return newCustomTable(path, name, readMeter, writeMeter, sizeGauge, 2*1000*1000*1000, disableSnappy)
+	return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy)
 }
 
 // openFreezerFileForAppend opens a freezer table file and seeks to the end
@@ -164,10 +161,10 @@ func truncateFreezerFile(file *os.File, size int64) error {
 	return nil
 }
 
-// newCustomTable opens a freezer table, creating the data and index files if they are
+// newTable opens a freezer table, creating the data and index files if they are
 // non existent. Both files are truncated to the shortest common length to ensure
 // they don't go out of sync.
-func newCustomTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
+func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
 	// Ensure the containing directory exists and open the indexEntry file
 	if err := os.MkdirAll(path, 0755); err != nil {
 		return nil, err
@@ -313,7 +310,7 @@ func (t *freezerTable) repair() error {
 	}
 	// Update the item and byte counters and return
 	t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
-	t.headBytes = uint32(contentSize)
+	t.headBytes = contentSize
 	t.headId = lastIndex.filenum
 
 	// Close opened files and preopen all files
@@ -387,14 +384,14 @@ func (t *freezerTable) truncate(items uint64) error {
 		t.releaseFilesAfter(expected.filenum, true)
 		// Set back the historic head
 		t.head = newHead
-		atomic.StoreUint32(&t.headId, expected.filenum)
+		t.headId = expected.filenum
 	}
 	if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
 		return err
 	}
 	// All data files truncated, set internal counters and return
+	t.headBytes = int64(expected.offset)
 	atomic.StoreUint64(&t.items, items)
-	atomic.StoreUint32(&t.headBytes, expected.offset)
 
 	// Retrieve the new size and update the total size counter
 	newSize, err := t.sizeNolock()
@ -471,94 +468,6 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
 	}
 }
 
-// Append injects a binary blob at the end of the freezer table. The item number
-// is a precautionary parameter to ensure data correctness, but the table will
-// reject already existing data.
-//
-// Note, this method will *not* flush any data to disk so be sure to explicitly
-// fsync before irreversibly deleting data from the database.
-func (t *freezerTable) Append(item uint64, blob []byte) error {
-	// Encode the blob before the lock portion
-	if !t.noCompression {
-		blob = snappy.Encode(nil, blob)
-	}
-	// Read lock prevents competition with truncate
-	retry, err := t.append(item, blob, false)
-	if err != nil {
-		return err
-	}
-	if retry {
-		// Read lock was insufficient, retry with a writelock
-		_, err = t.append(item, blob, true)
-	}
-	return err
-}
-
-// append injects a binary blob at the end of the freezer table.
-// Normally, inserts do not require holding the write-lock, so it should be invoked with 'wlock' set to
-// false.
-// However, if the data will grown the current file out of bounds, then this
-// method will return 'true, nil', indicating that the caller should retry, this time
-// with 'wlock' set to true.
-func (t *freezerTable) append(item uint64, encodedBlob []byte, wlock bool) (bool, error) {
-	if wlock {
-		t.lock.Lock()
-		defer t.lock.Unlock()
-	} else {
-		t.lock.RLock()
-		defer t.lock.RUnlock()
-	}
-	// Ensure the table is still accessible
-	if t.index == nil || t.head == nil {
-		return false, errClosed
-	}
-	// Ensure only the next item can be written, nothing else
-	if atomic.LoadUint64(&t.items) != item {
-		return false, fmt.Errorf("appending unexpected item: want %d, have %d", t.items, item)
-	}
-	bLen := uint32(len(encodedBlob))
-	if t.headBytes+bLen < bLen ||
-		t.headBytes+bLen > t.maxFileSize {
-		// Writing would overflow, so we need to open a new data file.
-		// If we don't already hold the writelock, abort and let the caller
-		// invoke this method a second time.
-		if !wlock {
-			return true, nil
-		}
-		nextID := atomic.LoadUint32(&t.headId) + 1
-		// We open the next file in truncated mode -- if this file already
-		// exists, we need to start over from scratch on it
-		newHead, err := t.openFile(nextID, openFreezerFileTruncated)
-		if err != nil {
-			return false, err
-		}
-		// Close old file, and reopen in RDONLY mode
-		t.releaseFile(t.headId)
-		t.openFile(t.headId, openFreezerFileForReadOnly)
-
-		// Swap out the current head
-		t.head = newHead
-		atomic.StoreUint32(&t.headBytes, 0)
-		atomic.StoreUint32(&t.headId, nextID)
-	}
-	if _, err := t.head.Write(encodedBlob); err != nil {
-		return false, err
-	}
-	newOffset := atomic.AddUint32(&t.headBytes, bLen)
-	idx := indexEntry{
-		filenum: atomic.LoadUint32(&t.headId),
-		offset:  newOffset,
-	}
-	// Write indexEntry
-	t.index.Write(idx.marshallBinary())
-
-	t.writeMeter.Mark(int64(bLen + indexEntrySize))
-	t.sizeGauge.Inc(int64(bLen + indexEntrySize))
-
-	atomic.AddUint64(&t.items, 1)
-	return false, nil
-}
-
 // getIndices returns the index entries for the given from-item, covering 'count' items.
 // N.B: The actual number of returned indices for N items will always be N+1 (unless an
 // error is returned).
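Worth noting for reviewers: the removed Append/append pair above implemented an optimistic locking scheme -- writes ran under the read lock (relying on there being a single writer goroutine, with the read lock only excluding concurrent truncate/close), and only the rare head-file rollover retried under the write lock. A minimal self-contained sketch of that retry pattern, with all names invented for illustration:

package main

import (
	"fmt"
	"sync"
)

// table is a toy append-only log locked like the removed freezerTable code:
// a single writer goroutine appends, while truncate/close take the write lock.
type table struct {
	lock sync.RWMutex
	buf  []byte
	max  int
}

// append is the fast path. With wlock=false it runs under the read lock,
// which excludes truncation but not other appends (there is only one
// writer). It reports retry=true when rollover needs the write lock.
func (t *table) append(b []byte, wlock bool) (retry bool) {
	if wlock {
		t.lock.Lock()
		defer t.lock.Unlock()
	} else {
		t.lock.RLock()
		defer t.lock.RUnlock()
	}
	if len(t.buf)+len(b) > t.max {
		if !wlock {
			return true // caller must retry holding the write lock
		}
		t.buf = t.buf[:0] // "open a new head file"
	}
	t.buf = append(t.buf, b...)
	return false
}

// Append mirrors the removed freezerTable.Append control flow.
func (t *table) Append(b []byte) {
	if t.append(b, false) {
		t.append(b, true)
	}
}

func main() {
	t := &table{max: 8}
	t.Append([]byte("hello"))
	t.Append([]byte("world")) // overflows, retried under the write lock
	fmt.Printf("head contains %q\n", t.buf)
}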
@ -651,6 +560,7 @@ func (t *freezerTable) RetrieveItems(start, count, maxBytes uint64) ([][]byte, e
 func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []int, error) {
 	t.lock.RLock()
 	defer t.lock.RUnlock()
 
 	// Ensure the table and the item is accessible
 	if t.index == nil || t.head == nil {
 		return nil, nil, errClosed
@ -763,6 +673,32 @@ func (t *freezerTable) sizeNolock() (uint64, error) {
 	return total, nil
 }
 
+// advanceHead should be called when the current head file would outgrow the file limits,
+// and a new file must be opened. The caller of this method must hold the write-lock
+// before calling this method.
+func (t *freezerTable) advanceHead() error {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	// We open the next file in truncated mode -- if this file already
+	// exists, we need to start over from scratch on it.
+	nextID := t.headId + 1
+	newHead, err := t.openFile(nextID, openFreezerFileTruncated)
+	if err != nil {
+		return err
+	}
+
+	// Close old file, and reopen in RDONLY mode.
+	t.releaseFile(t.headId)
+	t.openFile(t.headId, openFreezerFileForReadOnly)
+
+	// Swap out the current head.
+	t.head = newHead
+	t.headBytes = 0
+	t.headId = nextID
+	return nil
+}
+
 // Sync pushes any pending data from memory out to disk. This is an expensive
 // operation, so use it with care.
 func (t *freezerTable) Sync() error {
@ -775,10 +711,21 @@ func (t *freezerTable) Sync() error {
 // DumpIndex is a debug print utility function, mainly for testing. It can also
 // be used to analyse a live freezer table index.
 func (t *freezerTable) DumpIndex(start, stop int64) {
+	t.dumpIndex(os.Stdout, start, stop)
+}
+
+func (t *freezerTable) dumpIndexString(start, stop int64) string {
+	var out bytes.Buffer
+	out.WriteString("\n")
+	t.dumpIndex(&out, start, stop)
+	return out.String()
+}
+
+func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
 	buf := make([]byte, indexEntrySize)
 
-	fmt.Printf("| number | fileno | offset |\n")
-	fmt.Printf("|--------|--------|--------|\n")
+	fmt.Fprintf(w, "| number | fileno | offset |\n")
+	fmt.Fprintf(w, "|--------|--------|--------|\n")
 
 	for i := uint64(start); ; i++ {
 		if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
@ -786,10 +733,10 @@ func (t *freezerTable) DumpIndex(start, stop int64) {
 		}
 		var entry indexEntry
 		entry.unmarshalBinary(buf)
-		fmt.Printf("| %03d | %03d | %03d | \n", i, entry.filenum, entry.offset)
+		fmt.Fprintf(w, "| %03d | %03d | %03d | \n", i, entry.filenum, entry.offset)
 		if stop > 0 && i >= uint64(stop) {
 			break
 		}
 	}
-	fmt.Printf("|--------------------------|\n")
+	fmt.Fprintf(w, "|--------------------------|\n")
 }
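The dumpIndex refactor above is a small writer-injection change: DumpIndex keeps printing to stdout, while tests can now capture the same report as a string via dumpIndexString. A generic sketch of the same pattern, not freezer code, with invented names:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// dump writes a small report to any io.Writer, so callers pick the sink.
func dump(w io.Writer, rows []int) {
	fmt.Fprintf(w, "| number | value |\n")
	for i, v := range rows {
		fmt.Fprintf(w, "| %03d | %03d |\n", i, v)
	}
}

// dumpString captures the report in memory, mirroring dumpIndexString.
func dumpString(rows []int) string {
	var out bytes.Buffer
	out.WriteString("\n")
	dump(&out, rows)
	return out.String()
}

func main() {
	dump(os.Stdout, []int{1, 2, 3}) // direct, like DumpIndex
	_ = dumpString([]int{4, 5, 6})  // captured, like dumpIndexString
}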
@ -18,49 +18,36 @@ package rawdb
 
 import (
 	"bytes"
-	"encoding/binary"
 	"fmt"
-	"io/ioutil"
 	"math/rand"
 	"os"
 	"path/filepath"
-	"sync"
 	"testing"
 	"time"
 
 	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/stretchr/testify/require"
 )
 
 func init() {
 	rand.Seed(time.Now().Unix())
}
 
-// Gets a chunk of data, filled with 'b'
-func getChunk(size int, b int) []byte {
-	data := make([]byte, size)
-	for i := range data {
-		data[i] = byte(b)
-	}
-	return data
-}
 
 // TestFreezerBasics test initializing a freezertable from scratch, writing to the table,
 // and reading it back.
 func TestFreezerBasics(t *testing.T) {
 	t.Parallel()
 	// set cutoff at 50 bytes
-	f, err := newCustomTable(os.TempDir(),
+	f, err := newTable(os.TempDir(),
 		fmt.Sprintf("unittest-%d", rand.Uint64()),
 		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer f.Close()
 
 	// Write 15 bytes 255 times, results in 85 files
-	for x := 0; x < 255; x++ {
-		data := getChunk(15, x)
-		f.Append(uint64(x), data)
-	}
+	writeChunks(t, f, 255, 15)
 
 	//print(t, f, 0)
 	//print(t, f, 1)
@ -98,16 +85,21 @@ func TestFreezerBasicsClosing(t *testing.T) {
 		f   *freezerTable
 		err error
 	)
-	f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 	if err != nil {
 		t.Fatal(err)
 	}
-	// Write 15 bytes 255 times, results in 85 files
+
+	// Write 15 bytes 255 times, results in 85 files.
+	// In-between writes, the table is closed and re-opened.
 	for x := 0; x < 255; x++ {
 		data := getChunk(15, x)
-		f.Append(uint64(x), data)
+		batch := f.newBatch()
+		require.NoError(t, batch.AppendRaw(uint64(x), data))
+		require.NoError(t, batch.commit())
 		f.Close()
-		f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+
+		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@ -124,7 +116,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
 		t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
 	}
 	f.Close()
-	f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 	if err != nil {
 		t.Fatal(err)
 	}
@ -137,22 +129,22 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
 
-	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+	// Fill table
+	{
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
 		// Write 15 bytes 255 times
-		for x := 0; x < 255; x++ {
-			data := getChunk(15, x)
-			f.Append(uint64(x), data)
-		}
+		writeChunks(t, f, 255, 15)
+
 		// The last item should be there
 		if _, err = f.Retrieve(0xfe); err != nil {
 			t.Fatal(err)
 		}
 		f.Close()
 	}
 
 	// open the index
 	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
 	if err != nil {
@ -165,9 +157,10 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
 	}
 	idxFile.Truncate(stat.Size() - 4)
 	idxFile.Close()
+
 	// Now open it again
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@ -188,22 +181,22 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
 
-	{ // Fill a table and close it
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+	// Fill a table and close it
+	{
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
 		// Write 15 bytes 255 times
-		for x := 0; x < 0xff; x++ {
-			data := getChunk(15, x)
-			f.Append(uint64(x), data)
-		}
+		writeChunks(t, f, 255, 15)
+
 		// The last item should be there
 		if _, err = f.Retrieve(f.items - 1); err != nil {
 			t.Fatal(err)
 		}
 		f.Close()
 	}
 
 	// open the index
 	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
 	if err != nil {
@ -213,9 +206,10 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
 	// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
 	idxFile.Truncate(indexEntrySize + indexEntrySize + indexEntrySize/2)
 	idxFile.Close()
+
 	// Now open it again
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@ -228,15 +222,17 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
 			t.Errorf("Expected error for missing index entry")
 		}
 		// We should now be able to store items again, from item = 1
+		batch := f.newBatch()
 		for x := 1; x < 0xff; x++ {
-			data := getChunk(15, ^x)
-			f.Append(uint64(x), data)
+			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
 		}
+		require.NoError(t, batch.commit())
 		f.Close()
 	}
 
 	// And if we open it, we should now be able to read all of them (new values)
 	{
-		f, _ := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		for y := 1; y < 255; y++ {
 			exp := getChunk(15, ^y)
 			got, err := f.Retrieve(uint64(y))
@ -255,22 +251,21 @@ func TestSnappyDetection(t *testing.T) {
 	t.Parallel()
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())
 
 	// Open with snappy
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
 		// Write 15 bytes 255 times
-		for x := 0; x < 0xff; x++ {
-			data := getChunk(15, x)
-			f.Append(uint64(x), data)
-		}
+		writeChunks(t, f, 255, 15)
 		f.Close()
 	}
 
 	// Open without snappy
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, false)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@ -282,7 +277,7 @@ func TestSnappyDetection(t *testing.T) {
 
 	// Open with snappy
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@ -292,8 +287,8 @@ func TestSnappyDetection(t *testing.T) {
 			t.Fatalf("expected no error, got %v", err)
 		}
 	}
-
 }
 
 func assertFileSize(f string, size int64) error {
 	stat, err := os.Stat(f)
 	if err != nil {
@ -303,7 +298,6 @@ func assertFileSize(f string, size int64) error {
 		return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
 	}
 	return nil
-
 }
 
 // TestFreezerRepairDanglingIndex checks that if the index has more entries than there are data,
@ -313,16 +307,15 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())
 
-	{ // Fill a table and close it
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+	// Fill a table and close it
+	{
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
 		// Write 15 bytes 9 times : 150 bytes
-		for x := 0; x < 9; x++ {
-			data := getChunk(15, x)
-			f.Append(uint64(x), data)
-		}
+		writeChunks(t, f, 9, 15)
+
 		// The last item should be there
 		if _, err = f.Retrieve(f.items - 1); err != nil {
 			f.Close()
@ -331,6 +324,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
 		f.Close()
 		// File sizes should be 45, 45, 45 : items[3, 3, 3)
 	}
+
 	// Crop third file
 	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
 	// Truncate third file: 45 ,45, 20
@ -345,17 +339,18 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
 		file.Truncate(20)
 		file.Close()
 	}
+
 	// Open db it again
 	// It should restore the file(s) to
 	// 45, 45, 15
 	// with 3+3+1 items
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
+		defer f.Close()
 		if f.items != 7 {
-			f.Close()
 			t.Fatalf("expected %d items, got %d", 7, f.items)
 		}
 		if err := assertFileSize(fileToCrop, 15); err != nil {
@ -365,30 +360,29 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
 }
 
 func TestFreezerTruncate(t *testing.T) {
-
 	t.Parallel()
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("truncation-%d", rand.Uint64())
 
-	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+	// Fill table
+	{
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
 		// Write 15 bytes 30 times
-		for x := 0; x < 30; x++ {
-			data := getChunk(15, x)
-			f.Append(uint64(x), data)
-		}
+		writeChunks(t, f, 30, 15)
+
 		// The last item should be there
 		if _, err = f.Retrieve(f.items - 1); err != nil {
 			t.Fatal(err)
 		}
 		f.Close()
 	}
 
 	// Reopen, truncate
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@ -401,9 +395,7 @@ func TestFreezerTruncate(t *testing.T) {
 		if f.headBytes != 15 {
 			t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
 		}
-
 	}
-
 }
 
 // TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
@ -412,20 +404,26 @@ func TestFreezerRepairFirstFile(t *testing.T) {
 	t.Parallel()
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())
-	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+
+	// Fill table
+	{
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
 		// Write 80 bytes, splitting out into two files
-		f.Append(0, getChunk(40, 0xFF))
-		f.Append(1, getChunk(40, 0xEE))
+		batch := f.newBatch()
+		require.NoError(t, batch.AppendRaw(0, getChunk(40, 0xFF)))
+		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xEE)))
+		require.NoError(t, batch.commit())
+
 		// The last item should be there
-		if _, err = f.Retrieve(f.items - 1); err != nil {
+		if _, err = f.Retrieve(1); err != nil {
 			t.Fatal(err)
 		}
 		f.Close()
 	}
 
 	// Truncate the file in half
 	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
 	{
@ -439,9 +437,10 @@ func TestFreezerRepairFirstFile(t *testing.T) {
 		file.Truncate(20)
 		file.Close()
 	}
+
 	// Reopen
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@ -449,9 +448,14 @@ func TestFreezerRepairFirstFile(t *testing.T) {
 			f.Close()
 			t.Fatalf("expected %d items, got %d", 0, f.items)
 		}
+
 		// Write 40 bytes
-		f.Append(1, getChunk(40, 0xDD))
+		batch := f.newBatch()
+		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xDD)))
+		require.NoError(t, batch.commit())
+
 		f.Close()
 
 		// Should have been truncated down to zero and then 40 written
 		if err := assertFileSize(fileToCrop, 40); err != nil {
 			t.Fatal(err)
@ -468,25 +472,26 @@ func TestFreezerReadAndTruncate(t *testing.T) {
 	t.Parallel()
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())
-	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+
+	// Fill table
+	{
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
 		// Write 15 bytes 30 times
-		for x := 0; x < 30; x++ {
-			data := getChunk(15, x)
-			f.Append(uint64(x), data)
-		}
+		writeChunks(t, f, 30, 15)
+
 		// The last item should be there
 		if _, err = f.Retrieve(f.items - 1); err != nil {
 			t.Fatal(err)
 		}
 		f.Close()
 	}
 
 	// Reopen and read all files
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@ -497,40 +502,48 @@ func TestFreezerReadAndTruncate(t *testing.T) {
 		for y := byte(0); y < 30; y++ {
 			f.Retrieve(uint64(y))
 		}
 
 		// Now, truncate back to zero
 		f.truncate(0)
 
 		// Write the data again
+		batch := f.newBatch()
 		for x := 0; x < 30; x++ {
-			data := getChunk(15, ^x)
-			if err := f.Append(uint64(x), data); err != nil {
-				t.Fatalf("error %v", err)
-			}
+			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
 		}
+		require.NoError(t, batch.commit())
 		f.Close()
 	}
 }
 
-func TestOffset(t *testing.T) {
+func TestFreezerOffset(t *testing.T) {
 	t.Parallel()
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("offset-%d", rand.Uint64())
-	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+
+	// Fill table
+	{
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
 		if err != nil {
 			t.Fatal(err)
 		}
 
 		// Write 6 x 20 bytes, splitting out into three files
-		f.Append(0, getChunk(20, 0xFF))
-		f.Append(1, getChunk(20, 0xEE))
+		batch := f.newBatch()
+		require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
+		require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
 
-		f.Append(2, getChunk(20, 0xdd))
-		f.Append(3, getChunk(20, 0xcc))
+		require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
+		require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
 
-		f.Append(4, getChunk(20, 0xbb))
-		f.Append(5, getChunk(20, 0xaa))
-		f.DumpIndex(0, 100)
+		require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
+		require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
+		require.NoError(t, batch.commit())
+
+		t.Log(f.dumpIndexString(0, 100))
 		f.Close()
 	}
 
 	// Now crop it.
 	{
 		// delete files 0 and 1
@ -558,7 +571,7 @@ func TestOffset(t *testing.T) {
 			filenum: tailId,
 			offset:  itemOffset,
 		}
-		buf := zeroIndex.marshallBinary()
+		buf := zeroIndex.append(nil)
 		// Overwrite index zero
 		copy(indexBuf, buf)
 		// Remove the four next indices by overwriting
@ -567,44 +580,36 @@ func TestOffset(t *testing.T) {
 		// Need to truncate the moved index items
 		indexFile.Truncate(indexEntrySize * (1 + 2))
 		indexFile.Close()
-
 	}
 
 	// Now open again
-	checkPresent := func(numDeleted uint64) {
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+	{
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
 		if err != nil {
 			t.Fatal(err)
 		}
-		f.DumpIndex(0, 100)
-		// It should allow writing item 6
-		f.Append(numDeleted+2, getChunk(20, 0x99))
-
-		// It should be fine to fetch 4,5,6
-		if got, err := f.Retrieve(numDeleted); err != nil {
-			t.Fatal(err)
-		} else if exp := getChunk(20, 0xbb); !bytes.Equal(got, exp) {
-			t.Fatalf("expected %x got %x", exp, got)
-		}
-		if got, err := f.Retrieve(numDeleted + 1); err != nil {
-			t.Fatal(err)
-		} else if exp := getChunk(20, 0xaa); !bytes.Equal(got, exp) {
-			t.Fatalf("expected %x got %x", exp, got)
-		}
-		if got, err := f.Retrieve(numDeleted + 2); err != nil {
-			t.Fatal(err)
-		} else if exp := getChunk(20, 0x99); !bytes.Equal(got, exp) {
-			t.Fatalf("expected %x got %x", exp, got)
-		}
+		defer f.Close()
+		t.Log(f.dumpIndexString(0, 100))
+
+		// It should allow writing item 6.
+		batch := f.newBatch()
+		require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
+		require.NoError(t, batch.commit())
 
-		// It should error at 0, 1,2,3
-		for i := numDeleted - 1; i > numDeleted-10; i-- {
-			if _, err := f.Retrieve(i); err == nil {
-				t.Fatal("expected err")
-			}
-		}
+		checkRetrieveError(t, f, map[uint64]error{
+			0: errOutOfBounds,
+			1: errOutOfBounds,
+			2: errOutOfBounds,
+			3: errOutOfBounds,
+		})
+		checkRetrieve(t, f, map[uint64][]byte{
+			4: getChunk(20, 0xbb),
+			5: getChunk(20, 0xaa),
+			6: getChunk(20, 0x99),
+		})
 	}
-	checkPresent(4)
-	// Now, let's pretend we have deleted 1M items
+
+	// Edit the index again, with a much larger initial offset of 1M.
 	{
 		// Read the index file
 		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
@ -624,13 +629,71 @@ func TestOffset(t *testing.T) {
 			offset:  itemOffset,
 			filenum: tailId,
 		}
-		buf := zeroIndex.marshallBinary()
+		buf := zeroIndex.append(nil)
 		// Overwrite index zero
 		copy(indexBuf, buf)
 		indexFile.WriteAt(indexBuf, 0)
 		indexFile.Close()
 	}
-	checkPresent(1000000)
+
+	// Check that existing items have been moved to index 1M.
+	{
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer f.Close()
+		t.Log(f.dumpIndexString(0, 100))
+
+		checkRetrieveError(t, f, map[uint64]error{
+			0:      errOutOfBounds,
+			1:      errOutOfBounds,
+			2:      errOutOfBounds,
+			3:      errOutOfBounds,
+			999999: errOutOfBounds,
+		})
+		checkRetrieve(t, f, map[uint64][]byte{
+			1000000: getChunk(20, 0xbb),
+			1000001: getChunk(20, 0xaa),
+		})
+	}
+}
+
+func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
+	t.Helper()
+
+	for item, wantBytes := range items {
+		value, err := f.Retrieve(item)
+		if err != nil {
+			t.Fatalf("can't get expected item %d: %v", item, err)
+		}
+		if !bytes.Equal(value, wantBytes) {
+			t.Fatalf("item %d has wrong value %x (want %x)", item, value, wantBytes)
+		}
+	}
+}
+
+func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
+	t.Helper()
+
+	for item, wantError := range items {
+		value, err := f.Retrieve(item)
+		if err == nil {
+			t.Fatalf("unexpected value %x for item %d, want error %v", item, value, wantError)
+		}
+		if err != wantError {
+			t.Fatalf("wrong error for item %d: %v", item, err)
+		}
+	}
+}
+
+// Gets a chunk of data, filled with 'b'
+func getChunk(size int, b int) []byte {
+	data := make([]byte, size)
+	for i := range data {
+		data[i] = byte(b)
+	}
+	return data
 }
 
 // TODO (?)
@ -644,53 +707,18 @@ func TestOffset(t *testing.T) {
 // should be handled already, and the case described above can only (?) happen if an
 // external process/user deletes files from the filesystem.
 
-// TestAppendTruncateParallel is a test to check if the Append/truncate operations are
-// racy.
-//
-// The reason why it's not a regular fuzzer, within tests/fuzzers, is that it is dependent
-// on timing rather than 'clever' input -- there's no determinism.
-func TestAppendTruncateParallel(t *testing.T) {
-	dir, err := ioutil.TempDir("", "freezer")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(dir)
-
-	f, err := newCustomTable(dir, "tmp", metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, 8, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	fill := func(mark uint64) []byte {
-		data := make([]byte, 8)
-		binary.LittleEndian.PutUint64(data, mark)
-		return data
-	}
-
-	for i := 0; i < 5000; i++ {
-		f.truncate(0)
-		data0 := fill(0)
-		f.Append(0, data0)
-		data1 := fill(1)
-
-		var wg sync.WaitGroup
-		wg.Add(2)
-		go func() {
-			f.truncate(0)
-			wg.Done()
-		}()
-		go func() {
-			f.Append(1, data1)
-			wg.Done()
-		}()
-		wg.Wait()
-
-		if have, err := f.Retrieve(0); err == nil {
-			if !bytes.Equal(have, data0) {
-				t.Fatalf("have %x want %x", have, data0)
-			}
+func writeChunks(t *testing.T, ft *freezerTable, n int, length int) {
+	t.Helper()
+
+	batch := ft.newBatch()
+	for i := 0; i < n; i++ {
+		if err := batch.AppendRaw(uint64(i), getChunk(length, i)); err != nil {
+			t.Fatalf("AppendRaw(%d, ...) returned error: %v", i, err)
 		}
 	}
+	if err := batch.commit(); err != nil {
+		t.Fatalf("Commit returned error: %v", err)
+	}
 }
 
 // TestSequentialRead does some basic tests on the RetrieveItems.
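The writeChunks helper above is how the updated tests drive the new batch write path: stage items with AppendRaw, then make them durable with commit. A standalone sketch of that stage-then-commit shape, with a toy in-memory store standing in for the real freezerTable/freezerBatch (all names here are invented):

package main

import "fmt"

// store is a stand-in for freezerTable: items become visible only on commit.
type store struct{ items [][]byte }

// batch buffers writes so they hit the store as a unit, like freezerBatch.
type batch struct {
	s       *store
	pending [][]byte
}

func (s *store) newBatch() *batch { return &batch{s: s} }

// AppendRaw stages an item; the id must be the next sequential index,
// mirroring the freezer's "appending unexpected item" check.
func (b *batch) AppendRaw(id uint64, blob []byte) error {
	if want := uint64(len(b.s.items) + len(b.pending)); id != want {
		return fmt.Errorf("appending unexpected item: want %d, have %d", want, id)
	}
	b.pending = append(b.pending, blob)
	return nil
}

// commit makes all staged items visible at once.
func (b *batch) commit() error {
	b.s.items = append(b.s.items, b.pending...)
	b.pending = nil
	return nil
}

func main() {
	s := &store{}
	batch := s.newBatch()
	for i := 0; i < 3; i++ {
		if err := batch.AppendRaw(uint64(i), []byte{byte(i)}); err != nil {
			panic(err)
		}
	}
	if err := batch.commit(); err != nil {
		panic(err)
	}
	fmt.Println("items stored:", len(s.items)) // items stored: 3
}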
@ -698,20 +726,17 @@ func TestSequentialRead(t *testing.T) {
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("batchread-%d", rand.Uint64())
 	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
 		// Write 15 bytes 30 times
-		for x := 0; x < 30; x++ {
-			data := getChunk(15, x)
-			f.Append(uint64(x), data)
-		}
+		writeChunks(t, f, 30, 15)
 		f.DumpIndex(0, 30)
 		f.Close()
 	}
 	{ // Open it, iterate, verify iteration
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@ -732,7 +757,7 @@ func TestSequentialRead(t *testing.T) {
 	}
 	{ // Open it, iterate, verify byte limit. The byte limit is less than item
 		// size, so each lookup should only return one item
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@ -761,16 +786,13 @@ func TestSequentialReadByteLimit(t *testing.T) {
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
 	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 100, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
 		if err != nil {
 			t.Fatal(err)
 		}
 		// Write 10 bytes 30 times,
 		// Splitting it at every 100 bytes (10 items)
-		for x := 0; x < 30; x++ {
-			data := getChunk(10, x)
-			f.Append(uint64(x), data)
-		}
+		writeChunks(t, f, 30, 10)
 		f.Close()
 	}
 	for i, tc := range []struct {
@ -786,7 +808,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
 		{100, 109, 10},
 	} {
 		{
-			f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 100, true)
+			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
 			if err != nil {
 				t.Fatal(err)
 			}
301
core/rawdb/freezer_test.go
Normal file
@ -0,0 +1,301 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"math/rand"
+	"os"
+	"sync"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/stretchr/testify/require"
+)
+
+var freezerTestTableDef = map[string]bool{"test": true}
+
+func TestFreezerModify(t *testing.T) {
+	t.Parallel()
+
+	// Create test data.
+	var valuesRaw [][]byte
+	var valuesRLP []*big.Int
+	for x := 0; x < 100; x++ {
+		v := getChunk(256, x)
+		valuesRaw = append(valuesRaw, v)
+		iv := big.NewInt(int64(x))
+		iv = iv.Exp(iv, iv, nil)
+		valuesRLP = append(valuesRLP, iv)
+	}
+
+	tables := map[string]bool{"raw": true, "rlp": false}
+	f, dir := newFreezerForTesting(t, tables)
+	defer os.RemoveAll(dir)
+	defer f.Close()
+
+	// Commit test data.
+	_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+		for i := range valuesRaw {
+			if err := op.AppendRaw("raw", uint64(i), valuesRaw[i]); err != nil {
+				return err
+			}
+			if err := op.Append("rlp", uint64(i), valuesRLP[i]); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		t.Fatal("ModifyAncients failed:", err)
+	}
+
+	// Dump indexes.
+	for _, table := range f.tables {
+		t.Log(table.name, "index:", table.dumpIndexString(0, int64(len(valuesRaw))))
+	}
+
+	// Read back test data.
+	checkAncientCount(t, f, "raw", uint64(len(valuesRaw)))
+	checkAncientCount(t, f, "rlp", uint64(len(valuesRLP)))
+	for i := range valuesRaw {
+		v, _ := f.Ancient("raw", uint64(i))
+		if !bytes.Equal(v, valuesRaw[i]) {
+			t.Fatalf("wrong raw value at %d: %x", i, v)
+		}
+		ivEnc, _ := f.Ancient("rlp", uint64(i))
+		want, _ := rlp.EncodeToBytes(valuesRLP[i])
+		if !bytes.Equal(ivEnc, want) {
+			t.Fatalf("wrong RLP value at %d: %x", i, ivEnc)
+		}
+	}
+}
+
+// This checks that ModifyAncients rolls back freezer updates
+// when the function passed to it returns an error.
+func TestFreezerModifyRollback(t *testing.T) {
+	t.Parallel()
+
+	f, dir := newFreezerForTesting(t, freezerTestTableDef)
+	defer os.RemoveAll(dir)
+
+	theError := errors.New("oops")
+	_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+		// Append three items. This creates two files immediately,
+		// because the table size limit of the test freezer is 2048.
+		require.NoError(t, op.AppendRaw("test", 0, make([]byte, 2048)))
+		require.NoError(t, op.AppendRaw("test", 1, make([]byte, 2048)))
+		require.NoError(t, op.AppendRaw("test", 2, make([]byte, 2048)))
+		return theError
+	})
+	if err != theError {
+		t.Errorf("ModifyAncients returned wrong error %q", err)
+	}
+	checkAncientCount(t, f, "test", 0)
+	f.Close()
+
+	// Reopen and check that the rolled-back data doesn't reappear.
+	tables := map[string]bool{"test": true}
+	f2, err := newFreezer(dir, "", false, 2049, tables)
+	if err != nil {
+		t.Fatalf("can't reopen freezer after failed ModifyAncients: %v", err)
+	}
+	defer f2.Close()
+	checkAncientCount(t, f2, "test", 0)
+}
+
+// This test runs ModifyAncients and Ancient concurrently with each other.
+func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
+	t.Parallel()
+
+	f, dir := newFreezerForTesting(t, freezerTestTableDef)
+	defer os.RemoveAll(dir)
+	defer f.Close()
+
+	var (
+		numReaders     = 5
+		writeBatchSize = uint64(50)
+		written        = make(chan uint64, numReaders*6)
+		wg             sync.WaitGroup
+	)
+	wg.Add(numReaders + 1)
+
+	// Launch the writer. It appends 10000 items in batches.
+	go func() {
+		defer wg.Done()
+		defer close(written)
+		for item := uint64(0); item < 10000; item += writeBatchSize {
+			_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+				for i := uint64(0); i < writeBatchSize; i++ {
+					item := item + i
+					value := getChunk(32, int(item))
+					if err := op.AppendRaw("test", item, value); err != nil {
+						return err
+					}
+				}
+				return nil
+			})
+			if err != nil {
+				panic(err)
+			}
+			for i := 0; i < numReaders; i++ {
+				written <- item + writeBatchSize
+			}
+		}
+	}()
+
+	// Launch the readers. They read random items from the freezer up to the
+	// current frozen item count.
+	for i := 0; i < numReaders; i++ {
+		go func() {
+			defer wg.Done()
+			for frozen := range written {
+				for rc := 0; rc < 80; rc++ {
+					num := uint64(rand.Intn(int(frozen)))
+					value, err := f.Ancient("test", num)
+					if err != nil {
+						panic(fmt.Errorf("error reading %d (frozen %d): %v", num, frozen, err))
+					}
+					if !bytes.Equal(value, getChunk(32, int(num))) {
+						panic(fmt.Errorf("wrong value at %d", num))
+					}
+				}
+			}
+		}()
+	}
+
+	wg.Wait()
+}
+
+// This test runs ModifyAncients and TruncateAncients concurrently with each other.
+func TestFreezerConcurrentModifyTruncate(t *testing.T) {
+	f, dir := newFreezerForTesting(t, freezerTestTableDef)
+	defer os.RemoveAll(dir)
+	defer f.Close()
+
+	var item = make([]byte, 256)
+
+	for i := 0; i < 1000; i++ {
+		// First reset and write 100 items.
+		if err := f.TruncateAncients(0); err != nil {
+			t.Fatal("truncate failed:", err)
+		}
+		_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+			for i := uint64(0); i < 100; i++ {
+				if err := op.AppendRaw("test", i, item); err != nil {
+					return err
+				}
+			}
+			return nil
+		})
+		if err != nil {
+			t.Fatal("modify failed:", err)
+		}
+		checkAncientCount(t, f, "test", 100)
+
+		// Now append 100 more items and truncate concurrently.
+		var (
+			wg          sync.WaitGroup
+			truncateErr error
+			modifyErr   error
+		)
+		wg.Add(3)
+		go func() {
+			_, modifyErr = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+				for i := uint64(100); i < 200; i++ {
+					if err := op.AppendRaw("test", i, item); err != nil {
+						return err
+					}
+				}
+				return nil
+			})
+			wg.Done()
+		}()
+		go func() {
+			truncateErr = f.TruncateAncients(10)
+			wg.Done()
+		}()
+		go func() {
+			f.AncientSize("test")
+			wg.Done()
+		}()
+		wg.Wait()
+
+		// Now check the outcome. If the truncate operation went through first, the append
+		// fails, otherwise it succeeds. In either case, the freezer should be positioned
+		// at 10 after both operations are done.
+		if truncateErr != nil {
+			t.Fatal("concurrent truncate failed:", err)
+		}
+		if !(modifyErr == nil || modifyErr == errOutOrderInsertion) {
+			t.Fatal("wrong error from concurrent modify:", modifyErr)
+		}
+		checkAncientCount(t, f, "test", 10)
+	}
+}
+
+func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, string) {
+	t.Helper()
+
+	dir, err := ioutil.TempDir("", "freezer")
+	if err != nil {
+		t.Fatal(err)
+	}
+	// note: using low max table size here to ensure the tests actually
+	// switch between multiple files.
+	f, err := newFreezer(dir, "", false, 2049, tables)
+	if err != nil {
+		t.Fatal("can't open freezer", err)
+	}
+	return f, dir
+}
+
+// checkAncientCount verifies that the freezer contains n items.
+func checkAncientCount(t *testing.T, f *freezer, kind string, n uint64) {
+	t.Helper()
+
+	if frozen, _ := f.Ancients(); frozen != n {
+		t.Fatalf("Ancients() returned %d, want %d", frozen, n)
+	}
+
+	// Check at index n-1.
+	if n > 0 {
+		index := n - 1
+		if ok, _ := f.HasAncient(kind, index); !ok {
+			t.Errorf("HasAncient(%q, %d) returned false unexpectedly", kind, index)
+		}
+		if _, err := f.Ancient(kind, index); err != nil {
+			t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
+		}
+	}
+
+	// Check at index n.
+	index := n
+	if ok, _ := f.HasAncient(kind, index); ok {
+		t.Errorf("HasAncient(%q, %d) returned true unexpectedly", kind, index)
+	}
+	if _, err := f.Ancient(kind, index); err == nil {
+		t.Errorf("Ancient(%q, %d) didn't return expected error", kind, index)
+	} else if err != errOutOfBounds {
+		t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
+	}
+}
@ -80,10 +80,9 @@ func (t *table) AncientSize(kind string) (uint64, error) {
 	return t.db.AncientSize(kind)
 }
 
-// AppendAncient is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
-	return t.db.AppendAncient(number, hash, header, body, receipts, td)
+// ModifyAncients runs an ancient write operation on the underlying database.
+func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) {
+	return t.db.ModifyAncients(fn)
 }
 
 // TruncateAncients is a noop passthrough that just forwards the request to the underlying
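This hunk is the passthrough side of the transactional write API that replaces AppendAncient: all ancient writes now happen inside a callback and either commit as a unit or roll back (see TestFreezerModifyRollback above). A sketch of a caller, assuming ModifyAncients hangs off ethdb.AncientWriter as this diff suggests, and with a made-up "test" table name:

package ancientdemo

import "github.com/ethereum/go-ethereum/ethdb"

// writeAncientBlob stores one raw item through the transactional API.
// Sketch only: the table name and the exact interface that carries
// ModifyAncients are assumptions based on this diff.
func writeAncientBlob(db ethdb.AncientWriter, id uint64, blob []byte) error {
	_, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		// Appends made here become visible atomically when the
		// callback returns nil, and are rolled back on error.
		return op.AppendRaw("test", id, blob)
	})
	return err
}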
BIN
core/rawdb/testdata/stored_receipts.bin
vendored
Normal file
Binary file not shown.
@@ -23,6 +23,7 @@ import (
 	"github.com/VictoriaMetrics/fastcache"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/trie"
 	lru "github.com/hashicorp/golang-lru"
@@ -70,6 +71,9 @@ type Trie interface {
 	// trie.MissingNodeError is returned.
 	TryGet(key []byte) ([]byte, error)
 
+	// TryUpdateAccount abstract an account write in the trie.
+	TryUpdateAccount(key []byte, account *types.StateAccount) error
+
 	// TryUpdate associates key with value in the trie. If value has length zero, any
 	// existing value is deleted from the trie. The value bytes must not be modified
 	// by the caller while they are stored in the trie. If a node was not found in the
@@ -86,7 +90,7 @@ type Trie interface {
 
 	// Commit writes all nodes to the trie's memory database, tracking the internal
 	// and external (for account tries) references.
-	Commit(onleaf trie.LeafCallback) (common.Hash, error)
+	Commit(onleaf trie.LeafCallback) (common.Hash, int, error)
 
 	// NodeIterator returns an iterator that returns nodes of the trie. Iteration
 	// starts at the key after the given start key.
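Commit now reports a count alongside the root. A minimal sketch of the new call shape, assuming the extra int is the number of trie nodes committed (which is how the state metrics added later in this commit appear to consume it); committer and commitAndCount are hypothetical names for illustration only.

    package sketch

    import (
    	"github.com/ethereum/go-ethereum/common"
    	"github.com/ethereum/go-ethereum/trie"
    )

    // committer is the slice of the Trie interface this sketch needs.
    type committer interface {
    	Commit(onleaf trie.LeafCallback) (common.Hash, int, error)
    }

    // commitAndCount illustrates the three-value return of the new signature.
    func commitAndCount(tr committer) (common.Hash, int, error) {
    	root, committed, err := tr.Commit(nil)
    	if err != nil {
    		return common.Hash{}, 0, err
    	}
    	return root, committed, nil
    }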
@@ -23,6 +23,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
@@ -140,7 +141,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
 
 	it := trie.NewIterator(s.trie.NodeIterator(conf.Start))
 	for it.Next() {
-		var data Account
+		var data types.StateAccount
 		if err := rlp.DecodeBytes(it.Value, &data); err != nil {
 			panic(err)
 		}
@@ -21,6 +21,7 @@ import (
 	"fmt"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 )
@@ -104,7 +105,7 @@ func (it *NodeIterator) step() error {
 		return nil
 	}
 	// Otherwise we've reached an account node, initiate data iteration
-	var account Account
+	var account types.StateAccount
 	if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
 		return err
 	}
28 core/state/metrics.go Normal file
@@ -0,0 +1,28 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import "github.com/ethereum/go-ethereum/metrics"
+
+var (
+	accountUpdatedMeter   = metrics.NewRegisteredMeter("state/update/account", nil)
+	storageUpdatedMeter   = metrics.NewRegisteredMeter("state/update/storage", nil)
+	accountDeletedMeter   = metrics.NewRegisteredMeter("state/delete/account", nil)
+	storageDeletedMeter   = metrics.NewRegisteredMeter("state/delete/storage", nil)
+	accountCommittedMeter = metrics.NewRegisteredMeter("state/commit/account", nil)
+	storageCommittedMeter = metrics.NewRegisteredMeter("state/commit/storage", nil)
+)
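Once registered, these meters can be read back from the default registry. A minimal sketch, assuming the go-ethereum metrics API (metrics.DefaultRegistry, Meter.Count); dumpStateMeters is an illustrative helper, not part of the commit.

    package main

    import (
    	"fmt"

    	"github.com/ethereum/go-ethereum/metrics"
    )

    // dumpStateMeters prints the current counts of the six new state meters.
    func dumpStateMeters() {
    	for _, name := range []string{
    		"state/update/account", "state/update/storage",
    		"state/delete/account", "state/delete/storage",
    		"state/commit/account", "state/commit/storage",
    	} {
    		if m, ok := metrics.DefaultRegistry.Get(name).(metrics.Meter); ok {
    			fmt.Println(name, m.Count())
    		}
    	}
    }

    func main() { dumpStateMeters() }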
@@ -29,7 +29,6 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
-	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/state/snapshot"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
@@ -426,7 +425,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
 		// If it's a leaf node, yes we are touching an account,
 		// dig into the storage trie further.
 		if accIter.Leaf() {
-			var acc state.Account
+			var acc types.StateAccount
 			if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil {
 				return err
 			}
@@ -436,7 +436,7 @@ func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string,
 		for i, key := range result.keys {
 			snapTrie.Update(key, result.vals[i])
 		}
-		root, _ := snapTrie.Commit(nil)
+		root, _, _ := snapTrie.Commit(nil)
 		snapTrieDb.Commit(root, false, nil)
 	}
 	tr := result.tr
@@ -60,7 +60,7 @@ func TestGeneration(t *testing.T) {
 	acc = &Account{Balance: big.NewInt(3), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
 	val, _ = rlp.EncodeToBytes(acc)
 	accTrie.Update([]byte("acc-3"), val) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
-	root, _ := accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
+	root, _, _ := accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
 	triedb.Commit(root, false, nil)
 
 	if have, want := root, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"); have != want {
@@ -128,7 +128,7 @@ func TestGenerateExistentState(t *testing.T) {
 	rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-3")), hashData([]byte("key-2")), []byte("val-2"))
 	rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-3")), hashData([]byte("key-3")), []byte("val-3"))
 
-	root, _ := accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
+	root, _, _ := accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
 	triedb.Commit(root, false, nil)
 
 	snap := generateSnapshot(diskdb, triedb, 16, root)
@@ -215,12 +215,12 @@ func (t *testHelper) makeStorageTrie(keys []string, vals []string) []byte {
 	for i, k := range keys {
 		stTrie.Update([]byte(k), []byte(vals[i]))
 	}
-	root, _ := stTrie.Commit(nil)
+	root, _, _ := stTrie.Commit(nil)
 	return root.Bytes()
 }
 
 func (t *testHelper) Generate() (common.Hash, *diskLayer) {
-	root, _ := t.accTrie.Commit(nil)
+	root, _, _ := t.accTrie.Commit(nil)
 	t.triedb.Commit(root, false, nil)
 	snap := generateSnapshot(t.diskdb, t.triedb, 16, root)
 	return root, snap
@@ -575,7 +575,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
 		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("b-key-2")), []byte("b-val-2"))
 		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("b-key-3")), []byte("b-val-3"))
 	}
-	root, _ := accTrie.Commit(nil)
+	root, _, _ := accTrie.Commit(nil)
 	t.Logf("root: %x", root)
 	triedb.Commit(root, false, nil)
 	// To verify the test: If we now inspect the snap db, there should exist extraneous storage items
@@ -637,7 +637,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
 			rawdb.WriteAccountSnapshot(diskdb, key, val)
 		}
 	}
-	root, _ := accTrie.Commit(nil)
+	root, _, _ := accTrie.Commit(nil)
 	t.Logf("root: %x", root)
 	triedb.Commit(root, false, nil)
 
@@ -690,7 +690,7 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
 		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x07"), val)
 	}
 
-	root, _ := accTrie.Commit(nil)
+	root, _, _ := accTrie.Commit(nil)
 	t.Logf("root: %x", root)
 	triedb.Commit(root, false, nil)
 
@@ -734,7 +734,7 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) {
 		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x05"), junk)
 	}
 
-	root, _ := accTrie.Commit(nil)
+	root, _, _ := accTrie.Commit(nil)
 	t.Logf("root: %x", root)
 	triedb.Commit(root, false, nil)
 
@@ -385,7 +385,7 @@ func (it *diskStorageIterator) Hash() common.Hash {
 	return common.BytesToHash(it.it.Key()) // The prefix will be truncated
 }
 
-// Slot returns the raw strorage slot content the iterator is currently at.
+// Slot returns the raw storage slot content the iterator is currently at.
 func (it *diskStorageIterator) Slot() []byte {
 	return it.it.Value()
 }
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -65,7 +66,7 @@ func (s Storage) Copy() Storage {
 type stateObject struct {
 	address  common.Address
 	addrHash common.Hash // hash of ethereum address of the account
-	data     Account
+	data     types.StateAccount
 	db       *StateDB
 
 	// DB error.
@@ -97,17 +98,8 @@ func (s *stateObject) empty() bool {
 	return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash)
 }
 
-// Account is the Ethereum consensus representation of accounts.
-// These objects are stored in the main account trie.
-type Account struct {
-	Nonce    uint64
-	Balance  *big.Int
-	Root     common.Hash // merkle root of the storage trie
-	CodeHash []byte
-}
-
 // newObject creates a state object.
-func newObject(db *StateDB, address common.Address, data Account) *stateObject {
+func newObject(db *StateDB, address common.Address, data types.StateAccount) *stateObject {
 	if data.Balance == nil {
 		data.Balance = new(big.Int)
 	}
@@ -130,7 +122,7 @@ func newObject(db *StateDB, address common.Address, data Account) *stateObject {
 
 // EncodeRLP implements rlp.Encoder.
 func (s *stateObject) EncodeRLP(w io.Writer) error {
-	return rlp.Encode(w, s.data)
+	return rlp.Encode(w, &s.data)
 }
 
 // setError remembers the first non-nil error it is called with.
@@ -329,7 +321,7 @@ func (s *stateObject) finalise(prefetch bool) {
 // It will return nil if the trie has not been loaded and no changes have been made
 func (s *stateObject) updateTrie(db Database) Trie {
 	// Make sure all dirty slots are finalized into the pending storage area
-	s.finalise(false) // Don't prefetch any more, pull directly if need be
+	s.finalise(false) // Don't prefetch anymore, pull directly if need be
 	if len(s.pendingStorage) == 0 {
 		return s.trie
 	}
@@ -354,10 +346,12 @@ func (s *stateObject) updateTrie(db Database) Trie {
 		var v []byte
 		if (value == common.Hash{}) {
 			s.setError(tr.TryDelete(key[:]))
+			s.db.StorageDeleted += 1
 		} else {
 			// Encoding []byte cannot fail, ok to ignore the error.
 			v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
 			s.setError(tr.TryUpdate(key[:], v))
+			s.db.StorageUpdated += 1
 		}
 		// If state snapshotting is active, cache the data til commit
 		if s.db.snap != nil {
@@ -368,7 +362,7 @@ func (s *stateObject) updateTrie(db Database) Trie {
 					s.db.snapStorage[s.addrHash] = storage
 				}
 			}
-			storage[crypto.HashData(hasher, key[:])] = v // v will be nil if value is 0x00
+			storage[crypto.HashData(hasher, key[:])] = v // v will be nil if it's deleted
 		}
 		usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure
 	}
@@ -396,23 +390,23 @@ func (s *stateObject) updateRoot(db Database) {
 
 // CommitTrie the storage trie of the object to db.
 // This updates the trie root.
-func (s *stateObject) CommitTrie(db Database) error {
+func (s *stateObject) CommitTrie(db Database) (int, error) {
 	// If nothing changed, don't bother with hashing anything
 	if s.updateTrie(db) == nil {
-		return nil
+		return 0, nil
 	}
 	if s.dbErr != nil {
-		return s.dbErr
+		return 0, s.dbErr
 	}
 	// Track the amount of time wasted on committing the storage trie
 	if metrics.EnabledExpensive {
 		defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
 	}
-	root, err := s.trie.Commit(nil)
+	root, committed, err := s.trie.Commit(nil)
 	if err == nil {
 		s.data.Root = root
 	}
-	return err
+	return committed, err
 }
 
 // AddBalance adds amount to s's balance.
@@ -117,6 +117,11 @@ type StateDB struct {
 	SnapshotAccountReads time.Duration
 	SnapshotStorageReads time.Duration
 	SnapshotCommits      time.Duration
+
+	AccountUpdated int
+	StorageUpdated int
+	AccountDeleted int
+	StorageDeleted int
 }
 
 // New creates a new state from a given trie.
@@ -455,12 +460,7 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
 	}
 	// Encode the account and update the account trie
 	addr := obj.Address()
-	data, err := rlp.EncodeToBytes(obj)
-	if err != nil {
-		panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
-	}
-	if err = s.trie.TryUpdate(addr[:], data); err != nil {
+	if err := s.trie.TryUpdateAccount(addr[:], &obj.data); err != nil {
 		s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
 	}
 
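TryUpdateAccount moves the account encoding behind the Trie interface. A hypothetical adapter showing roughly what it abstracts, RLP-encoding a types.StateAccount and writing it under the address key (a sketch only; the concrete trie implementations may handle the encoding differently):

    package sketch

    import (
    	"github.com/ethereum/go-ethereum/core/types"
    	"github.com/ethereum/go-ethereum/rlp"
    )

    // kvTrie is a stand-in for any trie exposing the old raw-update method.
    type kvTrie interface {
    	TryUpdate(key, value []byte) error
    }

    // tryUpdateAccount shows the encode-then-update step the new method hides.
    func tryUpdateAccount(t kvTrie, key []byte, acc *types.StateAccount) error {
    	data, err := rlp.EncodeToBytes(acc)
    	if err != nil {
    		return err
    	}
    	return t.TryUpdate(key, data)
    }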
@@ -507,7 +507,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
 	}
 	// If no live objects are available, attempt to use snapshots
 	var (
-		data *Account
+		data *types.StateAccount
 		err  error
 	)
 	if s.snap != nil {
@@ -519,7 +519,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
 		if acc == nil {
 			return nil
 		}
-		data = &Account{
+		data = &types.StateAccount{
 			Nonce:    acc.Nonce,
 			Balance:  acc.Balance,
 			CodeHash: acc.CodeHash,
@@ -546,7 +546,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
 		if len(enc) == 0 {
 			return nil
 		}
-		data = new(Account)
+		data = new(types.StateAccount)
 		if err := rlp.DecodeBytes(enc, data); err != nil {
 			log.Error("Failed to decode state object", "addr", addr, "err", err)
 			return nil
@@ -583,7 +583,7 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject)
 			s.snapDestructs[prev.addrHash] = struct{}{}
 		}
 	}
-	newobj = newObject(s, addr, Account{})
+	newobj = newObject(s, addr, types.StateAccount{})
 	if prev == nil {
 		s.journal.append(createObjectChange{account: &addr})
 	} else {
@@ -860,8 +860,10 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 	for addr := range s.stateObjectsPending {
 		if obj := s.stateObjects[addr]; obj.deleted {
 			s.deleteStateObject(obj)
+			s.AccountDeleted += 1
 		} else {
 			s.updateStateObject(obj)
+			s.AccountUpdated += 1
 		}
 		usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
 	}
@@ -903,6 +905,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 	s.IntermediateRoot(deleteEmptyObjects)
 
 	// Commit objects to the trie, measuring the elapsed time
+	var storageCommitted int
 	codeUpdates := make(map[common.Hash][]byte)
 	codeWriter := s.db.TrieDB().DiskDB().NewBatch()
 	for addr := range s.stateObjectsDirty {
@@ -914,9 +917,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 				obj.dirtyCode = false
 			}
 			// Write any storage changes in the state object to its storage trie
-			if err := obj.CommitTrie(s.db); err != nil {
+			committed, err := obj.CommitTrie(s.db)
+			if err != nil {
 				return common.Hash{}, err
 			}
+			storageCommitted += committed
 		}
 	}
 	if len(s.stateObjectsDirty) > 0 {
@@ -934,8 +939,8 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 	}
 	// The onleaf func is called _serially_, so we can reuse the same account
 	// for unmarshalling every time.
-	var account Account
-	root, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error {
+	var account types.StateAccount
+	root, accountCommitted, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error {
 		if err := rlp.DecodeBytes(leaf, &account); err != nil {
 			return nil
 		}
@@ -944,8 +949,20 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 		}
 		return nil
 	})
+	if err != nil {
+		return common.Hash{}, err
+	}
 	if metrics.EnabledExpensive {
 		s.AccountCommits += time.Since(start)
+
+		accountUpdatedMeter.Mark(int64(s.AccountUpdated))
+		storageUpdatedMeter.Mark(int64(s.StorageUpdated))
+		accountDeletedMeter.Mark(int64(s.AccountDeleted))
+		storageDeletedMeter.Mark(int64(s.StorageDeleted))
+		accountCommittedMeter.Mark(int64(accountCommitted))
+		storageCommittedMeter.Mark(int64(storageCommitted))
+		s.AccountUpdated, s.AccountDeleted = 0, 0
+		s.StorageUpdated, s.StorageDeleted = 0, 0
 	}
 	// If snapshotting is enabled, update the snapshot tree with this new version
 	if s.snap != nil {
@@ -20,6 +20,7 @@ import (
 	"bytes"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
@@ -43,7 +44,7 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.S
 			return err
 		}
 	}
-	var obj Account
+	var obj types.StateAccount
 	if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
 		return err
 	}
@@ -23,6 +23,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
@@ -203,7 +204,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
 				}
 				results[len(hashQueue)+i] = trie.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
 			} else {
-				var acc Account
+				var acc types.StateAccount
 				if err := rlp.DecodeBytes(srcTrie.Get(path[0]), &acc); err != nil {
 					t.Fatalf("failed to decode account on path %x: %v", path, err)
 				}
@@ -21,6 +21,8 @@ import (
 	"math"
 	"math/big"
 	"sort"
+	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -478,9 +480,15 @@ func (h *priceHeap) Pop() interface{} {
 // better candidates for inclusion while in other cases (at the top of the baseFee peak)
 // the floating heap is better. When baseFee is decreasing they behave similarly.
 type txPricedList struct {
-	all              *txLookup  // Pointer to the map of all transactions
-	urgent, floating priceHeap  // Heaps of prices of all the stored **remote** transactions
-	stales           int        // Number of stale price points to (re-heap trigger)
+	// Number of stale price points to (re-heap trigger).
+	// This field is accessed atomically, and must be the first field
+	// to ensure it has correct alignment for atomic.AddInt64.
+	// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	stales int64
+
+	all              *txLookup  // Pointer to the map of all transactions
+	urgent, floating priceHeap  // Heaps of prices of all the stored **remote** transactions
+	reheapMu         sync.Mutex // Mutex asserts that only one routine is reheaping the list
 }
 
 const (
@@ -510,8 +518,8 @@ func (l *txPricedList) Put(tx *types.Transaction, local bool) {
 // the heap if a large enough ratio of transactions go stale.
 func (l *txPricedList) Removed(count int) {
 	// Bump the stale counter, but exit if still too low (< 25%)
-	l.stales += count
-	if l.stales <= (len(l.urgent.list)+len(l.floating.list))/4 {
+	stales := atomic.AddInt64(&l.stales, int64(count))
+	if int(stales) <= (len(l.urgent.list)+len(l.floating.list))/4 {
 		return
 	}
 	// Seems we've reached a critical number of stale transactions, reheap
@@ -535,7 +543,7 @@ func (l *txPricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool
 	for len(h.list) > 0 {
 		head := h.list[0]
 		if l.all.GetRemote(head.Hash()) == nil { // Removed or migrated
-			l.stales--
+			atomic.AddInt64(&l.stales, -1)
 			heap.Pop(h)
 			continue
 		}
@@ -561,7 +569,7 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool)
 		// Discard stale transactions if found during cleanup
 		tx := heap.Pop(&l.urgent).(*types.Transaction)
 		if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated
-			l.stales--
+			atomic.AddInt64(&l.stales, -1)
 			continue
 		}
 		// Non stale transaction found, move to floating heap
@@ -574,7 +582,7 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool)
 		// Discard stale transactions if found during cleanup
 		tx := heap.Pop(&l.floating).(*types.Transaction)
 		if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated
-			l.stales--
+			atomic.AddInt64(&l.stales, -1)
 			continue
 		}
 		// Non stale transaction found, discard it
@@ -594,8 +602,10 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool)
 
 // Reheap forcibly rebuilds the heap based on the current remote transaction set.
 func (l *txPricedList) Reheap() {
+	l.reheapMu.Lock()
+	defer l.reheapMu.Unlock()
 	start := time.Now()
-	l.stales = 0
+	atomic.StoreInt64(&l.stales, 0)
 	l.urgent.list = make([]*types.Transaction, 0, l.all.RemoteCount())
 	l.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
 		l.urgent.list = append(l.urgent.list, tx)
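The pattern above in isolation: a 64-bit counter placed first in the struct so atomic operations stay aligned on 32-bit platforms, with a mutex serializing the expensive rebuild. A minimal self-contained sketch (hypothetical names, same mechanics):

    package sketch

    import (
    	"sync"
    	"sync/atomic"
    )

    type staleTracker struct {
    	stales   int64 // first field: see https://golang.org/pkg/sync/atomic/#pkg-note-BUG
    	reheapMu sync.Mutex
    }

    func (t *staleTracker) removed(count, heapSize int) {
    	if int(atomic.AddInt64(&t.stales, int64(count))) <= heapSize/4 {
    		return // still below the 25% re-heap trigger
    	}
    	t.reheap()
    }

    func (t *staleTracker) reheap() {
    	t.reheapMu.Lock()
    	defer t.reheapMu.Unlock()
    	atomic.StoreInt64(&t.stales, 0)
    	// ...rebuild heaps from the live transaction set...
    }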
@@ -22,6 +22,7 @@ import (
 	"math/big"
 	"sort"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -111,6 +112,14 @@ var (
 	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
 	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
 	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)
+	// throttleTxMeter counts how many transactions are rejected due to too-many-changes between
+	// txpool reorgs.
+	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
+	// reorgDurationTimer measures how long time a txpool reorg takes.
+	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
+	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
+	// that this number is pretty low, since txpool reorgs happen very frequently.
+	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
 
 	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
 	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
@@ -256,6 +265,9 @@ type TxPool struct {
 	reorgDoneCh     chan chan struct{}
 	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
 	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
+	initDoneCh      chan struct{}  // is closed once the pool is initialized (for tests)
+
+	changesSinceReorg int // A counter for how many drops we've performed in-between reorg.
 }
 
 type txpoolResetRequest struct {
@@ -284,6 +296,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
 		queueTxEventCh:  make(chan *types.Transaction),
 		reorgDoneCh:     make(chan chan struct{}),
 		reorgShutdownCh: make(chan struct{}),
+		initDoneCh:      make(chan struct{}),
 		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
 	}
 	pool.locals = newAccountSet(pool.signer)
@@ -337,6 +350,8 @@ func (pool *TxPool) loop() {
 	defer evict.Stop()
 	defer journal.Stop()
 
+	// Notify tests that the init phase is done
+	close(pool.initDoneCh)
 	for {
 		select {
 		// Handle ChainHeadEvent
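A minimal model of the initDoneCh handshake: closing a channel is a one-shot broadcast, so every goroutine blocked on a receive resumes once initialization finishes. Sketch only, with hypothetical names:

    package main

    func main() {
    	initDone := make(chan struct{})
    	go func() {
    		// ...journal rotation, head reset, etc. would happen here...
    		close(initDone) // wake all waiters at once
    	}()
    	<-initDone // what setupTxPoolWithConfig does below via pool.initDoneCh
    }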
@@ -355,8 +370,8 @@
 		case <-report.C:
 			pool.mu.RLock()
 			pending, queued := pool.stats()
-			stales := pool.priced.stales
 			pool.mu.RUnlock()
+			stales := int(atomic.LoadInt64(&pool.priced.stales))
 
 			if pending != prevPending || queued != prevQueued || stales != prevStales {
 				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
@@ -663,6 +678,15 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
 			underpricedTxMeter.Mark(1)
 			return false, ErrUnderpriced
 		}
+		// We're about to replace a transaction. The reorg does a more thorough
+		// analysis of what to remove and how, but it runs async. We don't want to
+		// do too many replacements between reorg-runs, so we cap the number of
+		// replacements to 25% of the slots
+		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
+			throttleTxMeter.Mark(1)
+			return false, ErrTxPoolOverflow
+		}
+
 		// New transaction is better than our worse ones, make room for it.
 		// If it's a local transaction, forcibly discard all available transactions.
 		// Otherwise if we can't make enough room for new one, abort the operation.
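For scale: assuming the stock TxPoolConfig default of GlobalSlots = 4096, the throttle engages after 4096 / 4 = 1024 drops between two reorg runs; further replacements are rejected with ErrTxPoolOverflow and counted by txpool/throttle until runReorg resets the counter below.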
@@ -674,6 +698,8 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
 			overflowedTxMeter.Mark(1)
 			return false, ErrTxPoolOverflow
 		}
+		// Bump the counter of rejections-since-reorg
+		pool.changesSinceReorg += len(drop)
 		// Kick out the underpriced remote transactions.
 		for _, tx := range drop {
 			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
@@ -1114,6 +1140,9 @@ func (pool *TxPool) scheduleReorgLoop() {
 
 // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
 func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
+	defer func(t0 time.Time) {
+		reorgDurationTimer.Update(time.Since(t0))
+	}(time.Now())
 	defer close(done)
 
 	var promoteAddrs []common.Address
@@ -1163,6 +1192,8 @@
 		highestPending := list.LastElement()
 		pool.pendingNonces.set(addr, highestPending.Nonce()+1)
 	}
+	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
+	pool.changesSinceReorg = 0 // Reset change counter
 	pool.mu.Unlock()
 
 	// Notify subsystems for newly added transactions
@@ -24,6 +24,7 @@ import (
 	"math/big"
 	"math/rand"
 	"os"
+	"sync/atomic"
 	"testing"
 	"time"
 
@@ -57,14 +58,14 @@ func init() {
 }
 
 type testBlockChain struct {
+	gasLimit      uint64 // must be first field for 64 bit alignment (atomic access)
 	statedb       *state.StateDB
-	gasLimit      uint64
 	chainHeadFeed *event.Feed
 }
 
 func (bc *testBlockChain) CurrentBlock() *types.Block {
 	return types.NewBlock(&types.Header{
-		GasLimit: bc.gasLimit,
+		GasLimit: atomic.LoadUint64(&bc.gasLimit),
 	}, nil, nil, nil, trie.NewStackTrie(nil))
 }
 
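Why gasLimit moves to the top of the struct: sync/atomic's 64-bit operations require 8-byte alignment, and Go only guarantees that for the first word of an allocated struct on 32-bit platforms. A minimal self-contained sketch of the same fix (hypothetical type, same rule); this is also why every positional testBlockChain literal in the hunks below reorders its arguments:

    package main

    import "sync/atomic"

    // chainSketch mirrors the fix: the atomically accessed uint64 is the
    // first field so it stays 8-byte aligned even on 32-bit platforms
    // (see the sync/atomic package bug note).
    type chainSketch struct {
    	gasLimit uint64 // first field: alignment guarantee
    	name     string
    }

    func main() {
    	c := &chainSketch{name: "test"}
    	atomic.StoreUint64(&c.gasLimit, 100)
    	_ = atomic.LoadUint64(&c.gasLimit)
    }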
@@ -118,11 +119,13 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
 
 func setupTxPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) {
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)}
+	blockchain := &testBlockChain{10000000, statedb, new(event.Feed)}
 
 	key, _ := crypto.GenerateKey()
 	pool := NewTxPool(testTxPoolConfig, config, blockchain)
 
+	// wait for the pool to initialize
+	<-pool.initDoneCh
 	return pool, key
 }
 
@@ -228,7 +231,7 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) {
 
 	// setup pool with 2 transaction in it
 	statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether))
-	blockchain := &testChain{&testBlockChain{statedb, 1000000000, new(event.Feed)}, address, &trigger}
+	blockchain := &testChain{&testBlockChain{1000000000, statedb, new(event.Feed)}, address, &trigger}
 
 	tx0 := transaction(0, 100000, key)
 	tx1 := transaction(1, 100000, key)
@@ -426,7 +429,7 @@ func TestTransactionChainFork(t *testing.T) {
 		statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
 		statedb.AddBalance(addr, big.NewInt(100000000000000))
 
-		pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)}
+		pool.chain = &testBlockChain{1000000, statedb, new(event.Feed)}
 		<-pool.requestReset(nil, nil)
 	}
 	resetState()
@@ -455,7 +458,7 @@ func TestTransactionDoubleNonce(t *testing.T) {
 		statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
 		statedb.AddBalance(addr, big.NewInt(100000000000000))
 
-		pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)}
+		pool.chain = &testBlockChain{1000000, statedb, new(event.Feed)}
 		<-pool.requestReset(nil, nil)
 	}
 	resetState()
@@ -625,7 +628,7 @@ func TestTransactionDropping(t *testing.T) {
 		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4)
 	}
 	// Reduce the block gas limit, check that invalidated transactions are dropped
-	pool.chain.(*testBlockChain).gasLimit = 100
+	atomic.StoreUint64(&pool.chain.(*testBlockChain).gasLimit, 100)
 	<-pool.requestReset(nil, nil)
 
 	if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
@@ -653,7 +656,7 @@ func TestTransactionPostponing(t *testing.T) {
 
 	// Create the pool to test the postponing with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
 
 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
 	defer pool.Stop()
@@ -866,7 +869,7 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) {
 
 	// Create the pool to test the limit enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
 
 	config := testTxPoolConfig
 	config.NoLocals = nolocals
@@ -958,7 +961,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
 
 	// Create the pool to test the non-expiration enforcement
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
 
 	config := testTxPoolConfig
 	config.Lifetime = time.Second
@@ -1143,7 +1146,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
 
 	// Create the pool to test the limit enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
 
 	config := testTxPoolConfig
 	config.GlobalSlots = config.AccountSlots * 10
@@ -1245,7 +1248,7 @@ func TestTransactionCapClearsFromAll(t *testing.T) {
 
 	// Create the pool to test the limit enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
 
 	config := testTxPoolConfig
 	config.AccountSlots = 2
@@ -1279,7 +1282,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
 
 	// Create the pool to test the limit enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
 
 	config := testTxPoolConfig
 	config.GlobalSlots = 1
@@ -1327,7 +1330,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
 
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
 
 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
 	defer pool.Stop()
@@ -1575,7 +1578,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) {
 
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
 
 	pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
 	defer pool.Stop()
@@ -1648,7 +1651,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
 
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
 
 	config := testTxPoolConfig
 	config.GlobalSlots = 2
@@ -1754,7 +1757,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) {
 
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
 
 	config := testTxPoolConfig
 	config.GlobalSlots = 128
@@ -1946,20 +1949,20 @@ func TestDualHeapEviction(t *testing.T) {
 	}
 
 	add := func(urgent bool) {
-		txs := make([]*types.Transaction, 20)
-		for i := range txs {
+		for i := 0; i < 20; i++ {
+			var tx *types.Transaction
 			// Create a test accounts and fund it
 			key, _ := crypto.GenerateKey()
 			testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000000))
 			if urgent {
-				txs[i] = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+1+i)), big.NewInt(int64(1+i)), key)
-				highTip = txs[i]
+				tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+1+i)), big.NewInt(int64(1+i)), key)
+				highTip = tx
 			} else {
-				txs[i] = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key)
-				highCap = txs[i]
+				tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key)
+				highCap = tx
 			}
+			pool.AddRemotesSync([]*types.Transaction{tx})
 		}
-		pool.AddRemotes(txs)
 		pending, queued := pool.Stats()
 		if pending+queued != 20 {
 			t.Fatalf("transaction count mismatch: have %d, want %d", pending+queued, 10)
@ -1986,7 +1989,7 @@ func TestTransactionDeduplication(t *testing.T) {
|
|||||||
|
|
||||||
// Create the pool to test the pricing enforcement with
|
// Create the pool to test the pricing enforcement with
|
||||||
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
||||||
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
|
blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
|
||||||
|
|
||||||
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
|
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
|
||||||
defer pool.Stop()
|
defer pool.Stop()
|
||||||
@ -2052,7 +2055,7 @@ func TestTransactionReplacement(t *testing.T) {
|
|||||||
|
|
||||||
// Create the pool to test the pricing enforcement with
|
// Create the pool to test the pricing enforcement with
|
||||||
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
||||||
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
|
blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
|
||||||
|
|
||||||
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
|
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
|
||||||
defer pool.Stop()
|
defer pool.Stop()
|
||||||
@ -2257,7 +2260,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
|
|||||||
|
|
||||||
// Create the original pool to inject transaction into the journal
|
// Create the original pool to inject transaction into the journal
|
||||||
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
||||||
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
|
blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
|
||||||
|
|
||||||
config := testTxPoolConfig
|
config := testTxPoolConfig
|
||||||
config.NoLocals = nolocals
|
config.NoLocals = nolocals
|
||||||
@ -2299,7 +2302,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
|
|||||||
// Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive
|
// Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive
pool.Stop()
|
pool.Stop()
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
|
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
blockchain = &testBlockChain{statedb, 1000000, new(event.Feed)}
|
blockchain = &testBlockChain{1000000, statedb, new(event.Feed)}
|
pool = NewTxPool(config, params.TestChainConfig, blockchain)
|
pool = NewTxPool(config, params.TestChainConfig, blockchain)
|
@ -2326,7 +2329,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
|
|||||||
pool.Stop()
|
pool.Stop()
|
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
|
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
blockchain = &testBlockChain{statedb, 1000000, new(event.Feed)}
|
blockchain = &testBlockChain{1000000, statedb, new(event.Feed)}
pool = NewTxPool(config, params.TestChainConfig, blockchain)
|
pool = NewTxPool(config, params.TestChainConfig, blockchain)
|
pending, queued = pool.Stats()
|
pending, queued = pool.Stats()
@ -2355,7 +2358,7 @@ func TestTransactionStatusCheck(t *testing.T) {
|
|||||||
|
// Create the pool to test the status retrievals with
|
// Create the pool to test the status retrievals with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
|
blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
|
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
|
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
|
defer pool.Stop()
@ -273,9 +273,6 @@ func TestDeriveFields(t *testing.T) {
|
|||||||
if receipts[i].Logs[j].TxHash != txs[i].Hash() {
|
if receipts[i].Logs[j].TxHash != txs[i].Hash() {
t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
|
t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
}
|
}
if receipts[i].Logs[j].TxHash != txs[i].Hash() {
|
t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
|
}
|
if receipts[i].Logs[j].TxIndex != uint(i) {
|
if receipts[i].Logs[j].TxIndex != uint(i) {
t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
|
t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
}
|
}
32
core/types/state_account.go
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
// Copyright 2021 The go-ethereum Authors
|
// This file is part of the go-ethereum library.
|
//
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
// it under the terms of the GNU Lesser General Public License as published by
|
// the Free Software Foundation, either version 3 of the License, or
|
// (at your option) any later version.
|
//
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
// GNU Lesser General Public License for more details.
|
//
|
// You should have received a copy of the GNU Lesser General Public License
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
package types
|
|
import (
|
"math/big"
|
|
"github.com/ethereum/go-ethereum/common"
|
)
|
|
// StateAccount is the Ethereum consensus representation of accounts.
|
// These objects are stored in the main account trie.
|
type StateAccount struct {
|
Nonce    uint64
|
Balance  *big.Int
|
Root     common.Hash // merkle root of the storage trie
|
CodeHash []byte
|
}
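Per its doc comment, the new StateAccount type is what gets serialized into the main account trie. A minimal, self-contained sketch (not part of the diff) of round-tripping such a value through RLP; the concrete values, the zero storage root, and the empty-code hash are illustrative only:

	package main

	import (
		"fmt"
		"math/big"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/types"
		"github.com/ethereum/go-ethereum/crypto"
		"github.com/ethereum/go-ethereum/rlp"
	)

	func main() {
		acct := types.StateAccount{
			Nonce:    1,
			Balance:  big.NewInt(1000),
			Root:     common.Hash{},         // illustrative; real accounts carry their storage trie root
			CodeHash: crypto.Keccak256(nil), // hash of empty code, i.e. a plain account
		}
		// Encode the account the way trie storage does: as an RLP list.
		enc, err := rlp.EncodeToBytes(&acct)
		if err != nil {
			panic(err)
		}
		fmt.Printf("RLP-encoded account: %x\n", enc)

		// Decode it back to verify the round trip.
		var dec types.StateAccount
		if err := rlp.DecodeBytes(enc, &dec); err != nil {
			panic(err)
		}
		fmt.Println("nonce after decode:", dec.Nonce)
	}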
Some files were not shown because too many files have changed in this diff.