all: bloom-filter based pruning mechanism (#21724)

* cmd, core, tests: initial state pruner

core: fix db inspector

cmd/geth: add verify-state

cmd/geth: add verification tool

core/rawdb: implement flatdb

cmd, core: fix rebase

core/state: use new contract code layout

core/state/pruner: avoid deleting genesis state

cmd/geth: add helper function

core, cmd: fix extract genesis

core: minor fixes

contracts: remove useless

core/state/snapshot: plugin stacktrie

core: polish

core/state/snapshot: iterate storage concurrently

core/state/snapshot: fix iteration

core: add comments

core/state/snapshot: polish code

core/state: polish

core/state/snapshot: rebase

core/rawdb: add comments

core/rawdb: fix tests

core/rawdb: improve tests

core/state/snapshot: fix concurrent iteration

core/state: run pruning during the recovery

core, trie: implement martin's idea

core, eth: delete flatdb and polish pruner

trie: fix import

core/state/pruner: add log

core/state/pruner: fix issues

core/state/pruner: don't read back

core/state/pruner: fix contract code write

core/state/pruner: check root node presence

cmd, core: polish log

core/state: use HEAD-127 as the target

core/state/snapshot: improve tests

cmd/geth: fix verification tool

cmd/geth: use HEAD as the verification default target

all: replace the bloomfilter with martin's fork

cmd, core: polish code

core, cmd: forcibly delete state root

core/state/pruner: add hash64

core/state/pruner: fix blacklist

core/state: remove blacklist

cmd, core: delete trie clean cache before pruning

cmd, core: fix lint

cmd, core: fix rebase

core/state: fix the special case for clique networks

core/state/snapshot: remove useless code

core/state/pruner: capping the snapshot after pruning

cmd, core, eth: fixes

core/rawdb: update db inspector

cmd/geth: polish code

core/state/pruner: fsync bloom filter

cmd, core: print warning log

core/state/pruner: adjust the parameters for bloom filter

cmd, core: create the bloom filter by size

core: polish

core/state/pruner: sanitize invalid bloomfilter size

cmd: address comments

cmd/geth: address comments

cmd/geth: address comment

core/state/pruner: address comments

core/state/pruner: rename homedir to datadir

cmd, core: address comments

core/state/pruner: address comment

core/state: address comments

core, cmd, tests: address comments

core: address comments

core/state/pruner: release the iterator after each commit

core/state/pruner: improve pruner

cmd, core: adjust bloom parameters

core/state/pruner: fix lint

core/state/pruner: fix tests

core: fix rebase

core/state/pruner: remove atomic rename

core/state/pruner: address comments

all: run go mod tidy

core/state/pruner: avoid false-positive for the middle state roots

core/state/pruner: add checks for middle roots

cmd/geth: replace crit with error

* core/state/pruner: fix lint

* core: drop legacy bloom filter

* core/state/snapshot: improve pruner

* core/state/snapshot: polish concurrent logs to report ETA vs. hashes

* core/state/pruner: add progress report for pruning and compaction too

* core: fix snapshot test API

* core/state: fix some pruning logs

* core/state/pruner: support recovering from bloom flush fail

Co-authored-by: Péter Szilágyi <peterke@gmail.com>
gary rong, 2021-02-08 19:16:30 +08:00, committed by GitHub
commit f566dd305e (parent bbe694fc52)
20 changed files with 1491 additions and 148 deletions


@@ -107,6 +107,7 @@ var (
 	utils.UltraLightFractionFlag,
 	utils.UltraLightOnlyAnnounceFlag,
 	utils.WhitelistFlag,
+	utils.BloomFilterSizeFlag,
 	utils.CacheFlag,
 	utils.CacheDatabaseFlag,
 	utils.CacheTrieFlag,
@@ -255,6 +256,8 @@ func init() {
 	dumpConfigCommand,
 	// See cmd/utils/flags_legacy.go
 	utils.ShowDeprecated,
+	// See snapshot.go
+	snapshotCommand,
 }
 sort.Sort(cli.CommandsByName(app.Commands))

cmd/geth/snapshot.go (new file, 437 lines)

@ -0,0 +1,437 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"errors"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/pruner"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
cli "gopkg.in/urfave/cli.v1"
)
var (
// emptyRoot is the known root hash of an empty trie.
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
// emptyCode is the known hash of the empty EVM bytecode.
emptyCode = crypto.Keccak256(nil)
)
var (
snapshotCommand = cli.Command{
Name: "snapshot",
Usage: "A set of commands based on the snapshot",
Category: "MISCELLANEOUS COMMANDS",
Description: "",
Subcommands: []cli.Command{
{
Name: "prune-state",
Usage: "Prune stale ethereum state data based on the snapshot",
ArgsUsage: "<root>",
Action: utils.MigrateFlags(pruneState),
Category: "MISCELLANEOUS COMMANDS",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.LegacyTestnetFlag,
utils.CacheTrieJournalFlag,
utils.BloomFilterSizeFlag,
},
Description: `
geth snapshot prune-state <state-root>
will prune historical state data with the help of the state snapshot.
All trie nodes and contract codes that do not belong to the specified
version state will be deleted from the database. After pruning, only
two versions of the state are available: the genesis and the specified one.
The default pruning target is the HEAD-127 state.
WARNING: It's necessary to delete the trie clean cache after the pruning.
If you specified another directory for the trie clean cache via "--cache.trie.journal"
when running Geth, please also specify it here for correct deletion. Otherwise
the trie clean cache in the default directory will be deleted.
`,
},
{
Name: "verify-state",
Usage: "Recalculate state hash based on the snapshot for verification",
ArgsUsage: "<root>",
Action: utils.MigrateFlags(verifyState),
Category: "MISCELLANEOUS COMMANDS",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.LegacyTestnetFlag,
},
Description: `
geth snapshot verify-state <state-root>
will traverse the whole account and storage set based on the specified
snapshot and recalculate the root hash of state for verification.
In other words, this command does the snapshot to trie conversion.
`,
},
{
Name: "traverse-state",
Usage: "Traverse the state with given root hash for verification",
ArgsUsage: "<root>",
Action: utils.MigrateFlags(traverseState),
Category: "MISCELLANEOUS COMMANDS",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.LegacyTestnetFlag,
},
Description: `
geth snapshot traverse-state <state-root>
will traverse the whole state from the given state root and will abort if any
referenced trie node or contract code is missing. This command can be used for
state integrity verification. The default checking target is the HEAD state.
It's also usable without snapshot enabled.
`,
},
{
Name: "traverse-rawstate",
Usage: "Traverse the state with given root hash for verification",
ArgsUsage: "<root>",
Action: utils.MigrateFlags(traverseRawState),
Category: "MISCELLANEOUS COMMANDS",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.LegacyTestnetFlag,
},
Description: `
geth snapshot traverse-rawstate <state-root>
will traverse the whole state from the given root and will abort if any referenced
trie node or contract code is missing. This command can be used for state integrity
verification. The default checking target is the HEAD state. It's basically identical
to traverse-state, but the check granularity is smaller.
It's also usable without snapshot enabled.
`,
},
},
}
)
func pruneState(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
defer stack.Close()
chain, chaindb := utils.MakeChain(ctx, stack, true)
defer chaindb.Close()
pruner, err := pruner.NewPruner(chaindb, chain.CurrentBlock().Header(), stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name))
if err != nil {
log.Error("Failed to open snapshot tree", "error", err)
return err
}
if ctx.NArg() > 1 {
log.Error("Too many arguments given")
return errors.New("too many arguments")
}
var targetRoot common.Hash
if ctx.NArg() == 1 {
targetRoot, err = parseRoot(ctx.Args()[0])
if err != nil {
log.Error("Failed to resolve state root", "error", err)
return err
}
}
if err = pruner.Prune(targetRoot); err != nil {
log.Error("Failed to prune state", "error", err)
return err
}
return nil
}
func verifyState(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
chain, chaindb := utils.MakeChain(ctx, stack, true)
defer chaindb.Close()
snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, chain.CurrentBlock().Root(), false, false, false)
if err != nil {
log.Error("Failed to open snapshot tree", "error", err)
return err
}
if ctx.NArg() > 1 {
log.Error("Too many arguments given")
return errors.New("too many arguments")
}
var root = chain.CurrentBlock().Root()
if ctx.NArg() == 1 {
root, err = parseRoot(ctx.Args()[0])
if err != nil {
log.Error("Failed to resolve state root", "error", err)
return err
}
}
if err := snaptree.Verify(root); err != nil {
log.Error("Failed to verfiy state", "error", err)
return err
}
log.Info("Verified the state")
return nil
}
// traverseState is a helper function used for pruning verification.
// Basically it just iterates the trie, ensuring all nodes and associated
// contract codes are present.
func traverseState(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
chain, chaindb := utils.MakeChain(ctx, stack, true)
defer chaindb.Close()
if ctx.NArg() > 1 {
log.Error("Too many arguments given")
return errors.New("too many arguments")
}
// Use the HEAD root as the default
head := chain.CurrentBlock()
if head == nil {
log.Error("Head block is missing")
return errors.New("head block is missing")
}
var (
root common.Hash
err error
)
if ctx.NArg() == 1 {
root, err = parseRoot(ctx.Args()[0])
if err != nil {
log.Error("Failed to resolve state root", "error", err)
return err
}
log.Info("Start traversing the state", "root", root)
} else {
root = head.Root()
log.Info("Start traversing the state", "root", root, "number", head.NumberU64())
}
triedb := trie.NewDatabase(chaindb)
t, err := trie.NewSecure(root, triedb)
if err != nil {
log.Error("Failed to open trie", "root", root, "error", err)
return err
}
var (
accounts int
slots int
codes int
lastReport time.Time
start = time.Now()
)
accIter := trie.NewIterator(t.NodeIterator(nil))
for accIter.Next() {
accounts += 1
var acc state.Account
if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil {
log.Error("Invalid account encountered during traversal", "error", err)
return err
}
if acc.Root != emptyRoot {
storageTrie, err := trie.NewSecure(acc.Root, triedb)
if err != nil {
log.Error("Failed to open storage trie", "root", acc.Root, "error", err)
return err
}
storageIter := trie.NewIterator(storageTrie.NodeIterator(nil))
for storageIter.Next() {
slots += 1
}
if storageIter.Err != nil {
log.Error("Failed to traverse storage trie", "root", acc.Root, "error", storageIter.Err)
return storageIter.Err
}
}
if !bytes.Equal(acc.CodeHash, emptyCode) {
code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
if len(code) == 0 {
log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash))
return errors.New("missing code")
}
codes += 1
}
if time.Since(lastReport) > time.Second*8 {
log.Info("Traversing state", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))
lastReport = time.Now()
}
}
if accIter.Err != nil {
log.Error("Failed to traverse state trie", "root", root, "error", accIter.Err)
return accIter.Err
}
log.Info("State is complete", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
}
// traverseRawState is a helper function used for pruning verification.
// Basically it just iterates the trie, ensuring all nodes and associated
// contract codes are present. It's basically identical to traverseState
// but it will check each trie node.
func traverseRawState(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
chain, chaindb := utils.MakeChain(ctx, stack, true)
defer chaindb.Close()
if ctx.NArg() > 1 {
log.Error("Too many arguments given")
return errors.New("too many arguments")
}
// Use the HEAD root as the default
head := chain.CurrentBlock()
if head == nil {
log.Error("Head block is missing")
return errors.New("head block is missing")
}
var (
root common.Hash
err error
)
if ctx.NArg() == 1 {
root, err = parseRoot(ctx.Args()[0])
if err != nil {
log.Error("Failed to resolve state root", "error", err)
return err
}
log.Info("Start traversing the state", "root", root)
} else {
root = head.Root()
log.Info("Start traversing the state", "root", root, "number", head.NumberU64())
}
triedb := trie.NewDatabase(chaindb)
t, err := trie.NewSecure(root, triedb)
if err != nil {
log.Error("Failed to open trie", "root", root, "error", err)
return err
}
var (
nodes int
accounts int
slots int
codes int
lastReport time.Time
start = time.Now()
)
accIter := t.NodeIterator(nil)
for accIter.Next(true) {
nodes += 1
node := accIter.Hash()
if node != (common.Hash{}) {
// Check the presence of non-empty hash nodes (embedded nodes don't
// have their own hash).
blob := rawdb.ReadTrieNode(chaindb, node)
if len(blob) == 0 {
log.Error("Missing trie node(account)", "hash", node)
return errors.New("missing account")
}
}
// If it's a leaf node, we're looking at an account;
// dig into the storage trie further.
if accIter.Leaf() {
accounts += 1
var acc state.Account
if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil {
log.Error("Invalid account encountered during traversal", "error", err)
return errors.New("invalid account")
}
if acc.Root != emptyRoot {
storageTrie, err := trie.NewSecure(acc.Root, triedb)
if err != nil {
log.Error("Failed to open storage trie", "root", acc.Root, "error", err)
return errors.New("missing storage trie")
}
storageIter := storageTrie.NodeIterator(nil)
for storageIter.Next(true) {
nodes += 1
node := storageIter.Hash()
// Check the presence of non-empty hash nodes (embedded nodes don't
// have their own hash).
if node != (common.Hash{}) {
blob := rawdb.ReadTrieNode(chaindb, node)
if len(blob) == 0 {
log.Error("Missing trie node(storage)", "hash", node)
return errors.New("missing storage")
}
}
// Bump the counter if it's leaf node.
if storageIter.Leaf() {
slots += 1
}
}
if storageIter.Error() != nil {
log.Error("Failed to traverse storage trie", "root", acc.Root, "error", storageIter.Error())
return storageIter.Error()
}
}
if !bytes.Equal(acc.CodeHash, emptyCode) {
code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
if len(code) == 0 {
log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey()))
return errors.New("missing code")
}
codes += 1
}
if time.Since(lastReport) > time.Second*8 {
log.Info("Traversing state", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))
lastReport = time.Now()
}
}
}
if accIter.Error() != nil {
log.Error("Failed to traverse state trie", "root", root, "error", accIter.Error())
return accIter.Error()
}
log.Info("State is complete", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
}
func parseRoot(input string) (common.Hash, error) {
var h common.Hash
if err := h.UnmarshalText([]byte(input)); err != nil {
return h, err
}
return h, nil
}
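
The pruneState action above boils down to a couple of library calls. Below is a minimal sketch of driving the new pruner API directly, assuming a hypothetical pruneToRoot wrapper (NewPruner and Prune are the functions introduced in this PR; the node plumbing from makeConfigNode/utils.MakeChain is left out):

package prunedemo

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/pruner"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

// pruneToRoot mirrors the pruneState command: construct a Pruner against the
// live database and prune towards targetRoot. Passing the zero hash selects
// the default HEAD-127 target, exactly as the command does.
func pruneToRoot(db ethdb.Database, head *types.Header, datadir, trieCacheDir string, bloomMB uint64, targetRoot common.Hash) error {
	p, err := pruner.NewPruner(db, head, datadir, trieCacheDir, bloomMB)
	if err != nil {
		return err // the relevant snapshot(s) might not exist yet
	}
	return p.Prune(targetRoot)
}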


@@ -245,6 +245,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 	Name: "MISC",
 	Flags: []cli.Flag{
 		utils.SnapshotFlag,
+		utils.BloomFilterSizeFlag,
 		cli.HelpFlag,
 	},
 },


@@ -225,6 +225,11 @@ var (
 	Name:  "whitelist",
 	Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>)",
 }
+BloomFilterSizeFlag = cli.Uint64Flag{
+	Name:  "bloomfilter.size",
+	Usage: "Megabytes of memory allocated to bloom-filter for pruning",
+	Value: 2048,
+}
 // Light server and client settings
 LightServeFlag = cli.IntFlag{
 	Name: "light.serve",


@@ -372,7 +372,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
 		recover = true
 	}
-	bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, recover)
+	bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
 }
 // Take ownership of this particular state
 go bc.update()


@@ -31,7 +31,6 @@ import (
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core/rawdb"
-	"github.com/ethereum/go-ethereum/core/state/snapshot"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/ethdb"
@@ -163,7 +162,7 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [
 	}
 	// Check the snapshot, ensure it's integrated
-	if err := snapshot.VerifyState(chain.snaps, block.Root()); err != nil {
+	if err := chain.snaps.Verify(block.Root()); err != nil {
 		t.Errorf("The disk layer is not integrated %v", err)
 	}
 }


@@ -171,7 +171,6 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
 	}
 	return genesis.Config, block.Hash(), nil
 }
-
 // We have the genesis block in database(perhaps in ancient database)
 // but the corresponding state is missing.
 header := rawdb.ReadHeader(db, stored, 0)
@@ -190,7 +189,6 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
 	}
 	return genesis.Config, block.Hash(), nil
 }
-
 // Check whether the genesis block is already written.
 if genesis != nil {
 	hash := genesis.ToBlock(nil).Hash()
@@ -198,7 +196,6 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
 		return genesis.Config, hash, &GenesisMismatchError{stored, hash}
 	}
 }
-
 // Get the existing chain configuration.
 newcfg := genesis.configOrDefault(stored)
 if err := newcfg.CheckConfigForkOrder(); err != nil {
@@ -216,7 +213,6 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
 if genesis == nil && stored != params.MainnetGenesisHash {
 	return storedcfg, stored, nil
 }
-
 // Check config compatibility and write the config. Compatibility errors
 // are returned to the caller unless we're already at block zero.
 height := rawdb.ReadHeaderNumber(db, rawdb.ReadHeadHeaderHash(db))


@@ -335,7 +335,7 @@ func InspectDatabase(db ethdb.Database) error {
 	hashNumPairings.Add(size)
 case len(key) == common.HashLength:
 	tries.Add(size)
-case bytes.HasPrefix(key, codePrefix) && len(key) == len(codePrefix)+common.HashLength:
+case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
 	codes.Add(size)
 case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
 	txLookups.Add(size)
@@ -347,15 +347,26 @@ func InspectDatabase(db ethdb.Database) error {
 	preimages.Add(size)
 case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
 	bloomBits.Add(size)
+case bytes.HasPrefix(key, BloomBitsIndexPrefix):
+	bloomBits.Add(size)
 case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength:
 	cliqueSnaps.Add(size)
-case bytes.HasPrefix(key, []byte("cht-")) && len(key) == 4+common.HashLength:
+case bytes.HasPrefix(key, []byte("cht-")) ||
+	bytes.HasPrefix(key, []byte("chtIndexV2-")) ||
+	bytes.HasPrefix(key, []byte("chtRootV2-")): // Canonical hash trie
 	chtTrieNodes.Add(size)
-case bytes.HasPrefix(key, []byte("blt-")) && len(key) == 4+common.HashLength:
+case bytes.HasPrefix(key, []byte("blt-")) ||
+	bytes.HasPrefix(key, []byte("bltIndex-")) ||
+	bytes.HasPrefix(key, []byte("bltRoot-")): // Bloomtrie sub
 	bloomTrieNodes.Add(size)
 default:
 	var accounted bool
-	for _, meta := range [][]byte{databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, fastTrieProgressKey, uncleanShutdownKey, badBlockKey} {
+	for _, meta := range [][]byte{
+		databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
+		fastTrieProgressKey, snapshotRootKey, snapshotJournalKey, snapshotGeneratorKey,
+		snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, uncleanShutdownKey,
+		badBlockKey,
+	} {
 		if bytes.Equal(key, meta) {
 			metadata.Add(size)
 			accounted = true


@@ -85,7 +85,7 @@ var (
 	bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
 	SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value
 	SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
-	codePrefix = []byte("c") // codePrefix + code hash -> account code
+	CodePrefix = []byte("c") // CodePrefix + code hash -> account code
 	preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage
 	configPrefix = []byte("ethereum-config-") // config prefix for the db
@@ -209,16 +209,16 @@ func preimageKey(hash common.Hash) []byte {
 	return append(preimagePrefix, hash.Bytes()...)
 }
-// codeKey = codePrefix + hash
+// codeKey = CodePrefix + hash
 func codeKey(hash common.Hash) []byte {
-	return append(codePrefix, hash.Bytes()...)
+	return append(CodePrefix, hash.Bytes()...)
 }
 // IsCodeKey reports whether the given byte slice is the key of contract code,
 // if so return the raw code hash as well.
 func IsCodeKey(key []byte) (bool, []byte) {
-	if bytes.HasPrefix(key, codePrefix) && len(key) == common.HashLength+len(codePrefix) {
-		return true, key[len(codePrefix):]
+	if bytes.HasPrefix(key, CodePrefix) && len(key) == common.HashLength+len(CodePrefix) {
+		return true, key[len(CodePrefix):]
 	}
 	return false, nil
 }
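
With CodePrefix and IsCodeKey now exported, other packages can classify raw database keys; the pruner below relies on exactly this to tell contract code entries apart from 32-byte trie node keys. A self-contained sketch of the round trip (the demo hash value is made up):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Assemble a code key by hand (codeKey itself stays unexported), then
	// classify it with IsCodeKey and recover the embedded code hash.
	codeHash := common.HexToHash("0xdeadbeef")
	key := append(append([]byte{}, rawdb.CodePrefix...), codeHash.Bytes()...)

	if ok, raw := rawdb.IsCodeKey(key); ok {
		fmt.Printf("contract code entry, hash = %x\n", raw)
	}
}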

core/state/pruner/bloom.go (new file, 132 lines)

@ -0,0 +1,132 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pruner
import (
"encoding/binary"
"errors"
"os"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/log"
bloomfilter "github.com/holiman/bloomfilter/v2"
)
// stateBloomHasher is a wrapper around a byte blob to satisfy the interface API
// requirements of the bloom library used. It's used to convert a trie hash or
// contract code hash into a 64 bit mini hash.
type stateBloomHasher []byte
func (f stateBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
func (f stateBloomHasher) Sum(b []byte) []byte { panic("not implemented") }
func (f stateBloomHasher) Reset() { panic("not implemented") }
func (f stateBloomHasher) BlockSize() int { panic("not implemented") }
func (f stateBloomHasher) Size() int { return 8 }
func (f stateBloomHasher) Sum64() uint64 { return binary.BigEndian.Uint64(f) }
// stateBloom is a bloom filter used during the state convesion(snapshot->state).
// The keys of all generated entries will be recorded here so that in the pruning
// stage the entries belong to the specific version can be avoided for deletion.
//
// The false-positive is allowed here. The "false-positive" entries means they
// actually don't belong to the specific version but they are not deleted in the
// pruning. The downside of the false-positive allowance is we may leave some "dangling"
// nodes in the disk. But in practice the it's very unlike the dangling node is
// state root. So in theory this pruned state shouldn't be visited anymore. Another
// potential issue is for fast sync. If we do another fast sync upon the pruned
// database, it's problematic which will stop the expansion during the syncing.
// TODO address it @rjl493456442 @holiman @karalabe.
//
// After the entire state is generated, the bloom filter should be persisted into
// the disk. It indicates the whole generation procedure is finished.
type stateBloom struct {
bloom *bloomfilter.Filter
}
// newStateBloomWithSize creates a brand new state bloom for state generation.
// The bloom filter will be created with the passed-in filter size. According
// to https://hur.st/bloomfilter/?n=600000000&p=&m=2048MB&k=4, the parameters
// are picked so that the false-positive rate for mainnet is low enough.
func newStateBloomWithSize(size uint64) (*stateBloom, error) {
bloom, err := bloomfilter.New(size*1024*1024*8, 4)
if err != nil {
return nil, err
}
log.Info("Initialized state bloom", "size", common.StorageSize(float64(bloom.M()/8)))
return &stateBloom{bloom: bloom}, nil
}
// NewStateBloomFromDisk loads the state bloom from the given file.
// In this case the assumption is that the bloom filter is complete.
func NewStateBloomFromDisk(filename string) (*stateBloom, error) {
bloom, _, err := bloomfilter.ReadFile(filename)
if err != nil {
return nil, err
}
return &stateBloom{bloom: bloom}, nil
}
// Commit flushes the bloom filter content into the disk and marks the bloom
// as complete.
func (bloom *stateBloom) Commit(filename, tempname string) error {
// Write the bloom out into a temporary file
_, err := bloom.bloom.WriteFile(tempname)
if err != nil {
return err
}
// Ensure the file is synced to disk
f, err := os.Open(tempname)
if err != nil {
return err
}
if err := f.Sync(); err != nil {
f.Close()
return err
}
f.Close()
// Move the temporary file into its final location
return os.Rename(tempname, filename)
}
// Put implements the KeyValueWriter interface. But here only the key is needed.
func (bloom *stateBloom) Put(key []byte, value []byte) error {
// If the key length is not 32 bytes, ensure it's a contract code
// entry with the new scheme.
if len(key) != common.HashLength {
isCode, codeKey := rawdb.IsCodeKey(key)
if !isCode {
return errors.New("invalid entry")
}
bloom.bloom.Add(stateBloomHasher(codeKey))
return nil
}
bloom.bloom.Add(stateBloomHasher(key))
return nil
}
// Delete removes the key from the key-value data store.
func (bloom *stateBloom) Delete(key []byte) error { panic("not supported") }
// Contain is a wrapper around the underlying contains function which
// reports whether the key is contained:
// - If it says yes, the key may be contained
// - If it says no, the key is definitely not contained.
func (bloom *stateBloom) Contain(key []byte) (bool, error) {
return bloom.bloom.Contains(stateBloomHasher(key)), nil
}
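
The mini-hash wrapper above works because every key fed to the filter is already a Keccak256 output: its first 8 bytes are uniformly distributed, so they can serve as the 64-bit hash directly instead of being hashed again. A self-contained sketch of the same pattern against the holiman/bloomfilter/v2 API used here (miniHasher is a stand-in for stateBloomHasher):

package main

import (
	"encoding/binary"
	"fmt"

	bloomfilter "github.com/holiman/bloomfilter/v2"
)

// miniHasher wraps a byte slice so the bloom library can consume its first
// 8 bytes as a ready-made 64 bit hash; only Size and Sum64 are ever called.
type miniHasher []byte

func (h miniHasher) Write(p []byte) (int, error) { panic("not implemented") }
func (h miniHasher) Sum(b []byte) []byte         { panic("not implemented") }
func (h miniHasher) Reset()                      { panic("not implemented") }
func (h miniHasher) BlockSize() int              { panic("not implemented") }
func (h miniHasher) Size() int                   { return 8 }
func (h miniHasher) Sum64() uint64               { return binary.BigEndian.Uint64(h) }

func main() {
	// 256 MB worth of bits with 4 hash functions, like newStateBloomWithSize.
	filter, _ := bloomfilter.New(256*1024*1024*8, 4)

	key := make([]byte, 32) // stand-in for a trie node hash
	key[0] = 0x42
	filter.Add(miniHasher(key))
	fmt.Println(filter.Contains(miniHasher(key))) // true: no false negatives
}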

core/state/pruner/pruner.go (new file, 537 lines)

@ -0,0 +1,537 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pruner
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math"
"os"
"path/filepath"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
const (
// stateBloomFilePrefix is the filename prefix of state bloom filter.
stateBloomFilePrefix = "statebloom"
// stateBloomFileSuffix is the filename suffix of state bloom filter.
stateBloomFileSuffix = "bf.gz"
// stateBloomFileTempSuffix is the filename suffix of state bloom filter
// while it is being written out to detect write aborts.
stateBloomFileTempSuffix = ".tmp"
// rangeCompactionThreshold is the minimal deleted entry number for
// triggering range compaction. It's a quite arbitrary number but just
// to avoid triggering range compaction because of small deletion.
rangeCompactionThreshold = 100000
)
var (
// emptyRoot is the known root hash of an empty trie.
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
// emptyCode is the known hash of the empty EVM bytecode.
emptyCode = crypto.Keccak256(nil)
)
// Pruner is an offline tool to prune the stale state with the
// help of the snapshot. The workflow of pruner is very simple:
//
// - iterate the snapshot, reconstruct the relevant state
// - iterate the database, delete all other state entries which
// don't belong to the target state and the genesis state
//
// It can take several hours (around 2 hours for mainnet) to finish
// the whole pruning work. It's recommended to run this offline tool
// periodically in order to release the disk usage and improve the
// disk read performance to some extent.
type Pruner struct {
db ethdb.Database
stateBloom *stateBloom
datadir string
trieCachePath string
headHeader *types.Header
snaptree *snapshot.Tree
}
// NewPruner creates the pruner instance.
func NewPruner(db ethdb.Database, headHeader *types.Header, datadir, trieCachePath string, bloomSize uint64) (*Pruner, error) {
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headHeader.Root, false, false, false)
if err != nil {
return nil, err // The relevant snapshot(s) might not exist
}
// Sanitize the bloom filter size if it's too small.
if bloomSize < 256 {
log.Warn("Sanitizing bloomfilter size", "provided(MB)", bloomSize, "updated(MB)", 256)
bloomSize = 256
}
stateBloom, err := newStateBloomWithSize(bloomSize)
if err != nil {
return nil, err
}
return &Pruner{
db: db,
stateBloom: stateBloom,
datadir: datadir,
trieCachePath: trieCachePath,
headHeader: headHeader,
snaptree: snaptree,
}, nil
}
func prune(maindb ethdb.Database, stateBloom *stateBloom, middleStateRoots map[common.Hash]struct{}, start time.Time) error {
// Delete all stale trie nodes in the disk. With the help of the state bloom
// the trie nodes (and codes) belonging to the active state will be filtered
// out. A very small part of the stale tries will also be filtered because of
// the false-positive rate of the bloom filter. But the assumption is held here
// that the false-positive rate is low enough (~0.05%). The probability that a
// dangling node is a state root is extremely low, so in theory the dangling
// nodes will never be visited again.
var (
count int
size common.StorageSize
pstart = time.Now()
logged = time.Now()
batch = maindb.NewBatch()
iter = maindb.NewIterator(nil, nil)
)
for iter.Next() {
key := iter.Key()
// All state entries that don't belong to the specific state or the genesis are deleted here:
// - trie node
// - legacy contract code
// - new-scheme contract code
isCode, codeKey := rawdb.IsCodeKey(key)
if len(key) == common.HashLength || isCode {
checkKey := key
if isCode {
checkKey = codeKey
}
if _, exist := middleStateRoots[common.BytesToHash(checkKey)]; exist {
log.Debug("Forcibly delete the middle state roots", "hash", common.BytesToHash(checkKey))
} else {
if ok, err := stateBloom.Contain(checkKey); err != nil {
return err
} else if ok {
continue
}
}
count += 1
size += common.StorageSize(len(key) + len(iter.Value()))
batch.Delete(key)
var eta time.Duration // Realistically will never remain uninitialized
if done := binary.BigEndian.Uint64(key[:8]); done > 0 {
var (
left = math.MaxUint64 - binary.BigEndian.Uint64(key[:8])
speed = done/uint64(time.Since(start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
)
eta = time.Duration(left/speed) * time.Millisecond
}
if time.Since(logged) > 8*time.Second {
log.Info("Pruning state data", "nodes", count, "size", size,
"elapsed", common.PrettyDuration(time.Since(pstart)), "eta", common.PrettyDuration(eta))
logged = time.Now()
}
// Recreate the iterator after every batch commit in order
// to allow the underlying compactor to delete the entries.
if batch.ValueSize() >= ethdb.IdealBatchSize {
batch.Write()
batch.Reset()
iter.Release()
iter = maindb.NewIterator(nil, key)
}
}
}
if batch.ValueSize() > 0 {
batch.Write()
batch.Reset()
}
iter.Release()
log.Info("Pruned state data", "nodes", count, "size", size, "elapsed", common.PrettyDuration(time.Since(pstart)))
// Start compactions, will remove the deleted data from the disk immediately.
// Note for small pruning, the compaction is skipped.
if count >= rangeCompactionThreshold {
cstart := time.Now()
for b := byte(0); b < byte(16); b++ {
log.Info("Compacting database", "range", fmt.Sprintf("%#x-%#x", b, b+1), "elapsed", common.PrettyDuration(time.Since(cstart)))
if err := maindb.Compact([]byte{b}, []byte{b + 1}); err != nil {
log.Error("Database compaction failed", "error", err)
return err
}
}
log.Info("Database compaction finished", "elapsed", common.PrettyDuration(time.Since(cstart)))
}
log.Info("State pruning successful", "pruned", size, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
}
// Prune deletes all historical state nodes except the nodes belonging to the
// specified state version. If the user doesn't specify the state version, the
// bottom-most snapshot diff layer will be used as the target.
func (p *Pruner) Prune(root common.Hash) error {
// If the state bloom filter is already committed previously,
// reuse it for pruning instead of generating a new one. It's
// mandatory because a part of the state may already be deleted,
// so the recovery procedure is necessary.
_, stateBloomRoot, err := findBloomFilter(p.datadir)
if err != nil {
return err
}
if stateBloomRoot != (common.Hash{}) {
return RecoverPruning(p.datadir, p.db, p.trieCachePath)
}
// If the target state root is not specified, use HEAD-127 as the
// target. The reasons for picking it are:
// - in most of the normal cases, the related state is available
// - the probability of this layer being reorged is very low
var layers []snapshot.Snapshot
if root == (common.Hash{}) {
// Retrieve all snapshot layers from the current HEAD.
// In theory there are 128 difflayers + 1 disk layer present,
// so 128 diff layers are expected to be returned.
layers = p.snaptree.Snapshots(p.headHeader.Root, 128, true)
if len(layers) != 128 {
// Reject if the accumulated diff layers are less than 128. It
// means in most of normal cases, there is no associated state
// with bottom-most diff layer.
return errors.New("the snapshot difflayers are less than 128")
}
// Use the bottom-most diff layer as the target
root = layers[len(layers)-1].Root()
}
// Ensure the root is really present. The weak assumption
// is the presence of root can indicate the presence of the
// entire trie.
if blob := rawdb.ReadTrieNode(p.db, root); len(blob) == 0 {
// The special case is for clique-based networks (rinkeby, goerli
// and some other private networks), where it's possible that two
// consecutive blocks have the same root. In this case no snapshot
// difflayer is created, so the HEAD-127 state may not be paired with
// the HEAD-127 layer. Instead the paired layer is higher than the
// bottom-most diff layer. Try to find the bottom-most snapshot
// layer with state available.
//
// Note HEAD and HEAD-1 are ignored. Usually the associated
// state is available, but we don't want to use the topmost state
// as the pruning target.
var found bool
for i := len(layers) - 2; i >= 2; i-- {
if blob := rawdb.ReadTrieNode(p.db, layers[i].Root()); len(blob) != 0 {
root = layers[i].Root()
found = true
log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i)
break
}
}
if !found {
if len(layers) > 0 {
return errors.New("no snapshot paired state")
}
return fmt.Errorf("associated state[%x] is not present", root)
}
} else {
if len(layers) > 0 {
log.Info("Selecting bottom-most difflayer as the pruning target", "root", root, "height", p.headHeader.Number.Uint64()-127)
} else {
log.Info("Selecting user-specified state as the pruning target", "root", root)
}
}
// Before starting the pruning, delete the clean trie cache first.
// It's necessary, otherwise on the next restart we would hit the
// deleted state root in the "clean cache" and the incomplete
// state would be picked for usage.
deleteCleanTrieCache(p.trieCachePath)
// All the state roots of the middle layer should be forcibly pruned,
// otherwise the dangling state will be left.
middleRoots := make(map[common.Hash]struct{})
for _, layer := range layers {
if layer.Root() == root {
break
}
middleRoots[layer.Root()] = struct{}{}
}
// Traverse the target state, re-construct the whole state trie and
// commit to the given bloom filter.
start := time.Now()
if err := snapshot.GenerateTrie(p.snaptree, root, p.db, p.stateBloom); err != nil {
return err
}
// Traverse the genesis, put all genesis state entries into the
// bloom filter too.
if err := extractGenesis(p.db, p.stateBloom); err != nil {
return err
}
filterName := bloomFilterName(p.datadir, root)
log.Info("Writing state bloom to disk", "name", filterName)
if err := p.stateBloom.Commit(filterName, filterName+stateBloomFileTempSuffix); err != nil {
return err
}
log.Info("State bloom filter committed", "name", filterName)
if err := prune(p.db, p.stateBloom, middleRoots, start); err != nil {
return err
}
// Pruning is done, now drop the "useless" layers from the snapshot.
// Firstly, flushing the target layer into the disk. After that all
// diff layers below the target will all be merged into the disk.
if err := p.snaptree.Cap(root, 0); err != nil {
return err
}
// Secondly, flushing the snapshot journal into the disk. All diff
// layers above the target layer are dropped silently. Eventually the
// entire snapshot tree is converted into a single disk layer with
// the pruning target as the root.
if _, err := p.snaptree.Journal(root); err != nil {
return err
}
// Delete the state bloom; it marks that the entire pruning procedure is
// finished. If any crash or manual exit happens before this,
// `RecoverPruning` will pick it up on the next restart to redo all
// the work.
os.RemoveAll(filterName)
return nil
}
// RecoverPruning will resume the pruning procedure during the system restart.
// This function is used in this case: the user tries to prune state data, but
// the system was interrupted midway because of a crash or manual kill. In this
// case, if the bloom filter for filtering active state is already constructed,
// the pruning can be resumed. What's more, if the bloom filter is constructed,
// the pruning **has to be resumed**. Otherwise a lot of dangling nodes may be
// left in the disk.
func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) error {
stateBloomPath, stateBloomRoot, err := findBloomFilter(datadir)
if err != nil {
return err
}
if stateBloomPath == "" {
return nil // nothing to recover
}
headHeader, err := getHeadHeader(db)
if err != nil {
return err
}
// Initialize the snapshot tree in recovery mode to handle this special case:
// - Users run the `prune-state` command multiple times
// - None of these `prune-state` runs finished (e.g. interrupted manually)
// - The state bloom filter is already generated, a part of state is deleted,
//   so resuming the pruning here is mandatory
// - The state HEAD is rewound already because of multiple incomplete `prune-state`
// In this case, even if the state HEAD does not exactly match the snapshot, it's
// still feasible to recover the pruning correctly.
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headHeader.Root, false, false, true)
if err != nil {
return err // The relevant snapshot(s) might not exist
}
stateBloom, err := NewStateBloomFromDisk(stateBloomPath)
if err != nil {
return err
}
log.Info("Loaded state bloom filter", "path", stateBloomPath)
// Before starting the pruning, delete the clean trie cache first.
// It's necessary, otherwise on the next restart we would hit the
// deleted state root in the "clean cache" and the incomplete
// state would be picked for usage.
deleteCleanTrieCache(trieCachePath)
// All the state roots of the middle layers should be forcibly pruned,
// otherwise the dangling state will be left.
var (
found bool
layers = snaptree.Snapshots(headHeader.Root, 128, true)
middleRoots = make(map[common.Hash]struct{})
)
for _, layer := range layers {
if layer.Root() == stateBloomRoot {
found = true
break
}
middleRoots[layer.Root()] = struct{}{}
}
if !found {
log.Error("Pruning target state is not existent")
return errors.New("non-existent target state")
}
if err := prune(db, stateBloom, middleRoots, time.Now()); err != nil {
return err
}
// Pruning is done, now drop the "useless" layers from the snapshot.
// Firstly, flushing the target layer into the disk. After that all
// diff layers below the target will all be merged into the disk.
if err := snaptree.Cap(stateBloomRoot, 0); err != nil {
return err
}
// Secondly, flushing the snapshot journal into the disk. All diff
// layers above the target are dropped silently. Eventually the
// entire snapshot tree is converted into a single disk layer with
// the pruning target as the root.
if _, err := snaptree.Journal(stateBloomRoot); err != nil {
return err
}
// Delete the state bloom; it marks that the entire pruning procedure is
// finished. If any crash or manual exit happens before this,
// `RecoverPruning` will pick it up on the next restart to redo all
// the work.
os.RemoveAll(stateBloomPath)
return nil
}
// extractGenesis loads the genesis state and commits all the state entries
// into the given bloomfilter.
func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
genesisHash := rawdb.ReadCanonicalHash(db, 0)
if genesisHash == (common.Hash{}) {
return errors.New("missing genesis hash")
}
genesis := rawdb.ReadBlock(db, genesisHash, 0)
if genesis == nil {
return errors.New("missing genesis block")
}
t, err := trie.NewSecure(genesis.Root(), trie.NewDatabase(db))
if err != nil {
return err
}
accIter := t.NodeIterator(nil)
for accIter.Next(true) {
hash := accIter.Hash()
// Embedded nodes don't have hash.
if hash != (common.Hash{}) {
stateBloom.Put(hash.Bytes(), nil)
}
// If it's a leaf node, we're looking at an account;
// dig into the storage trie further.
if accIter.Leaf() {
var acc state.Account
if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil {
return err
}
if acc.Root != emptyRoot {
storageTrie, err := trie.NewSecure(acc.Root, trie.NewDatabase(db))
if err != nil {
return err
}
storageIter := storageTrie.NodeIterator(nil)
for storageIter.Next(true) {
hash := storageIter.Hash()
if hash != (common.Hash{}) {
stateBloom.Put(hash.Bytes(), nil)
}
}
if storageIter.Error() != nil {
return storageIter.Error()
}
}
if !bytes.Equal(acc.CodeHash, emptyCode) {
stateBloom.Put(acc.CodeHash, nil)
}
}
}
return accIter.Error()
}
func bloomFilterName(datadir string, hash common.Hash) string {
return filepath.Join(datadir, fmt.Sprintf("%s.%s.%s", stateBloomFilePrefix, hash.Hex(), stateBloomFileSuffix))
}
func isBloomFilter(filename string) (bool, common.Hash) {
filename = filepath.Base(filename)
if strings.HasPrefix(filename, stateBloomFilePrefix) && strings.HasSuffix(filename, stateBloomFileSuffix) {
return true, common.HexToHash(filename[len(stateBloomFilePrefix)+1 : len(filename)-len(stateBloomFileSuffix)-1])
}
return false, common.Hash{}
}
func findBloomFilter(datadir string) (string, common.Hash, error) {
var (
stateBloomPath string
stateBloomRoot common.Hash
)
if err := filepath.Walk(datadir, func(path string, info os.FileInfo, err error) error {
if info != nil && !info.IsDir() {
ok, root := isBloomFilter(path)
if ok {
stateBloomPath = path
stateBloomRoot = root
}
}
return nil
}); err != nil {
return "", common.Hash{}, err
}
return stateBloomPath, stateBloomRoot, nil
}
func getHeadHeader(db ethdb.Database) (*types.Header, error) {
headHeaderHash := rawdb.ReadHeadBlockHash(db)
if headHeaderHash == (common.Hash{}) {
return nil, errors.New("empty head block hash")
}
headHeaderNumber := rawdb.ReadHeaderNumber(db, headHeaderHash)
if headHeaderNumber == nil {
return nil, errors.New("empty head block number")
}
headHeader := rawdb.ReadHeader(db, headHeaderHash, *headHeaderNumber)
if headHeader == nil {
return nil, errors.New("empty head header")
}
return headHeader, nil
}
const warningLog = `
WARNING!
The clean trie cache is not found. Please delete it by yourself after the
pruning. Remember: don't start Geth without deleting the clean trie cache,
otherwise the entire database may be damaged!
Check the command description "geth snapshot prune-state --help" for more details.
`
func deleteCleanTrieCache(path string) {
if _, err := os.Stat(path); os.IsNotExist(err) {
log.Warn(warningLog)
return
}
os.RemoveAll(path)
log.Info("Deleted trie clean cache", "path", path)
}
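
One detail from prune() worth spelling out: the iterated keys are hashes, so they are uniformly distributed over the keyspace, which lets the first 8 bytes of the current key double as a progress counter for the ETA log. A standalone sketch of that estimator (keyspaceETA is a hypothetical name; the arithmetic matches the loop above):

package main

import (
	"encoding/binary"
	"fmt"
	"math"
	"time"
)

// keyspaceETA estimates the remaining time from the current iteration key:
// its first 8 bytes measure how far through the uniformly distributed
// keyspace the iteration has progressed.
func keyspaceETA(currentKey []byte, start time.Time) time.Duration {
	done := binary.BigEndian.Uint64(currentKey[:8])
	if done == 0 {
		return 0
	}
	left := math.MaxUint64 - done
	// The +1 terms avoid division by zero right after startup.
	speed := done/uint64(time.Since(start)/time.Millisecond+1) + 1
	return time.Duration(left/speed) * time.Millisecond
}

func main() {
	start := time.Now().Add(-10 * time.Minute) // pretend pruning began 10 minutes ago
	key := make([]byte, 32)
	key[0] = 0x40 // roughly a quarter of the keyspace covered
	fmt.Println(keyspaceETA(key, start)) // about 3x the elapsed time remains
}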


@@ -18,12 +18,17 @@ package snapshot
 import (
 	"bytes"
+	"encoding/binary"
+	"errors"
 	"fmt"
+	"math"
+	"runtime"
 	"sync"
 	"time"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
@@ -38,46 +43,56 @@ type trieKV struct {
 type (
 	// trieGeneratorFn is the interface of trie generation which can
 	// be implemented by different trie algorithm.
-	trieGeneratorFn func(in chan trieKV, out chan common.Hash)
+	trieGeneratorFn func(db ethdb.KeyValueWriter, in chan (trieKV), out chan (common.Hash))
 	// leafCallbackFn is the callback invoked at the leaves of the trie,
 	// returns the subtrie root with the specified subtrie identifier.
-	leafCallbackFn func(hash common.Hash, stat *generateStats) common.Hash
+	leafCallbackFn func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error)
 )
 // GenerateAccountTrieRoot takes an account iterator and reproduces the root hash.
 func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) {
-	return generateTrieRoot(it, common.Hash{}, stdGenerate, nil, &generateStats{start: time.Now()}, true)
+	return generateTrieRoot(nil, it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true)
 }
 // GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash.
 func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) {
-	return generateTrieRoot(it, account, stdGenerate, nil, &generateStats{start: time.Now()}, true)
+	return generateTrieRoot(nil, it, account, stackTrieGenerate, nil, newGenerateStats(), true)
 }
-// VerifyState takes the whole snapshot tree as the input, traverses all the accounts
-// as well as the corresponding storages and compares the re-computed hash with the
-// original one(state root and the storage root).
-func VerifyState(snaptree *Tree, root common.Hash) error {
+// GenerateTrie takes the whole snapshot tree as the input, traverses all the
+// accounts as well as the corresponding storages and regenerates the whole state
+// (account trie + all storage tries).
+func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethdb.KeyValueWriter) error {
+	// Traverse all state by snapshot, re-generate the whole state trie
 	acctIt, err := snaptree.AccountIterator(root, common.Hash{})
 	if err != nil {
-		return err
+		return err // The required snapshot might not exist.
 	}
 	defer acctIt.Release()
-	got, err := generateTrieRoot(acctIt, common.Hash{}, stdGenerate, func(account common.Hash, stat *generateStats) common.Hash {
-		storageIt, err := snaptree.StorageIterator(root, account, common.Hash{})
+	got, err := generateTrieRoot(dst, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
+		// Migrate the code first, commit the contract code into the tmp db.
+		if codeHash != emptyCode {
+			code := rawdb.ReadCode(src, codeHash)
+			if len(code) == 0 {
+				return common.Hash{}, errors.New("failed to read contract code")
+			}
+			rawdb.WriteCode(dst, codeHash, code)
+		}
+		// Then migrate all storage trie nodes into the tmp db.
+		storageIt, err := snaptree.StorageIterator(root, accountHash, common.Hash{})
 		if err != nil {
-			return common.Hash{}
+			return common.Hash{}, err
 		}
 		defer storageIt.Release()
-		hash, err := generateTrieRoot(storageIt, account, stdGenerate, nil, stat, false)
+		hash, err := generateTrieRoot(dst, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
 		if err != nil {
-			return common.Hash{}
+			return common.Hash{}, err
 		}
-		return hash
-	}, &generateStats{start: time.Now()}, true)
+		return hash, nil
+	}, newGenerateStats(), true)
 	if err != nil {
 		return err
@ -91,23 +106,64 @@ func VerifyState(snaptree *Tree, root common.Hash) error {
 // generateStats is a collection of statistics gathered by the trie generator
 // for logging purposes.
 type generateStats struct {
-	accounts   uint64
-	slots      uint64
-	curAccount common.Hash
-	curSlot    common.Hash
-	start      time.Time
-	lock       sync.RWMutex
+	head  common.Hash
+	start time.Time
+
+	accounts uint64 // Number of accounts done (including those being crawled)
+	slots    uint64 // Number of storage slots done (including those being crawled)
+
+	slotsStart map[common.Hash]time.Time   // Start time for account slot crawling
+	slotsHead  map[common.Hash]common.Hash // Slot head for accounts being crawled
+
+	lock sync.RWMutex
 }

-// progress records the progress the trie generator made recently.
-func (stat *generateStats) progress(accounts, slots uint64, curAccount common.Hash, curSlot common.Hash) {
+// newGenerateStats creates a new generator stats.
+func newGenerateStats() *generateStats {
+	return &generateStats{
+		slotsStart: make(map[common.Hash]time.Time),
+		slotsHead:  make(map[common.Hash]common.Hash),
+		start:      time.Now(),
+	}
+}
+
+// progressAccounts updates the generator stats for the account range.
+func (stat *generateStats) progressAccounts(account common.Hash, done uint64) {
 	stat.lock.Lock()
 	defer stat.lock.Unlock()

-	stat.accounts += accounts
-	stat.slots += slots
-	stat.curAccount = curAccount
-	stat.curSlot = curSlot
+	stat.accounts += done
+	stat.head = account
+}
+
+// finishAccounts updates the generator stats for the finished account range.
+func (stat *generateStats) finishAccounts(done uint64) {
+	stat.lock.Lock()
+	defer stat.lock.Unlock()
+
+	stat.accounts += done
+}
+
+// progressContract updates the generator stats for a specific in-progress contract.
+func (stat *generateStats) progressContract(account common.Hash, slot common.Hash, done uint64) {
+	stat.lock.Lock()
+	defer stat.lock.Unlock()
+
+	stat.slots += done
+	stat.slotsHead[account] = slot
+	if _, ok := stat.slotsStart[account]; !ok {
+		stat.slotsStart[account] = time.Now()
+	}
+}
+
+// finishContract updates the generator stats for a specific just-finished contract.
+func (stat *generateStats) finishContract(account common.Hash, done uint64) {
+	stat.lock.Lock()
+	defer stat.lock.Unlock()
+
+	stat.slots += done
+	delete(stat.slotsHead, account)
+	delete(stat.slotsStart, account)
 }
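The split API reads cleanest from the generator's side: batched progress calls while an account range or a contract is being crawled, and a matching finish call that clears the per-contract bookkeeping. A hypothetical call sequence inside the snapshot package (fragment only, not a full program; the hashes and counts are invented):

	// Hypothetical driver sequence for the new stats API.
	var (
		stats    = newGenerateStats()
		accHash  = common.HexToHash("0x8a") // invented account hash
		slotHash = common.HexToHash("0x04") // invented storage slot hash
	)
	stats.progressAccounts(accHash, 512)           // a batch of account leaves was fed
	stats.progressContract(accHash, slotHash, 256) // starts this contract's per-crawl clock
	stats.finishContract(accHash, 31)              // contract done, its bookkeeping is dropped
	stats.finishAccounts(64)                       // tail batch once account iteration ends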
 // report prints the cumulative progress statistic smartly.
@@ -115,22 +171,39 @@ func (stat *generateStats) report() {
 	stat.lock.RLock()
 	defer stat.lock.RUnlock()

-	var ctx []interface{}
-	if stat.curSlot != (common.Hash{}) {
-		ctx = append(ctx, []interface{}{
-			"in", stat.curAccount,
-			"at", stat.curSlot,
-		}...)
-	} else {
-		ctx = append(ctx, []interface{}{"at", stat.curAccount}...)
+	ctx := []interface{}{
+		"accounts", stat.accounts,
+		"slots", stat.slots,
+		"elapsed", common.PrettyDuration(time.Since(stat.start)),
 	}
-	// Add the usual measurements
-	ctx = append(ctx, []interface{}{"accounts", stat.accounts}...)
-	if stat.slots != 0 {
-		ctx = append(ctx, []interface{}{"slots", stat.slots}...)
+	if stat.accounts > 0 {
+		// If there's progress on the account trie, estimate the time to finish crawling it
+		if done := binary.BigEndian.Uint64(stat.head[:8]) / stat.accounts; done > 0 {
+			var (
+				left  = (math.MaxUint64 - binary.BigEndian.Uint64(stat.head[:8])) / stat.accounts
+				speed = done/uint64(time.Since(stat.start)/time.Millisecond+1) + 1 // +1 to avoid division by zero
+				eta   = time.Duration(left/speed) * time.Millisecond
+			)
+			// If there are large contract crawls in progress, estimate their finish time
+			for acc, head := range stat.slotsHead {
+				start := stat.slotsStart[acc]
+				if done := binary.BigEndian.Uint64(head[:8]); done > 0 {
+					var (
+						left  = math.MaxUint64 - binary.BigEndian.Uint64(head[:8])
+						speed = done/uint64(time.Since(start)/time.Millisecond+1) + 1 // +1 to avoid division by zero
+					)
+					// Override the ETA if it's larger than the largest so far
+					if slotETA := time.Duration(left/speed) * time.Millisecond; eta < slotETA {
+						eta = slotETA
+					}
+				}
+			}
+			ctx = append(ctx, []interface{}{
+				"eta", common.PrettyDuration(eta),
+			}...)
+		}
 	}
-	ctx = append(ctx, []interface{}{"elapsed", common.PrettyDuration(time.Since(stat.start))}...)
-	log.Info("Generating trie hash from snapshot", ctx...)
+	log.Info("Iterating state snapshot", ctx...)
 }
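The ETA arithmetic above is worth spelling out: account hashes are uniformly distributed, so the 8-byte big-endian prefix of the current head hash measures how far through the 2^64 key space the crawl has come, and extrapolating the elapsed time over the remaining key space yields the estimate. A self-contained sketch of the same arithmetic, simplified (the real code also normalizes by the accumulated account count) and with invented values:

	package main

	import (
		"encoding/binary"
		"fmt"
		"math"
		"time"
	)

	// eta extrapolates the remaining crawl time from the position of the
	// current head hash within the uint64 key space.
	func eta(head [8]byte, start time.Time) time.Duration {
		done := binary.BigEndian.Uint64(head[:])
		if done == 0 {
			return 0
		}
		elapsed := uint64(time.Since(start)/time.Millisecond) + 1 // +1 to avoid division by zero
		speed := done/elapsed + 1                                 // keys per millisecond
		left := math.MaxUint64 - done
		return time.Duration(left/speed) * time.Millisecond
	}

	func main() {
		start := time.Now().Add(-10 * time.Second) // pretend we've crawled for 10s
		head := [8]byte{0x20}                      // roughly 1/8 through the key space
		fmt.Println("estimated time left:", eta(head, start))
	}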
 // reportDone prints the last log when the whole generation is finished.
@@ -144,13 +217,32 @@ func (stat *generateStats) reportDone() {
 		ctx = append(ctx, []interface{}{"slots", stat.slots}...)
 	}
 	ctx = append(ctx, []interface{}{"elapsed", common.PrettyDuration(time.Since(stat.start))}...)
-	log.Info("Generated trie hash from snapshot", ctx...)
+	log.Info("Iterated snapshot", ctx...)
+}
+
+// runReport periodically prints the progress information.
+func runReport(stats *generateStats, stop chan bool) {
+	timer := time.NewTimer(0)
+	defer timer.Stop()
+
+	for {
+		select {
+		case <-timer.C:
+			stats.report()
+			timer.Reset(time.Second * 8)
+		case success := <-stop:
+			if success {
+				stats.reportDone()
+			}
+			return
+		}
+	}
 }
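Pulling the loop out into runReport lets the trie generator and the pruner share one reporting helper. A self-contained analogue of the pattern, with the interval shortened so the demo finishes quickly:

	package main

	import (
		"fmt"
		"time"
	)

	// report mimics runReport above: a timer-driven progress printer that a
	// worker shuts down via a boolean channel, true meaning "finished
	// successfully, print the final summary".
	func report(stop chan bool) {
		timer := time.NewTimer(0)
		defer timer.Stop()

		for {
			select {
			case <-timer.C:
				fmt.Println("...still working")
				timer.Reset(200 * time.Millisecond)
			case success := <-stop:
				if success {
					fmt.Println("done")
				}
				return
			}
		}
	}

	func main() {
		stop := make(chan bool)
		go report(stop)
		time.Sleep(500 * time.Millisecond) // pretend to work
		stop <- true
	}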
 // generateTrieRoot generates the trie hash based on the snapshot iterator.
 // It can be used for generating account trie, storage trie or even the
 // whole state which connects the accounts and the corresponding storages.
-func generateTrieRoot(it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) {
+func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) {
 	var (
 		in  = make(chan trieKV)         // chan to pass leaves
 		out = make(chan common.Hash, 1) // chan to collect result
@@ -161,46 +253,43 @@ func generateTrieRoot(it Iterator, account common.Hash, generatorFn trieGenerato
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		generatorFn(in, out)
+		generatorFn(db, in, out)
 	}()
 	// Spin up a go-routine for progress logging
 	if report && stats != nil {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-
-			timer := time.NewTimer(0)
-			defer timer.Stop()
-
-			for {
-				select {
-				case <-timer.C:
-					stats.report()
-					timer.Reset(time.Second * 8)
-				case success := <-stoplog:
-					if success {
-						stats.reportDone()
-					}
-					return
-				}
-			}
+			runReport(stats, stoplog)
 		}()
 	}
+	// Create a semaphore to assign tasks and collect results through. We'll pre-
+	// fill it with nils, thus using the same channel for both limiting concurrent
+	// processing and gathering results.
+	threads := runtime.NumCPU()
+	results := make(chan error, threads)
+	for i := 0; i < threads; i++ {
+		results <- nil // fill the semaphore
+	}
 	// stop is a helper function to shutdown the background threads
 	// and return the re-generated trie hash.
-	stop := func(success bool) common.Hash {
+	stop := func(fail error) (common.Hash, error) {
 		close(in)
 		result := <-out
-		stoplog <- success
+		for i := 0; i < threads; i++ {
+			if err := <-results; err != nil && fail == nil {
+				fail = err
+			}
+		}
+		stoplog <- fail == nil
 		wg.Wait()
-		return result
+		return result, fail
 	}
 	var (
 		logged    = time.Now()
 		processed = uint64(0)
 		leaf      trieKV
-		last      common.Hash
 	)
 	// Start to feed leaves
 	for it.Next() {
@@ -212,26 +301,35 @@ func generateTrieRoot(it Iterator, account common.Hash, generatorFn trieGenerato
 		if leafCallback == nil {
 			fullData, err = FullAccountRLP(it.(AccountIterator).Account())
 			if err != nil {
-				stop(false)
-				return common.Hash{}, err
+				return stop(err)
 			}
 		} else {
+			// Wait until the semaphore allows us to continue, aborting if
+			// a sub-task failed
+			if err := <-results; err != nil {
+				results <- nil // stop will drain the results, add a noop back for this error we just consumed
+				return stop(err)
+			}
+			// Fetch the next account and process it concurrently
 			account, err := FullAccount(it.(AccountIterator).Account())
 			if err != nil {
-				stop(false)
-				return common.Hash{}, err
-			}
-			// Apply the leaf callback. Normally the callback is used to traverse
-			// the storage trie and re-generate the subtrie root.
-			subroot := leafCallback(it.Hash(), stats)
-			if !bytes.Equal(account.Root, subroot.Bytes()) {
-				stop(false)
-				return common.Hash{}, fmt.Errorf("invalid subroot(%x), want %x, got %x", it.Hash(), account.Root, subroot)
+				return stop(err)
 			}
+			go func(hash common.Hash) {
+				subroot, err := leafCallback(db, hash, common.BytesToHash(account.CodeHash), stats)
+				if err != nil {
+					results <- err
+					return
+				}
+				if !bytes.Equal(account.Root, subroot.Bytes()) {
+					results <- fmt.Errorf("invalid subroot(%x), want %x, got %x", it.Hash(), account.Root, subroot)
+					return
+				}
+				results <- nil
+			}(it.Hash())
 			fullData, err = rlp.EncodeToBytes(account)
 			if err != nil {
-				stop(false)
-				return common.Hash{}, err
+				return stop(err)
 			}
 		}
 		leaf = trieKV{it.Hash(), fullData}
@@ -244,32 +342,34 @@ func generateTrieRoot(it Iterator, account common.Hash, generatorFn trieGenerato
 		processed++
 		if time.Since(logged) > 3*time.Second && stats != nil {
 			if account == (common.Hash{}) {
-				stats.progress(processed, 0, it.Hash(), common.Hash{})
+				stats.progressAccounts(it.Hash(), processed)
 			} else {
-				stats.progress(0, processed, account, it.Hash())
+				stats.progressContract(account, it.Hash(), processed)
 			}
 			logged, processed = time.Now(), 0
 		}
-		last = it.Hash()
 	}
 	// Commit the last part statistic.
 	if processed > 0 && stats != nil {
 		if account == (common.Hash{}) {
-			stats.progress(processed, 0, last, common.Hash{})
+			stats.finishAccounts(processed)
 		} else {
-			stats.progress(0, processed, account, last)
+			stats.finishContract(account, processed)
 		}
 	}
-	result := stop(true)
-	return result, nil
+	return stop(nil)
 }
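The pre-filled results channel doubles as a counting semaphore and a result pipe: every acquired slot is eventually released with either nil or an error, which is what makes the single drain in stop() correct. A self-contained toy version of the same pattern (the task payloads are invented):

	package main

	import (
		"errors"
		"fmt"
		"runtime"
	)

	func main() {
		threads := runtime.NumCPU()
		results := make(chan error, threads)
		for i := 0; i < threads; i++ {
			results <- nil // fill the semaphore
		}
		var failure error
		for task := 0; task < 100; task++ {
			// Acquire a slot; a non-nil value is a previous task's failure.
			if err := <-results; err != nil {
				failure = err
				results <- nil // add a noop back so the drain below stays balanced
				break
			}
			go func(n int) {
				if n == 42 {
					results <- errors.New("task 42 failed") // release the slot with an error
					return
				}
				results <- nil // release the slot
			}(task)
		}
		// Drain every slot, waiting for stragglers and collecting late errors.
		for i := 0; i < threads; i++ {
			if err := <-results; err != nil && failure == nil {
				failure = err
			}
		}
		fmt.Println("first failure:", failure)
	}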

-// stdGenerate is a very basic hexary trie builder which uses the same Trie
-// as the rest of geth, with no enhancements or optimizations
-func stdGenerate(in chan trieKV, out chan common.Hash) {
-	t, _ := trie.New(common.Hash{}, trie.NewDatabase(memorydb.New()))
+func stackTrieGenerate(db ethdb.KeyValueWriter, in chan trieKV, out chan common.Hash) {
+	t := trie.NewStackTrie(db)
 	for leaf := range in {
 		t.TryUpdate(leaf.key[:], leaf.value)
 	}
-	out <- t.Hash()
+	var root common.Hash
+	if db == nil {
+		root = t.Hash()
+	} else {
+		root, _ = t.Commit()
+	}
+	out <- root
 }
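The nil-vs-writer split in stackTrieGenerate is the reason the db parameter is threaded through everywhere: Verify only needs the root hash, while the pruner's migration path also needs the trie nodes persisted. A sketch of both modes, assuming the post-PR trie and rawdb APIs:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/trie"
	)

	func main() {
		// Hash-only mode: no database, just the root (the Verify path).
		st := trie.NewStackTrie(nil)
		st.TryUpdate([]byte{0x01}, []byte("one"))
		st.TryUpdate([]byte{0x02}, []byte("two"))
		fmt.Println("root (hash only):", st.Hash())

		// Commit mode: nodes are written out as the trie is folded up
		// (the pruner's migration path).
		db := rawdb.NewMemoryDatabase()
		st2 := trie.NewStackTrie(db)
		st2.TryUpdate([]byte{0x01}, []byte("one"))
		st2.TryUpdate([]byte{0x02}, []byte("two"))
		root, err := st2.Commit()
		fmt.Println("root (committed):", root, "err:", err)
	}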

@@ -178,7 +178,7 @@ type Tree struct {
 // store, on a background thread. If the memory layers from the journal are not
 // continuous with the disk layer or the journal is missing, all diffs will be discarded
 // iff it's in "recovery" mode, otherwise rebuild is mandatory.
-func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, recovery bool) *Tree {
+func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) {
 	// Create a new, empty snapshot tree
 	snap := &Tree{
 		diskdb: diskdb,
@@ -192,16 +192,19 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 	// Attempt to load a previously persisted snapshot and rebuild one if failed
 	head, err := loadSnapshot(diskdb, triedb, cache, root, recovery)
 	if err != nil {
-		log.Warn("Failed to load snapshot, regenerating", "err", err)
-		snap.Rebuild(root)
-		return snap
+		if rebuild {
+			log.Warn("Failed to load snapshot, regenerating", "err", err)
+			snap.Rebuild(root)
+			return snap, nil
+		}
+		return nil, err // Surface the error, don't rebuild automatically.
 	}
 	// Existing snapshot loaded, seed all the layers
 	for head != nil {
 		snap.layers[head.Root()] = head
 		head = head.Parent()
 	}
-	return snap
+	return snap, nil
 }
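Callers now opt into regeneration explicitly: with rebuild=false the constructor surfaces a missing or broken snapshot as an error instead of silently kicking off a rebuild, which is what the pruner's recovery path relies on. A minimal sketch of the failure path on an empty database, using the new parameter order (diskdb, triedb, cache, root, async, rebuild, recovery):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/core/state/snapshot"
		"github.com/ethereum/go-ethereum/trie"
	)

	func main() {
		diskdb := rawdb.NewMemoryDatabase()
		triedb := trie.NewDatabase(diskdb)

		// rebuild=false: a fresh database has no snapshot to load, so we get
		// an error back instead of an automatic Rebuild(root).
		_, err := snapshot.New(diskdb, triedb, 256, common.Hash{}, false, false, false)
		fmt.Println("snapshot load on empty db:", err)
	}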
 // waitBuild blocks until the snapshot finishes rebuilding. This method is meant
@@ -234,6 +237,39 @@ func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
 	return t.layers[blockRoot]
 }
+
+// Snapshots returns all visited layers, starting from the topmost layer with
+// the specific root and traversing downward. The number of layers is capped by
+// the given limit. If nodisk is set, the disk layer is excluded.
+func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	if limits == 0 {
+		return nil
+	}
+	layer := t.layers[root]
+	if layer == nil {
+		return nil
+	}
+	var ret []Snapshot
+	for {
+		if _, isdisk := layer.(*diskLayer); isdisk && nodisk {
+			break
+		}
+		ret = append(ret, layer)
+		limits -= 1
+		if limits == 0 {
+			break
+		}
+		parent := layer.Parent()
+		if parent == nil {
+			break
+		}
+		layer = parent
+	}
+	return ret
+}
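From the pruner's side the accessor boils down to a single call; a hypothetical fragment (snaps and head are placeholders for a live tree and its head root) collecting the recent diff layer roots while skipping the disk layer:

	// At most 128 diff layers below head; the disk layer is filtered out.
	for _, layer := range snaps.Snapshots(head, 128, true) {
		fmt.Println("diff layer root:", layer.Root())
	}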
 // Update adds a new snapshot into the tree, if that can be linked to an existing
 // old parent. It is disallowed to insert a disk layer (the origin of all).
 func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
@@ -681,6 +717,38 @@ func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek commo
 	return newFastStorageIterator(t, root, account, seek)
 }
+
+// Verify iterates the whole state (all the accounts as well as the corresponding
+// storages) with the specific root and compares the re-computed hash with the
+// original one.
+func (t *Tree) Verify(root common.Hash) error {
+	acctIt, err := t.AccountIterator(root, common.Hash{})
+	if err != nil {
+		return err
+	}
+	defer acctIt.Release()
+
+	got, err := generateTrieRoot(nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
+		storageIt, err := t.StorageIterator(root, accountHash, common.Hash{})
+		if err != nil {
+			return common.Hash{}, err
+		}
+		defer storageIt.Release()
+
+		hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
+		if err != nil {
+			return common.Hash{}, err
+		}
+		return hash, nil
+	}, newGenerateStats(), true)
+	if err != nil {
+		return err
+	}
+	if got != root {
+		return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
+	}
+	return nil
+}
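Compared with the old free function snapshot.VerifyState(tree, root), the method form keeps call sites terse; a hypothetical fragment (snaps and root are placeholders):

	// Recompute the state root from the snapshot and compare it with the trie's.
	if err := snaps.Verify(root); err != nil {
		return fmt.Errorf("snapshot/trie mismatch: %v", err)
	}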
 // disklayer is an internal helper function to return the disk layer.
 // The lock of snapTree is assumed to be held already.
 func (t *Tree) disklayer() *diskLayer {

@@ -17,6 +17,7 @@
 package snapshot

 import (
+	"encoding/binary"
 	"fmt"
 	"math/big"
 	"math/rand"
@@ -369,3 +370,69 @@ func TestPostCapBasicDataAccess(t *testing.T) {
 		t.Error("expected error capping the disk layer, got none")
 	}
 }
+
+// TestSnaphots tests the functionality for retrieving the snapshot
+// with a given head root and the desired depth.
+func TestSnaphots(t *testing.T) {
+	// setAccount is a helper to construct a random account entry and assign it to
+	// an account slot in a snapshot
+	setAccount := func(accKey string) map[common.Hash][]byte {
+		return map[common.Hash][]byte{
+			common.HexToHash(accKey): randomAccount(),
+		}
+	}
+	makeRoot := func(height uint64) common.Hash {
+		var buffer [8]byte
+		binary.BigEndian.PutUint64(buffer[:], height)
+		return common.BytesToHash(buffer[:])
+	}
+	// Create a starting base layer and a snapshot tree out of it
+	base := &diskLayer{
+		diskdb: rawdb.NewMemoryDatabase(),
+		root:   common.HexToHash("0x01"),
+		cache:  fastcache.New(1024 * 500),
+	}
+	snaps := &Tree{
+		layers: map[common.Hash]snapshot{
+			base.root: base,
+		},
+	}
+	// Construct the snapshots with 128 layers
+	var (
+		last = common.HexToHash("0x01")
+		head common.Hash
+	)
+	// Flush another 128 layers, one diff will be flattened into the parent.
+	for i := 0; i < 128; i++ {
+		head = makeRoot(uint64(i + 2))
+		snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil)
+		last = head
+		snaps.Cap(head, 128) // 129 layers (128 diffs + 1 disk) are allowed, the 129th is the persistent layer
+	}
+	var cases = []struct {
+		headRoot     common.Hash
+		limit        int
+		nodisk       bool
+		expected     int
+		expectBottom common.Hash
+	}{
+		{head, 0, false, 0, common.Hash{}},
+		{head, 64, false, 64, makeRoot(127 + 2 - 63)},
+		{head, 128, false, 128, makeRoot(2)},              // All diff layers
+		{head, 129, true, 128, makeRoot(2)},               // All diff layers
+		{head, 129, false, 129, common.HexToHash("0x01")}, // All diff layers + disk layer
+	}
+	for _, c := range cases {
+		layers := snaps.Snapshots(c.headRoot, c.limit, c.nodisk)
+		if len(layers) != c.expected {
+			t.Fatalf("Returned snapshot layers are mismatched, want %v, got %v", c.expected, len(layers))
+		}
+		if len(layers) == 0 {
+			continue
+		}
+		bottommost := layers[len(layers)-1]
+		if bottommost.Root() != c.expectBottom {
+			t.Fatalf("Snapshot mismatch, want %v, got %v", c.expectBottom, bottommost.Root())
+		}
+	}
+}

@@ -34,6 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/bloombits"
 	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/state/pruner"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/eth/downloader"
@@ -131,6 +132,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 	}
 	log.Info("Initialised chain configuration", "config", chainConfig)

+	if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil {
+		log.Error("Failed to recover state", "error", err)
+	}
 	eth := &Ethereum{
 		config:  config,
 		chainDb: chainDb,

go.mod
@@ -3,10 +3,7 @@ module github.com/ethereum/go-ethereum
 go 1.13

 require (
-	github.com/Azure/azure-pipeline-go v0.2.2 // indirect
 	github.com/Azure/azure-storage-blob-go v0.7.0
-	github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect
-	github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
 	github.com/VictoriaMetrics/fastcache v1.5.7
 	github.com/aws/aws-sdk-go v1.25.48
 	github.com/btcsuite/btcd v0.20.1-beta
@@ -15,15 +12,12 @@ require (
 	github.com/consensys/gurvy v0.3.8
 	github.com/davecgh/go-spew v1.1.1
 	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea
-	github.com/dlclark/regexp2 v1.2.0 // indirect
 	github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf
 	github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498
 	github.com/edsrzf/mmap-go v1.0.0
-	github.com/fatih/color v1.3.0
+	github.com/fatih/color v1.7.0
 	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
 	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
-	github.com/go-ole/go-ole v1.2.1 // indirect
-	github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect
 	github.com/go-stack/stack v1.8.0
 	github.com/golang/protobuf v1.4.3
 	github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3
@@ -39,15 +33,13 @@ require (
 	github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e
 	github.com/julienschmidt/httprouter v1.2.0
 	github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356
-	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mattn/go-colorable v0.1.0
 	github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035
-	github.com/naoina/go-stringutil v0.1.0 // indirect
 	github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
 	github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c
 	github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222
 	github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
-	github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150
+	github.com/prometheus/tsdb v0.7.1
 	github.com/rjeczalik/notify v0.9.1
 	github.com/rs/cors v1.7.0
 	github.com/shirou/gopsutil v2.20.5+incompatible
@@ -56,12 +48,10 @@ require (
 	github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
 	github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
 	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
-	golang.org/x/net v0.0.0-20200822124328-c89045814202 // indirect
-	golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8
+	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
 	golang.org/x/text v0.3.3
 	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
 	gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6
 	gopkg.in/urfave/cli.v1 v1.20.0
-	gotest.tools v2.2.0+incompatible // indirect
 )

go.sum
@@ -109,6 +109,7 @@ github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
@@ -253,8 +254,8 @@ github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZ
 github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
 github.com/holiman/uint256 v1.1.1 h1:4JywC80b+/hSfljFlEBLHrrh+CIONLDz9NuFl0af4Mw=
 github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
 github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
 github.com/huin/goupnp v1.0.1-0.20200620063722-49508fba0031 h1:HarGZ5h9HD9LgEg1yRVMXyfiw4wlXiLiYM2oMjeA/SE=
 github.com/huin/goupnp v1.0.1-0.20200620063722-49508fba0031/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo=
@@ -263,6 +264,7 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
 github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
 github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8=
 github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
 github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
@@ -275,12 +277,10 @@ github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJye
 github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
 github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
 github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
-github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89 h1:12K8AlpT0/6QUXSfV0yi4Q0jkbq8NDtIKFtF61AoqV0=
 github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI=
 github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@@ -298,7 +298,6 @@ github.com/kilic/bls12-381 v0.0.0-20201226121925-69dacb279461/go.mod h1:vDTTHJON
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE=
 github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
@@ -401,7 +400,6 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150 h1:ZeU+auZj1iNzN8iVhff6M38Mfu73FQiJve/GEXYJBjE=
 github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
@@ -440,13 +438,11 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57N
 github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
 github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
 github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
-github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -570,7 +566,6 @@ golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8 h1:AvbQYmiaaaza3cW3QXRyPo5kYgpFIzOAfeAAN7m3qQ4=
 golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210105210732-16f7687f5001/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@@ -32,7 +32,6 @@ import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
-	"github.com/ethereum/go-ethereum/core/state/snapshot"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/params"
@@ -147,7 +146,7 @@ func (t *BlockTest) Run(snapshotter bool) error {
 	}
 	// Cross-check the snapshot-to-hash against the trie hash
 	if snapshotter {
-		if err := snapshot.VerifyState(chain.Snapshots(), chain.CurrentBlock().Root()); err != nil {
+		if err := chain.Snapshots().Verify(chain.CurrentBlock().Root()); err != nil {
 			return err
 		}
 	}

@@ -236,7 +236,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
 	var snaps *snapshot.Tree
 	if snapshotter {
-		snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false, false)
+		snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, root, false, true, false)
 	}
 	statedb, _ = state.New(root, sdb, snaps)
 	return snaps, statedb

@@ -35,7 +35,7 @@ var stPool = sync.Pool{
 	},
 }

-func stackTrieFromPool(db ethdb.KeyValueStore) *StackTrie {
+func stackTrieFromPool(db ethdb.KeyValueWriter) *StackTrie {
 	st := stPool.Get().(*StackTrie)
 	st.db = db
 	return st
@@ -50,24 +50,23 @@ func returnToPool(st *StackTrie) {
 // in order. Once it determines that a subtree will no longer be inserted
 // into, it will hash it and free up the memory it uses.
 type StackTrie struct {
-	nodeType  uint8          // node type (as in branch, ext, leaf)
-	val       []byte         // value contained by this node if it's a leaf
-	key       []byte         // key chunk covered by this (full|ext) node
-	keyOffset int            // offset of the key chunk inside a full key
-	children  [16]*StackTrie // list of children (for fullnodes and exts)
-
-	db ethdb.KeyValueStore // Pointer to the commit db, can be nil
+	nodeType  uint8                // node type (as in branch, ext, leaf)
+	val       []byte               // value contained by this node if it's a leaf
+	key       []byte               // key chunk covered by this (full|ext) node
+	keyOffset int                  // offset of the key chunk inside a full key
+	children  [16]*StackTrie       // list of children (for fullnodes and exts)
+	db        ethdb.KeyValueWriter // Pointer to the commit db, can be nil
 }

 // NewStackTrie allocates and initializes an empty trie.
-func NewStackTrie(db ethdb.KeyValueStore) *StackTrie {
+func NewStackTrie(db ethdb.KeyValueWriter) *StackTrie {
 	return &StackTrie{
 		nodeType: emptyNode,
 		db:       db,
 	}
 }

-func newLeaf(ko int, key, val []byte, db ethdb.KeyValueStore) *StackTrie {
+func newLeaf(ko int, key, val []byte, db ethdb.KeyValueWriter) *StackTrie {
 	st := stackTrieFromPool(db)
 	st.nodeType = leafNode
 	st.keyOffset = ko
@@ -76,7 +75,7 @@ func newLeaf(ko int, key, val []byte, db ethdb.KeyValueStore) *StackTrie {
 	return st
 }

-func newExt(ko int, key []byte, child *StackTrie, db ethdb.KeyValueStore) *StackTrie {
+func newExt(ko int, key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie {
 	st := stackTrieFromPool(db)
 	st.nodeType = extNode
 	st.keyOffset = ko