// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"
	"io"
	"reflect"
	"runtime"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
)

var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple of tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes

	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize  common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize common.StorageSize // Storage size of the external children tracking
	preimages    *preimageStore     // The store for caching preimages

	lock sync.RWMutex
}
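
// A hedged illustration of the split design above (hypothetical caller code,
// not part of this file): reads may proceed concurrently with a mutator, but
// mutators must never overlap each other.
//
//	// One goroutine serves reads; Node only takes the read lock.
//	go func() {
//		blob, _ := db.Node(root)
//		_ = blob
//	}()
//	// Meanwhile the owning goroutine may garbage collect; Dereference is a
//	// mutator and must not race with other mutators such as Cap or Commit.
//	db.Dereference(oldRoot)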

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time storing
// them in the same cache fields).
type rawNode []byte

func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }

func (n rawNode) EncodeRLP(w io.Writer) error {
	_, err := w.Write(n)
	return err
}

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

func (n rawFullNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }

func (n rawFullNode) EncodeRLP(w io.Writer) error {
	eb := rlp.NewEncoderBuffer(w)
	n.encode(eb)
	return eb.Flush()
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// cachedNodeChildrenSize is the raw size of an initialized but empty external
// reference map.
const cachedNodeChildrenSize = 48
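
// As a worked illustration of the accounting above (the payload size is
// assumed): a dirty node with a 100 byte RLP payload contributes
// common.HashLength+100 = 132 bytes to db.dirtiesSize, plus cachedNodeSize
// bytes of metadata, plus cachedNodeChildrenSize once it acquires its first
// external child reference.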

// rlp returns the raw rlp encoded blob of the cached trie node, either directly
// from the cache, or by regenerating it from the collapsed node.
func (n *cachedNode) rlp() []byte {
	if node, ok := n.node.(rawNode); ok {
		return node
	}
	return nodeToBytes(n.node)
}

// obj returns the decoded and expanded trie node, either directly from the cache,
// or by regenerating it from the rlp encoded blob.
func (n *cachedNode) obj(hash common.Hash) node {
	if node, ok := n.node.(rawNode); ok {
		// Raw-blob format nodes are loaded either from the clean cache or
		// the database; they are always their own copy, so it is safe to
		// use the unsafe decoder on them.
		return mustDecodeNodeUnsafe(hash[:], node)
	}
	return expandNode(hash[:], n.node)
}

// forChilds invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
func (n *cachedNode) forChilds(onChild func(hash common.Hash)) {
	for child := range n.children {
		onChild(child)
	}
	if _, ok := n.node.(rawNode); !ok {
		forGatherChildren(n.node, onChild)
	}
}

// forGatherChildren traverses the node hierarchy of a collapsed storage node and
// invokes the callback for all the hashnode children.
func forGatherChildren(n node, onChild func(hash common.Hash)) {
	switch n := n.(type) {
	case *rawShortNode:
		forGatherChildren(n.Val, onChild)
	case rawFullNode:
		for i := 0; i < 16; i++ {
			forGatherChildren(n[i], onChild)
		}
	case hashNode:
		onChild(common.BytesToHash(n))
	case valueNode, nil, rawNode:
	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

// simplifyNode traverses the hierarchy of an expanded memory node and discards
// all the internal caches, returning a node that only contains the raw data.
func simplifyNode(n node) node {
	switch n := n.(type) {
	case *shortNode:
		// Short nodes discard the flags and cascade
		return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)}
	case *fullNode:
		// Full nodes discard the flags and cascade
		node := rawFullNode(n.Children)
		for i := 0; i < len(node); i++ {
			if node[i] != nil {
				node[i] = simplifyNode(node[i])
			}
		}
		return node
	case valueNode, hashNode, rawNode:
		return n
	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

// expandNode traverses the node hierarchy of a collapsed storage node and converts
// all fields and keys into expanded memory form.
func expandNode(hash hashNode, n node) node {
	switch n := n.(type) {
	case *rawShortNode:
		// Short nodes need key and child expansion
		return &shortNode{
			Key: compactToHex(n.Key),
			Val: expandNode(nil, n.Val),
			flags: nodeFlag{
				hash: hash,
			},
		}
	case rawFullNode:
		// Full nodes need child expansion
		node := &fullNode{
			flags: nodeFlag{
				hash: hash,
			},
		}
		for i := 0; i < len(node.Children); i++ {
			if n[i] != nil {
				node.Children[i] = expandNode(nil, n[i])
			}
		}
		return node
	case valueNode, hashNode:
		return n
	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

// Config defines all necessary options for the database.
type Config struct {
	Cache     int    // Memory allowance (MB) to use for caching trie nodes in memory
	Journal   string // Journal of clean cache to survive node restarts
	Preimages bool   // Flag whether the preimage of trie key is recorded
}

// NewDatabase creates a new trie database to store ephemeral trie content before
// it's written out to disk or garbage collected. No read cache is created, so all
// data retrievals will hit the underlying disk database.
func NewDatabase(diskdb ethdb.KeyValueStore) *Database {
	return NewDatabaseWithConfig(diskdb, nil)
}

// NewDatabaseWithConfig creates a new trie database to store ephemeral trie content
// before it's written out to disk or garbage collected. It also acts as a read cache
// for nodes loaded from disk.
func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database {
	var cleans *fastcache.Cache
	if config != nil && config.Cache > 0 {
		if config.Journal == "" {
			cleans = fastcache.New(config.Cache * 1024 * 1024)
		} else {
			cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
		}
	}
	var preimage *preimageStore
	if config != nil && config.Preimages {
		preimage = newPreimageStore(diskdb)
	}
	db := &Database{
		diskdb: diskdb,
		cleans: cleans,
		dirties: map[common.Hash]*cachedNode{{}: {
			children: make(map[common.Hash]uint16),
		}},
		preimages: preimage,
	}
	return db
}
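
// A minimal construction sketch (hypothetical caller code; the rawdb in-memory
// database is used purely for illustration):
//
//	diskdb := rawdb.NewMemoryDatabase()
//	triedb := NewDatabaseWithConfig(diskdb, &Config{
//		Cache:     256,   // 256 MB clean-node cache
//		Journal:   "",    // no journal, the cache dies with the process
//		Preimages: false, // don't record trie key preimages
//	})
//	_ = triedb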

// DiskDB retrieves the persistent storage backing the trie database.
func (db *Database) DiskDB() ethdb.KeyValueStore {
	return db.diskdb
}

// insert inserts a simplified trie node into the memory database.
// All nodes inserted by this function will be reference tracked
// and in theory should only be used for **trie node** insertion.
func (db *Database) insert(hash common.Hash, size int, node node) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(size))

	// Create the cached entry for this node
	entry := &cachedNode{
		node:      node,
		size:      uint16(size),
		flushPrev: db.newest,
	}
	entry.forChilds(func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}

// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
func (db *Database) node(hash common.Hash) node {
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))

			// The value returned from the cache is its own copy, so it's
			// safe to use mustDecodeNodeUnsafe for decoding.
			return mustDecodeNodeUnsafe(hash[:], enc)
		}
	}
	// Retrieve the node from the dirty cache if available
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(dirty.size))
		return dirty.obj(hash)
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc, err := db.diskdb.Get(hash[:])
	if err != nil || enc == nil {
		return nil
	}
	if db.cleans != nil {
		db.cleans.Set(hash[:], enc)
		memcacheCleanMissMeter.Mark(1)
		memcacheCleanWriteMeter.Mark(int64(len(enc)))
	}
	// The value returned from the database is its own copy, so it's
	// safe to use mustDecodeNodeUnsafe for decoding.
	return mustDecodeNodeUnsafe(hash[:], enc)
}

// Node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
func (db *Database) Node(hash common.Hash) ([]byte, error) {
	// It doesn't make sense to retrieve the metaroot
	if hash == (common.Hash{}) {
		return nil, errors.New("not found")
	}
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return enc, nil
		}
	}
	// Retrieve the node from the dirty cache if available
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(dirty.size))
		return dirty.rlp(), nil
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc := rawdb.ReadTrieNode(db.diskdb, hash)
	if len(enc) != 0 {
		if db.cleans != nil {
			db.cleans.Set(hash[:], enc)
			memcacheCleanMissMeter.Mark(1)
			memcacheCleanWriteMeter.Mark(int64(len(enc)))
		}
		return enc, nil
	}
	return nil, errors.New("not found")
}

// Nodes retrieves the hashes of all the nodes cached within the memory database.
// This method is extremely expensive and should only be used to validate internal
// states in test code.
func (db *Database) Nodes() []common.Hash {
	db.lock.RLock()
	defer db.lock.RUnlock()

	var hashes = make([]common.Hash, 0, len(db.dirties))
	for hash := range db.dirties {
		if hash != (common.Hash{}) { // Special case for "root" references/nodes
			hashes = append(hashes, hash)
		}
	}
	return hashes
}

// Reference adds a new reference from a parent node to a child node.
// This function is used to add a reference between an internal trie node
// and an external node (e.g. a storage trie root); all internal trie nodes
// are referenced together by the database itself.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.reference(child, parent)
}

// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
	// If the node does not exist, it's a node pulled from disk, skip
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If the reference already exists, only duplicate for roots
	if db.dirties[parent].children == nil {
		db.dirties[parent].children = make(map[common.Hash]uint16)
		db.childrenSize += cachedNodeChildrenSize
	} else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
		return
	}
	node.parents++
	db.dirties[parent].children[child]++
	if db.dirties[parent].children[child] == 1 {
		db.childrenSize += common.HashLength + 2 // uint16 counter
	}
}
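
// A hedged sketch of the usual reference lifecycle (hypothetical caller code;
// Dereference and Commit are defined below):
//
//	db.Reference(root, common.Hash{})      // keep a trie alive, anchored at the metaroot
//	db.Reference(storageRoot, accountNode) // link a storage trie into the account trie
//	...
//	db.Dereference(staleRoot)         // drop a stale trie, cascading the GC
//	db.Commit(currentRoot, false, nil) // or persist the current one to disk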

// Dereference removes an existing reference from a root node.
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root, common.Hash{})

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

// dereference is the private locked version of Dereference.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	// Dereference the parent-child
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
			db.childrenSize -= (common.HashLength + 2) // uint16 counter
		}
	}
	// If the child does not exist, it's a previously committed node.
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special corner case where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChilds(func(hash common.Hash) {
			db.dereference(hash, child)
		})
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= cachedNodeChildrenSize
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))

	// If the preimage cache got large enough, push to disk. If it's still small
	// leave for later to deduplicate writes.
	if db.preimages != nil {
		db.preimages.commit(false)
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		rawdb.WriteTrieNode(batch, oldest, node.rlp())

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
		if node.children != nil {
			size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data
	db.lock.Lock()
	defer db.lock.Unlock()

	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
	return nil
}
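
// A hedged usage sketch for Cap (hypothetical caller code): a block-processing
// loop might cap the dirty cache to a memory budget after every block, keeping
// the flush-list bounded while recent tries stay referenced in memory.
//
//	const dirtyLimit = 256 * 1024 * 1024 // assumed 256 MB budget
//	if nodes, _ := triedb.Size(); nodes > dirtyLimit {
//		if err := triedb.Cap(dirtyLimit * 3 / 4); err != nil {
//			log.Error("Failed to cap trie database", "err", err)
//		}
//	}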

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move all of the accumulated preimages into a write batch
	if db.preimages != nil {
		db.preimages.commit(true)
	}
	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher, callback); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	db.lock.Lock()
	defer db.lock.Unlock()

	batch.Replay(uncacher)
	batch.Reset()

	// Update the commit metrics
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0
	return nil
}

// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner, callback func(common.Hash)) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	var err error
	node.forChilds(func(child common.Hash) {
		if err == nil {
			err = db.commit(child, batch, uncacher, callback)
		}
	})
	if err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	rawdb.WriteTrieNode(batch, hash, node.rlp())
	if callback != nil {
		callback(hash)
	}
	if batch.ValueSize() >= ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		db.lock.Lock()
		batch.Replay(uncacher)
		batch.Reset()
		db.lock.Unlock()
	}
	return nil
}

// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database
}

// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure data availability while moving from memory
// to disk.
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	// If the node does not exist, we're done on this path
	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}
	// Node still exists, remove it from the flush-list
	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = common.Hash{}
	case c.db.newest:
		c.db.newest = node.flushPrev
		c.db.dirties[node.flushPrev].flushNext = common.Hash{}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Remove the node from the dirty cache
	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
	if node.children != nil {
		c.db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
	}
	// Move the flushed node into the clean cache to prevent insta-reloads
	if c.db.cleans != nil {
		c.db.cleans.Set(hash[:], rlp)
		memcacheCleanWriteMeter.Mark(int64(len(rlp)))
	}
	return nil
}

func (c *cleaner) Delete(key []byte) error {
	panic("not implemented")
}

// Update inserts the dirty nodes of the provided nodeset into the database
// and links the account trie with multiple storage tries if necessary.
func (db *Database) Update(nodes *MergedNodeSet) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	// Insert dirty nodes into the database. In the same tree, it must be
	// ensured that children are inserted first, then parents, so that the
	// children can be linked with their parent correctly.
	//
	// Note, the storage tries must be flushed before the account trie to
	// retain the invariant that children go into the dirty cache first.
	var order []common.Hash
	for owner := range nodes.sets {
		if owner == (common.Hash{}) {
			continue
		}
		order = append(order, owner)
	}
	if _, ok := nodes.sets[common.Hash{}]; ok {
		order = append(order, common.Hash{})
	}
	for _, owner := range order {
		subset := nodes.sets[owner]
		for _, path := range subset.paths {
			n, ok := subset.nodes[path]
			if !ok {
				return fmt.Errorf("missing node %x %v", owner, path)
			}
			db.insert(n.hash, int(n.size), n.node)
		}
	}
	// Link up the account trie and storage trie if the node points
	// to an account trie leaf.
	if set, present := nodes.sets[common.Hash{}]; present {
		for _, n := range set.leaves {
			var account types.StateAccount
			if err := rlp.DecodeBytes(n.blob, &account); err != nil {
				return err
			}
			if account.Root != emptyRoot {
				db.reference(account.Root, n.parent)
			}
		}
	}
	return nil
}
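
// A hedged sketch of feeding a trie commit result into Update (hypothetical
// caller code; the exact Trie.Commit signature may differ across versions):
//
//	root, set := tr.Commit(true) // assumed to return the root and a *NodeSet
//	merged := NewMergedNodeSet()
//	if set != nil {
//		if err := merged.Merge(set); err != nil {
//			return err
//		}
//	}
//	if err := triedb.Update(merged); err != nil {
//		return err
//	}
//	triedb.Reference(root, common.Hash{})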

// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
	var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))

	var preimageSize common.StorageSize
	if db.preimages != nil {
		preimageSize = db.preimages.size()
	}
	return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize
}

// saveCache saves the clean state cache to the given directory path
// using the specified number of CPU cores.
func (db *Database) saveCache(dir string, threads int) error {
	if db.cleans == nil {
		return nil
	}
	log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads)

	start := time.Now()
	err := db.cleans.SaveToFileConcurrent(dir, threads)
	if err != nil {
		log.Error("Failed to persist clean trie cache", "error", err)
		return err
	}
	log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}

// SaveCache atomically saves fast cache data to the given dir using all
// available CPU cores.
func (db *Database) SaveCache(dir string) error {
	return db.saveCache(dir, runtime.GOMAXPROCS(0))
}

// SaveCachePeriodically atomically saves fast cache data to the given dir with
// the specified interval. Each dump operation will only use a single CPU core.
func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			db.saveCache(dir, 1)
		case <-stopCh:
			return
		}
	}
}
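
// A hedged usage sketch for the periodic cache journal (hypothetical caller
// code): run the saver in a background goroutine and close the stop channel
// on shutdown, following up with one final explicit save.
//
//	stopCh := make(chan struct{})
//	go triedb.SaveCachePeriodically("/tmp/triecache", 10*time.Minute, stopCh)
//	...
//	close(stopCh)
//	triedb.SaveCache("/tmp/triecache") // assumed final flush on shutdown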

// CommitPreimages flushes the dangling preimages to disk. It is meant to be
// called when closing the blockchain object, so that preimages are persisted
// to the database.
func (db *Database) CommitPreimages() error {
	db.lock.Lock()
	defer db.lock.Unlock()

	if db.preimages == nil {
		return nil
	}
	return db.preimages.commit(true)
}