package trie

import (
	"fmt"
	"io"
	"reflect"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
type rawNode []byte

func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }

func (n rawNode) EncodeRLP(w io.Writer) error {
	_, err := w.Write(n)
	return err
}

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

func (n rawFullNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }

func (n rawFullNode) EncodeRLP(w io.Writer) error {
	eb := rlp.NewEncoderBuffer(w)
	n.encode(eb)
	return eb.Flush()
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// cachedNodeChildrenSize is the raw size of an initialized but empty external
// reference map.
const cachedNodeChildrenSize = 48

// rlp returns the raw rlp encoded blob of the cached trie node, either directly
// from the cache, or by regenerating it from the collapsed node.
func (n *cachedNode) rlp() []byte {
	if node, ok := n.node.(rawNode); ok {
		return node
	}
	return nodeToBytes(n.node)
}

// obj returns the decoded and expanded trie node, either directly from the cache,
// or by regenerating it from the rlp encoded blob.
func (n *cachedNode) obj(hash common.Hash) node {
	if node, ok := n.node.(rawNode); ok {
		// The raw-blob format nodes are loaded either from the
		// clean cache or the database, they are all in their own
		// copy and safe to use unsafe decoder.
		return mustDecodeNodeUnsafe(hash[:], node)
	}
	return expandNode(hash[:], n.node)
}
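// The sketch below is illustrative only and not part of the production code
// paths: it shows how a cachedNode entry might hold a collapsed node and then
// regenerate its RLP blob via rlp() and its expanded form via obj(). The
// function name exampleCachedNodeRoundTrip and the zero hash are assumptions
// made for demonstration; hexToCompact and nodeToBytes are helpers defined
// elsewhere in this package.
func exampleCachedNodeRoundTrip() {
	// A collapsed short node whose key is assumed to already be in compact
	// encoding, as the committer would hand it to the database layer.
	collapsed := &rawShortNode{
		Key: hexToCompact([]byte{1, 2, 16}),
		Val: valueNode("leaf value"),
	}
	entry := &cachedNode{
		node: collapsed,
		size: uint16(len(nodeToBytes(collapsed))),
	}
	blob := entry.rlp()                  // regenerated RLP encoding of the collapsed node
	expanded := entry.obj(common.Hash{}) // expanded *shortNode (dummy hash, illustration only)
	fmt.Printf("rlp=%d bytes, expanded=%T\n", len(blob), expanded)
}
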
// forChilds invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
func (n *cachedNode) forChilds(onChild func(hash common.Hash)) {
	for child := range n.children {
		onChild(child)
	}
	if _, ok := n.node.(rawNode); !ok {
		forGatherChildren(n.node, onChild)
	}
}

// forGatherChildren traverses the node hierarchy of a collapsed storage node and
// invokes the callback for all the hashnode children.
func forGatherChildren(n node, onChild func(hash common.Hash)) {
	switch n := n.(type) {
	case *rawShortNode:
		forGatherChildren(n.Val, onChild)
	case rawFullNode:
		for i := 0; i < 16; i++ {
			forGatherChildren(n[i], onChild)
		}
	case hashNode:
		onChild(common.BytesToHash(n))
	case valueNode, nil, rawNode:
	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

// simplifyNode traverses the hierarchy of an expanded memory node and discards
// all the internal caches, returning a node that only contains the raw data.
func simplifyNode(n node) node {
	switch n := n.(type) {
	case *shortNode:
		// Short nodes discard the flags and cascade
		return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)}

	case *fullNode:
		// Full nodes discard the flags and cascade
		node := rawFullNode(n.Children)
		for i := 0; i < len(node); i++ {
			if node[i] != nil {
				node[i] = simplifyNode(node[i])
			}
		}
		return node

	case valueNode, hashNode, rawNode:
		return n

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

// expandNode traverses the node hierarchy of a collapsed storage node and converts
// all fields and keys into expanded memory form.
func expandNode(hash hashNode, n node) node {
	switch n := n.(type) {
	case *rawShortNode:
		// Short nodes need key and child expansion
		return &shortNode{
			Key: compactToHex(n.Key),
			Val: expandNode(nil, n.Val),
			flags: nodeFlag{
				hash: hash,
			},
		}

	case rawFullNode:
		// Full nodes need child expansion
		node := &fullNode{
			flags: nodeFlag{
				hash: hash,
			},
		}
		for i := 0; i < len(node.Children); i++ {
			if n[i] != nil {
				node.Children[i] = expandNode(nil, n[i])
			}
		}
		return node

	case valueNode, hashNode:
		return n

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}
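
// Illustrative sketch, not part of the upstream file: a short node is stripped
// of its caches with simplifyNode and later re-expanded with expandNode. The
// function name exampleSimplifyExpand and the zero hash are assumptions made
// for demonstration; hexToCompact is the package's key-encoding helper, and
// the key is assumed to already be in compact form, as the committer produces.
func exampleSimplifyExpand() {
	collapsed := &shortNode{
		Key:   hexToCompact([]byte{1, 2, 3, 16}), // hex key with terminator, in compact form
		Val:   valueNode("payload"),
		flags: nodeFlag{dirty: true},
	}
	stripped := simplifyNode(collapsed) // *rawShortNode, flags discarded
	expanded := expandNode(hashNode(common.Hash{}.Bytes()), stripped) // *shortNode, hex key restored
	fmt.Printf("%T -> %T -> %T\n", collapsed, stripped, expanded)
}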