Roy Crihfield 2024-04-18 00:21:15 +08:00
parent 9248de596d
commit 53b6694af0
6 changed files with 1181 additions and 0 deletions

@ -0,0 +1,62 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package testutil
import (
crand "crypto/rand"
"encoding/binary"
mrand "math/rand"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/cerc-io/ipld-eth-statedb/trie_by_cid/trie/trienode"
)
// prng is a pseudo-random number generator seeded with strong randomness.
var prng = initRand()
func initRand() *mrand.Rand {
var seed [8]byte
crand.Read(seed[:])
rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:]))))
return rnd
}
// RandBytes generates a random byte slice with the specified length.
func RandBytes(n int) []byte {
r := make([]byte, n)
prng.Read(r)
return r
}
// RandomHash generates a random blob of data and returns it as a hash.
func RandomHash() common.Hash {
return common.BytesToHash(RandBytes(common.HashLength))
}
// RandomAddress generates a random blob of data and returns it as an address.
func RandomAddress() common.Address {
return common.BytesToAddress(RandBytes(common.AddressLength))
}
// RandomNode generates a random node.
func RandomNode() *trienode.Node {
val := RandBytes(100)
return trienode.New(crypto.Keccak256Hash(val), val)
}
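// Illustrative sketch (not part of the upstream file): how these helpers might
// be combined in a test to build a trienode.NodeSet with random content. The
// owner hash, path lengths and node count are arbitrary choices.
func RandomNodeSet() *trienode.NodeSet {
	set := trienode.NewNodeSet(RandomHash())
	for i := 0; i < 10; i++ {
		// Use short random paths; duplicates simply overwrite each other.
		set.AddNode(RandBytes(4), RandomNode())
	}
	return set
}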

@ -0,0 +1,199 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
package trienode
import (
"fmt"
"sort"
"strings"
"github.com/ethereum/go-ethereum/common"
)
// Node is a wrapper which contains the encoded blob of the trie node and its
// node hash. It is general enough to represent a trie node from any of the
// different trie implementations.
type Node struct {
Hash common.Hash // Node hash, empty for deleted node
Blob []byte // Encoded node blob, nil for the deleted node
}
// Size returns the total memory size used by this node.
func (n *Node) Size() int {
return len(n.Blob) + common.HashLength
}
// IsDeleted reports whether the node is marked as deleted.
func (n *Node) IsDeleted() bool {
return len(n.Blob) == 0
}
// New constructs a node with provided node information.
func New(hash common.Hash, blob []byte) *Node {
return &Node{Hash: hash, Blob: blob}
}
// NewDeleted constructs a node which is deleted.
func NewDeleted() *Node { return New(common.Hash{}, nil) }
// leaf represents a trie leaf node
type leaf struct {
Blob []byte // raw blob of leaf
Parent common.Hash // the hash of parent node
}
// NodeSet contains a set of nodes collected during the commit operation.
// Each node is keyed by path. It's not thread-safe to use.
type NodeSet struct {
Owner common.Hash
Leaves []*leaf
Nodes map[string]*Node
updates int // the count of updated and inserted nodes
deletes int // the count of deleted nodes
}
// NewNodeSet initializes a node set. The owner is zero for the account trie and
// the owning account address hash for storage tries.
func NewNodeSet(owner common.Hash) *NodeSet {
return &NodeSet{
Owner: owner,
Nodes: make(map[string]*Node),
}
}
// ForEachWithOrder iterates the nodes in order from bottom to top and from
// right to left; nodes with the longest paths are iterated first.
func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) {
var paths []string
for path := range set.Nodes {
paths = append(paths, path)
}
// Bottom-up, the longest path first
sort.Sort(sort.Reverse(sort.StringSlice(paths)))
for _, path := range paths {
callback(path, set.Nodes[path])
}
}
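// For illustration (not part of the upstream file): with paths
// {"01", "0102", "02"}, the reverse-lexicographic sort above yields the
// callback order "02", "0102", "01"; a child path such as "0102" is always
// visited before its parent prefix "01", so commits propagate bottom-up.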
// AddNode adds the provided node into the set.
func (set *NodeSet) AddNode(path []byte, n *Node) {
if n.IsDeleted() {
set.deletes += 1
} else {
set.updates += 1
}
set.Nodes[string(path)] = n
}
// Merge adds a set of nodes into the set.
func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error {
if set.Owner != owner {
return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, owner)
}
for path, node := range nodes {
prev, ok := set.Nodes[path]
if ok {
// overwrite happens, revoke the counter
if prev.IsDeleted() {
set.deletes -= 1
} else {
set.updates -= 1
}
}
set.AddNode([]byte(path), node)
}
return nil
}
// AddLeaf adds the provided leaf node into set. TODO(rjl493456442) how can
// we get rid of it?
func (set *NodeSet) AddLeaf(parent common.Hash, blob []byte) {
set.Leaves = append(set.Leaves, &leaf{Blob: blob, Parent: parent})
}
// Size returns the number of updated and deleted nodes in the set.
func (set *NodeSet) Size() (int, int) {
return set.updates, set.deletes
}
// Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can
// we get rid of it?
func (set *NodeSet) Hashes() []common.Hash {
var ret []common.Hash
for _, node := range set.Nodes {
ret = append(ret, node.Hash)
}
return ret
}
// Summary returns a string-representation of the NodeSet.
func (set *NodeSet) Summary() string {
var out = new(strings.Builder)
fmt.Fprintf(out, "nodeset owner: %v\n", set.Owner)
if set.Nodes != nil {
for path, n := range set.Nodes {
// Deletion
if n.IsDeleted() {
fmt.Fprintf(out, " [-]: %x\n", path)
continue
}
// Insertion or update
fmt.Fprintf(out, " [+/*]: %x -> %v \n", path, n.Hash)
}
}
for _, n := range set.Leaves {
fmt.Fprintf(out, "[leaf]: %v\n", n)
}
return out.String()
}
// MergedNodeSet represents a merged node set for a group of tries.
type MergedNodeSet struct {
Sets map[common.Hash]*NodeSet
}
// NewMergedNodeSet initializes an empty merged set.
func NewMergedNodeSet() *MergedNodeSet {
return &MergedNodeSet{Sets: make(map[common.Hash]*NodeSet)}
}
// NewWithNodeSet constructs a merged nodeset with the provided single set.
func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
merged := NewMergedNodeSet()
merged.Merge(set)
return merged
}
// Merge merges the provided dirty nodes of a trie into the set. It is assumed
// that no two sets belonging to the same trie will be merged twice.
func (set *MergedNodeSet) Merge(other *NodeSet) error {
subset, present := set.Sets[other.Owner]
if present {
return subset.Merge(other.Owner, other.Nodes)
}
set.Sets[other.Owner] = other
return nil
}
// Flatten returns a two-dimensional map for internal nodes.
func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node {
nodes := make(map[common.Hash]map[string]*Node)
for owner, set := range set.Sets {
nodes[owner] = set.Nodes
}
return nodes
}
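// Usage sketch (not part of the upstream file): a typical flow of collecting
// dirty nodes from an account trie and one storage trie, then flattening them
// for the database layer. The hashes, paths and blobs are placeholders.
func exampleMergeAndFlatten() map[common.Hash]map[string]*Node {
	// Dirty nodes produced by committing the account trie (owner is zero).
	accounts := NewNodeSet(common.Hash{})
	accounts.AddNode([]byte{0x01}, New(common.HexToHash("0xaa"), []byte{0xde, 0xad}))
	// Dirty nodes produced by committing one storage trie, keyed by the
	// owning account's address hash.
	storage := NewNodeSet(common.HexToHash("0x01"))
	storage.AddNode([]byte{0x02}, NewDeleted())
	merged := NewMergedNodeSet()
	_ = merged.Merge(accounts)
	_ = merged.Merge(storage)
	return merged.Flatten()
}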

@ -0,0 +1,162 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trienode
import (
"errors"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
)
// ProofSet stores a set of trie nodes. It implements trie.Database and can also
// act as a cache for another trie.Database.
type ProofSet struct {
nodes map[string][]byte
order []string
dataSize int
lock sync.RWMutex
}
// NewProofSet creates an empty node set
func NewProofSet() *ProofSet {
return &ProofSet{
nodes: make(map[string][]byte),
}
}
// Put stores a new node in the set
func (db *ProofSet) Put(key []byte, value []byte) error {
db.lock.Lock()
defer db.lock.Unlock()
if _, ok := db.nodes[string(key)]; ok {
return nil
}
keystr := string(key)
db.nodes[keystr] = common.CopyBytes(value)
db.order = append(db.order, keystr)
db.dataSize += len(value)
return nil
}
// Delete removes a node from the set
func (db *ProofSet) Delete(key []byte) error {
db.lock.Lock()
defer db.lock.Unlock()
delete(db.nodes, string(key))
return nil
}
// Get returns a stored node
func (db *ProofSet) Get(key []byte) ([]byte, error) {
db.lock.RLock()
defer db.lock.RUnlock()
if entry, ok := db.nodes[string(key)]; ok {
return entry, nil
}
return nil, errors.New("not found")
}
// Has returns true if the node set contains the given key
func (db *ProofSet) Has(key []byte) (bool, error) {
_, err := db.Get(key)
return err == nil, nil
}
// KeyCount returns the number of nodes in the set
func (db *ProofSet) KeyCount() int {
db.lock.RLock()
defer db.lock.RUnlock()
return len(db.nodes)
}
// DataSize returns the aggregated data size of nodes in the set
func (db *ProofSet) DataSize() int {
db.lock.RLock()
defer db.lock.RUnlock()
return db.dataSize
}
// List converts the node set to a ProofList
func (db *ProofSet) List() ProofList {
db.lock.RLock()
defer db.lock.RUnlock()
var values ProofList
for _, key := range db.order {
values = append(values, db.nodes[key])
}
return values
}
// Store writes the contents of the set to the given database
func (db *ProofSet) Store(target ethdb.KeyValueWriter) {
db.lock.RLock()
defer db.lock.RUnlock()
for key, value := range db.nodes {
target.Put([]byte(key), value)
}
}
// ProofList stores an ordered list of trie nodes. It implements ethdb.KeyValueWriter.
type ProofList []rlp.RawValue
// Store writes the contents of the list to the given database
func (n ProofList) Store(db ethdb.KeyValueWriter) {
for _, node := range n {
db.Put(crypto.Keccak256(node), node)
}
}
// Set converts the node list to a ProofSet
func (n ProofList) Set() *ProofSet {
db := NewProofSet()
n.Store(db)
return db
}
// Put stores a new node at the end of the list
func (n *ProofList) Put(key []byte, value []byte) error {
*n = append(*n, value)
return nil
}
// Delete panics as there's no reason to remove a node from the list.
func (n *ProofList) Delete(key []byte) error {
panic("not supported")
}
// DataSize returns the aggregated data size of nodes in the list
func (n ProofList) DataSize() int {
var size int
for _, node := range n {
size += len(node)
}
return size
}
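// Usage sketch (not part of the upstream file): proof nodes gathered as an
// ordered ProofList can be converted to a ProofSet (nodes keyed by their
// keccak256 hash) and queried, or written out to any ethdb.KeyValueWriter.
// The node contents below are placeholders.
func exampleProofRoundTrip() ([]byte, error) {
	var list ProofList
	_ = list.Put(nil, []byte{0xc1, 0x80}) // ProofList ignores the key
	_ = list.Put(nil, []byte{0xc2, 0x01, 0x02})
	set := list.Set() // keys each node by crypto.Keccak256(node)
	return set.Get(crypto.Keccak256(list[0]))
}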

@ -0,0 +1,277 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
package triestate
import (
"errors"
"fmt"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
"github.com/cerc-io/ipld-eth-statedb/trie_by_cid/trie/trienode"
)
// Trie is an Ethereum state trie, which can be implemented as an Ethereum
// Merkle Patricia tree or as a Verkle tree.
type Trie interface {
// Get returns the value for key stored in the trie.
Get(key []byte) ([]byte, error)
// Update associates key with value in the trie.
Update(key, value []byte) error
// Delete removes any existing value for key from the trie.
Delete(key []byte) error
// Commit commits the trie and returns the set of dirty nodes generated,
// along with the new root hash.
Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
}
// TrieLoader wraps functions to load tries.
type TrieLoader interface {
// OpenTrie opens the main account trie.
OpenTrie(root common.Hash) (Trie, error)
// OpenStorageTrie opens the storage trie of an account.
OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
}
// Set represents a collection of mutated states during a state transition.
// The value refers to the original content of state before the transition
// is made. Nil means that the state was not present previously.
type Set struct {
Accounts map[common.Address][]byte // Mutated account set, nil means the account was not present
Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
Incomplete map[common.Address]struct{} // Indicator whether the storage is incomplete due to large deletion
size common.StorageSize // Approximate size of set
}
// New constructs the state set with provided data.
func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, incomplete map[common.Address]struct{}) *Set {
return &Set{
Accounts: accounts,
Storages: storages,
Incomplete: incomplete,
}
}
// Size returns the approximate memory size occupied by the set.
func (s *Set) Size() common.StorageSize {
if s.size != 0 {
return s.size
}
for _, account := range s.Accounts {
s.size += common.StorageSize(common.AddressLength + len(account))
}
for _, slots := range s.Storages {
for _, val := range slots {
s.size += common.StorageSize(common.HashLength + len(val))
}
s.size += common.StorageSize(common.AddressLength)
}
s.size += common.StorageSize(common.AddressLength * len(s.Incomplete))
return s.size
}
// context wraps all fields for executing state diffs.
type context struct {
prevRoot common.Hash
postRoot common.Hash
accounts map[common.Address][]byte
storages map[common.Address]map[common.Hash][]byte
accountTrie Trie
nodes *trienode.MergedNodeSet
}
// Apply traverses the provided state diffs, applies them to the associated
// post-state and returns the generated dirty trie nodes. The state can be
// loaded via the provided trie loader.
func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, loader TrieLoader) (map[common.Hash]map[string]*trienode.Node, error) {
tr, err := loader.OpenTrie(postRoot)
if err != nil {
return nil, err
}
ctx := &context{
prevRoot: prevRoot,
postRoot: postRoot,
accounts: accounts,
storages: storages,
accountTrie: tr,
nodes: trienode.NewMergedNodeSet(),
}
for addr, account := range accounts {
var err error
if len(account) == 0 {
err = deleteAccount(ctx, loader, addr)
} else {
err = updateAccount(ctx, loader, addr)
}
if err != nil {
return nil, fmt.Errorf("failed to revert state, err: %w", err)
}
}
root, result, err := tr.Commit(false)
if err != nil {
return nil, err
}
if root != prevRoot {
return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root)
}
if err := ctx.nodes.Merge(result); err != nil {
return nil, err
}
return ctx.nodes.Flatten(), nil
}
// updateAccount handles an account that was present in the prev-state and may
// or may not be present in the post-state. It applies the reverse diff and
// verifies that the storage root matches the one in the prev-state account.
func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error {
// The account was present in prev-state, decode it from the
// 'slim-rlp' format bytes.
h := newHasher()
defer h.release()
addrHash := h.hash(addr.Bytes())
prev, err := types.FullAccount(ctx.accounts[addr])
if err != nil {
return err
}
// The account may or may not be present in the post-state; try to
// load it and decode it if found.
blob, err := ctx.accountTrie.Get(addrHash.Bytes())
if err != nil {
return err
}
post := types.NewEmptyStateAccount()
if len(blob) != 0 {
if err := rlp.DecodeBytes(blob, &post); err != nil {
return err
}
}
// Apply all storage changes into the post-state storage trie.
st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
if err != nil {
return err
}
for key, val := range ctx.storages[addr] {
var err error
if len(val) == 0 {
err = st.Delete(key.Bytes())
} else {
err = st.Update(key.Bytes(), val)
}
if err != nil {
return err
}
}
root, result, err := st.Commit(false)
if err != nil {
return err
}
if root != prev.Root {
return errors.New("failed to reset storage trie")
}
// The returned set can be nil if the storage trie is not changed
// at all.
if result != nil {
if err := ctx.nodes.Merge(result); err != nil {
return err
}
}
// Write the prev-state account into the main trie
full, err := rlp.EncodeToBytes(prev)
if err != nil {
return err
}
return ctx.accountTrie.Update(addrHash.Bytes(), full)
}
// deleteAccount handles an account that was not present in the prev-state but
// is expected to be present in the post-state. It applies the reverse diff and
// verifies that the account and its storage are wiped out correctly.
func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error {
// The account must be present in the post-state; load it.
h := newHasher()
defer h.release()
addrHash := h.hash(addr.Bytes())
blob, err := ctx.accountTrie.Get(addrHash.Bytes())
if err != nil {
return err
}
if len(blob) == 0 {
return fmt.Errorf("account is non-existent %#x", addrHash)
}
var post types.StateAccount
if err := rlp.DecodeBytes(blob, &post); err != nil {
return err
}
st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
if err != nil {
return err
}
for key, val := range ctx.storages[addr] {
if len(val) != 0 {
return errors.New("expect storage deletion")
}
if err := st.Delete(key.Bytes()); err != nil {
return err
}
}
root, result, err := st.Commit(false)
if err != nil {
return err
}
if root != types.EmptyRootHash {
return errors.New("failed to clear storage trie")
}
// The returned set can be nil if the storage trie is not changed
// at all.
if result != nil {
if err := ctx.nodes.Merge(result); err != nil {
return err
}
}
// Delete the post-state account from the main trie.
return ctx.accountTrie.Delete(addrHash.Bytes())
}
// hasher is used to compute the keccak256 hash of the provided data.
type hasher struct{ sha crypto.KeccakState }
var hasherPool = sync.Pool{
New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
}
func newHasher() *hasher {
return hasherPool.Get().(*hasher)
}
func (h *hasher) hash(data []byte) common.Hash {
return crypto.HashData(h.sha, data)
}
func (h *hasher) release() {
hasherPool.Put(h)
}
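// Usage sketch (not part of the upstream file): rolling a state root back by
// one transition. The loader is assumed to be any TrieLoader implementation
// (for example one backed by this package's trie database); prevRoot, postRoot
// and the diff maps would come from recorded state history.
func exampleRevert(loader TrieLoader, prevRoot, postRoot common.Hash,
	accounts map[common.Address][]byte,
	storages map[common.Address]map[common.Hash][]byte) error {
	nodes, err := Apply(prevRoot, postRoot, accounts, storages, loader)
	if err != nil {
		return err
	}
	// nodes is keyed by trie owner, then by node path; it is what a database
	// backend would persist to materialize the prev-state again.
	_ = nodes
	return nil
}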

@ -0,0 +1,342 @@
// Copyright 2023 go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"encoding/binary"
"sync"
"github.com/crate-crypto/go-ipa/bandersnatch/fr"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/metrics"
"github.com/gballet/go-verkle"
"github.com/holiman/uint256"
)
const (
// The spec of verkle key encoding can be found here.
// https://notes.ethereum.org/@vbuterin/verkle_tree_eip#Tree-embedding
VersionLeafKey = 0
BalanceLeafKey = 1
NonceLeafKey = 2
CodeKeccakLeafKey = 3
CodeSizeLeafKey = 4
)
var (
zero = uint256.NewInt(0)
verkleNodeWidthLog2 = 8
headerStorageOffset = uint256.NewInt(64)
mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(256), 31-uint(verkleNodeWidthLog2))
codeOffset = uint256.NewInt(128)
verkleNodeWidth = uint256.NewInt(256)
codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset)
index0Point *verkle.Point // pre-computed commitment of polynomial [2+256*64]
// cacheHitGauge is the metric tracking how many cache hits have occurred.
cacheHitGauge = metrics.NewRegisteredGauge("trie/verkle/cache/hit", nil)
// cacheMissGauge is the metric tracking how many cache misses have occurred.
cacheMissGauge = metrics.NewRegisteredGauge("trie/verkle/cache/miss", nil)
)
func init() {
// The byte array is the Marshalled output of the point computed as such:
//
// var (
// config = verkle.GetConfig()
// fr verkle.Fr
// )
// verkle.FromLEBytes(&fr, []byte{2, 64})
// point := config.CommitToPoly([]verkle.Fr{fr}, 1)
index0Point = new(verkle.Point)
err := index0Point.SetBytes([]byte{34, 25, 109, 242, 193, 5, 144, 224, 76, 52, 189, 92, 197, 126, 9, 145, 27, 152, 199, 130, 165, 3, 210, 27, 193, 131, 142, 28, 110, 26, 16, 191})
if err != nil {
panic(err)
}
}
// PointCache is an LRU cache for storing evaluated address commitments.
type PointCache struct {
lru lru.BasicLRU[string, *verkle.Point]
lock sync.RWMutex
}
// NewPointCache returns the cache with specified size.
func NewPointCache(maxItems int) *PointCache {
return &PointCache{
lru: lru.NewBasicLRU[string, *verkle.Point](maxItems),
}
}
// Get returns the cached commitment for the specified address, computing it
// on the fly if it is not already cached.
func (c *PointCache) Get(addr []byte) *verkle.Point {
c.lock.Lock()
defer c.lock.Unlock()
p, ok := c.lru.Get(string(addr))
if ok {
cacheHitGauge.Inc(1)
return p
}
cacheMissGauge.Inc(1)
p = evaluateAddressPoint(addr)
c.lru.Add(string(addr), p)
return p
}
// GetStem returns the first 31 bytes of the tree key as the tree stem. It only
// works for the account metadata whose treeIndex is 0.
func (c *PointCache) GetStem(addr []byte) []byte {
p := c.Get(addr)
return pointToHash(p, 0)[:31]
}
// GetTreeKey performs both the work of the spec's get_tree_key function, and that
// of pedersen_hash: it builds the polynomial in pedersen_hash without having to
// create a mostly zero-filled buffer and "type cast" it to a 128-long 16-byte
// array. Since at most the first 5 coefficients of the polynomial will be non-zero,
// these 5 coefficients are created directly.
func GetTreeKey(address []byte, treeIndex *uint256.Int, subIndex byte) []byte {
if len(address) < 32 {
var aligned [32]byte
address = append(aligned[:32-len(address)], address...)
}
// poly = [2+256*64, address_le_low, address_le_high, tree_index_le_low, tree_index_le_high]
var poly [5]fr.Element
// 32-byte address, interpreted as two little endian
// 16-byte numbers.
verkle.FromLEBytes(&poly[1], address[:16])
verkle.FromLEBytes(&poly[2], address[16:])
// treeIndex must be interpreted as a 32-byte aligned little-endian integer.
// e.g: if treeIndex is 0xAABBCC, we need the byte representation to be 0xCCBBAA00...00.
// poly[3] = LE({CC,BB,AA,00...0}) (16 bytes), poly[4]=LE({00,00,...}) (16 bytes).
//
// To avoid unnecessary endianness conversions for go-ipa, we do some trick:
// - poly[3]'s byte representation is the same as the *top* 16 bytes (trieIndexBytes[16:]) of
// 32-byte aligned big-endian representation (BE({00,...,AA,BB,CC})).
// - poly[4]'s byte representation is the same as the *low* 16 bytes (trieIndexBytes[:16]) of
// the 32-byte aligned big-endian representation (BE({00,00,...}).
trieIndexBytes := treeIndex.Bytes32()
verkle.FromBytes(&poly[3], trieIndexBytes[16:])
verkle.FromBytes(&poly[4], trieIndexBytes[:16])
cfg := verkle.GetConfig()
ret := cfg.CommitToPoly(poly[:], 0)
// add a constant point corresponding to poly[0]=[2+256*64].
ret.Add(ret, index0Point)
return pointToHash(ret, subIndex)
}
// GetTreeKeyWithEvaluatedAddress is basically identical to GetTreeKey; the only
// difference is that part of the polynomial is already evaluated.
//
// Specifically, poly = [2+256*64, address_le_low, address_le_high] is already
// evaluated.
func GetTreeKeyWithEvaluatedAddress(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte {
var poly [5]fr.Element
poly[0].SetZero()
poly[1].SetZero()
poly[2].SetZero()
// little-endian, 32-byte aligned treeIndex
var index [32]byte
for i := 0; i < len(treeIndex); i++ {
binary.LittleEndian.PutUint64(index[i*8:(i+1)*8], treeIndex[i])
}
verkle.FromLEBytes(&poly[3], index[:16])
verkle.FromLEBytes(&poly[4], index[16:])
cfg := verkle.GetConfig()
ret := cfg.CommitToPoly(poly[:], 0)
// add the pre-evaluated address
ret.Add(ret, evaluated)
return pointToHash(ret, subIndex)
}
// VersionKey returns the verkle tree key of the version field for the specified account.
func VersionKey(address []byte) []byte {
return GetTreeKey(address, zero, VersionLeafKey)
}
// BalanceKey returns the verkle tree key of the balance field for the specified account.
func BalanceKey(address []byte) []byte {
return GetTreeKey(address, zero, BalanceLeafKey)
}
// NonceKey returns the verkle tree key of the nonce field for the specified account.
func NonceKey(address []byte) []byte {
return GetTreeKey(address, zero, NonceLeafKey)
}
// CodeKeccakKey returns the verkle tree key of the code keccak field for
// the specified account.
func CodeKeccakKey(address []byte) []byte {
return GetTreeKey(address, zero, CodeKeccakLeafKey)
}
// CodeSizeKey returns the verkle tree key of the code size field for the
// specified account.
func CodeSizeKey(address []byte) []byte {
return GetTreeKey(address, zero, CodeSizeLeafKey)
}
func codeChunkIndex(chunk *uint256.Int) (*uint256.Int, byte) {
var (
chunkOffset = new(uint256.Int).Add(codeOffset, chunk)
treeIndex = new(uint256.Int).Div(chunkOffset, verkleNodeWidth)
subIndexMod = new(uint256.Int).Mod(chunkOffset, verkleNodeWidth)
)
var subIndex byte
if len(subIndexMod) != 0 {
subIndex = byte(subIndexMod[0])
}
return treeIndex, subIndex
}
// CodeChunkKey returns the verkle tree key of the code chunk for the
// specified account.
func CodeChunkKey(address []byte, chunk *uint256.Int) []byte {
treeIndex, subIndex := codeChunkIndex(chunk)
return GetTreeKey(address, treeIndex, subIndex)
}
func storageIndex(bytes []byte) (*uint256.Int, byte) {
// If the storage slot is in the header, we need to add the header offset.
var key uint256.Int
key.SetBytes(bytes)
if key.Cmp(codeStorageDelta) < 0 {
// This addition is always safe; it can't ever overflow since pos<codeStorageDelta.
key.Add(headerStorageOffset, &key)
// In this branch, the tree-index is zero since we're in the account header,
// and the sub-index is the LSB of the modified storage key.
return zero, byte(key[0] & 0xFF)
}
// We first divide by VerkleNodeWidth to create room to avoid an overflow next.
key.Rsh(&key, uint(verkleNodeWidthLog2))
// We add mainStorageOffset/VerkleNodeWidth which can't overflow.
key.Add(&key, mainStorageOffsetLshVerkleNodeWidth)
// The sub-index is the LSB of the original storage key, since mainStorageOffset
// doesn't affect this byte, so we can avoid masks or shifts.
return &key, byte(key[0] & 0xFF)
}
// StorageSlotKey returns the verkle tree key of the storage slot for the
// specified account.
func StorageSlotKey(address []byte, storageKey []byte) []byte {
treeIndex, subIndex := storageIndex(storageKey)
return GetTreeKey(address, treeIndex, subIndex)
}
// VersionKeyWithEvaluatedAddress returns the verkle tree key of the version
// field for the specified account. The difference from VersionKey is that the
// address evaluation is already computed, minimizing the computational overhead.
func VersionKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
return GetTreeKeyWithEvaluatedAddress(evaluated, zero, VersionLeafKey)
}
// BalanceKeyWithEvaluatedAddress returns the verkle tree key of the balance
// field for the specified account. The difference from BalanceKey is that the
// address evaluation is already computed, minimizing the computational overhead.
func BalanceKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
return GetTreeKeyWithEvaluatedAddress(evaluated, zero, BalanceLeafKey)
}
// NonceKeyWithEvaluatedAddress returns the verkle tree key of the nonce
// field for the specified account. The difference from NonceKey is that the
// address evaluation is already computed, minimizing the computational overhead.
func NonceKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
return GetTreeKeyWithEvaluatedAddress(evaluated, zero, NonceLeafKey)
}
// CodeKeccakKeyWithEvaluatedAddress returns the verkle tree key of the code
// keccak for the specified account. The difference from CodeKeccakKey is that
// the address evaluation is already computed, minimizing the computational overhead.
func CodeKeccakKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
return GetTreeKeyWithEvaluatedAddress(evaluated, zero, CodeKeccakLeafKey)
}
// CodeSizeKeyWithEvaluatedAddress returns the verkle tree key of the code
// size for the specified account. The difference from CodeSizeKey is that the
// address evaluation is already computed, minimizing the computational overhead.
func CodeSizeKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
return GetTreeKeyWithEvaluatedAddress(evaluated, zero, CodeSizeLeafKey)
}
// CodeChunkKeyWithEvaluatedAddress returns the verkle tree key of the code
// chunk for the specified account. The difference from CodeChunkKey is that
// the address evaluation is already computed, minimizing the computational overhead.
func CodeChunkKeyWithEvaluatedAddress(addressPoint *verkle.Point, chunk *uint256.Int) []byte {
treeIndex, subIndex := codeChunkIndex(chunk)
return GetTreeKeyWithEvaluatedAddress(addressPoint, treeIndex, subIndex)
}
// StorageSlotKeyWithEvaluatedAddress returns the verkle tree key of the storage
// slot for the specified account. The difference from StorageSlotKey is that
// the address evaluation is already computed, minimizing the computational overhead.
func StorageSlotKeyWithEvaluatedAddress(evaluated *verkle.Point, storageKey []byte) []byte {
treeIndex, subIndex := storageIndex(storageKey)
return GetTreeKeyWithEvaluatedAddress(evaluated, treeIndex, subIndex)
}
func pointToHash(evaluated *verkle.Point, suffix byte) []byte {
// The output of Bytes() is big endian for banderwagon. This
// introduces an imbalance in the tree, because hashes are
// elements of a 253-bit field. This means more than half the
// tree would be empty. To avoid this problem, use a little
// endian commitment and chop the MSB.
bytes := evaluated.Bytes()
for i := 0; i < 16; i++ {
bytes[31-i], bytes[i] = bytes[i], bytes[31-i]
}
bytes[31] = suffix
return bytes[:]
}
func evaluateAddressPoint(address []byte) *verkle.Point {
if len(address) < 32 {
var aligned [32]byte
address = append(aligned[:32-len(address)], address...)
}
var poly [3]fr.Element
poly[0].SetZero()
// 32-byte address, interpreted as two little endian
// 16-byte numbers.
verkle.FromLEBytes(&poly[1], address[:16])
verkle.FromLEBytes(&poly[2], address[16:])
cfg := verkle.GetConfig()
ret := cfg.CommitToPoly(poly[:], 0)
// add a constant point
ret.Add(ret, index0Point)
return ret
}
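// Usage sketch (not part of the upstream file): when deriving several tree
// keys for the same account, evaluating the address commitment once (here via
// a small PointCache) and reusing it avoids recomputing the most expensive
// part of the Pedersen hash for every key. The address, slot and cache size
// below are placeholders.
func exampleAccountKeys(addr []byte, slot []byte) (balanceKey, slotKey []byte) {
	cache := NewPointCache(16)
	eval := cache.Get(addr) // computed once, cached afterwards
	balanceKey = BalanceKeyWithEvaluatedAddress(eval)
	slotKey = StorageSlotKeyWithEvaluatedAddress(eval, slot)
	return balanceKey, slotKey
}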

@ -0,0 +1,139 @@
// Copyright 2023 go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
package utils
import (
"bytes"
"testing"
"github.com/gballet/go-verkle"
"github.com/holiman/uint256"
)
func TestTreeKey(t *testing.T) {
var (
address = []byte{0x01}
addressEval = evaluateAddressPoint(address)
smallIndex = uint256.NewInt(1)
largeIndex = uint256.NewInt(10000)
smallStorage = []byte{0x1}
largeStorage = bytes.Repeat([]byte{0xff}, 16)
)
if !bytes.Equal(VersionKey(address), VersionKeyWithEvaluatedAddress(addressEval)) {
t.Fatal("Unmatched version key")
}
if !bytes.Equal(BalanceKey(address), BalanceKeyWithEvaluatedAddress(addressEval)) {
t.Fatal("Unmatched balance key")
}
if !bytes.Equal(NonceKey(address), NonceKeyWithEvaluatedAddress(addressEval)) {
t.Fatal("Unmatched nonce key")
}
if !bytes.Equal(CodeKeccakKey(address), CodeKeccakKeyWithEvaluatedAddress(addressEval)) {
t.Fatal("Unmatched code keccak key")
}
if !bytes.Equal(CodeSizeKey(address), CodeSizeKeyWithEvaluatedAddress(addressEval)) {
t.Fatal("Unmatched code size key")
}
if !bytes.Equal(CodeChunkKey(address, smallIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, smallIndex)) {
t.Fatal("Unmatched code chunk key")
}
if !bytes.Equal(CodeChunkKey(address, largeIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, largeIndex)) {
t.Fatal("Unmatched code chunk key")
}
if !bytes.Equal(StorageSlotKey(address, smallStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, smallStorage)) {
t.Fatal("Unmatched storage slot key")
}
if !bytes.Equal(StorageSlotKey(address, largeStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, largeStorage)) {
t.Fatal("Unmatched storage slot key")
}
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cerc-io/ipld-eth-statedb/trie_by_cid/trie/utils
// cpu: VirtualApple @ 2.50GHz
// BenchmarkTreeKey
// BenchmarkTreeKey-8 398731 2961 ns/op 32 B/op 1 allocs/op
func BenchmarkTreeKey(b *testing.B) {
// Initialize the IPA settings which can be pretty expensive.
verkle.GetConfig()
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
BalanceKey([]byte{0x01})
}
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cerc-io/ipld-eth-statedb/trie_by_cid/trie/utils
// cpu: VirtualApple @ 2.50GHz
// BenchmarkTreeKeyWithEvaluation
// BenchmarkTreeKeyWithEvaluation-8 513855 2324 ns/op 32 B/op 1 allocs/op
func BenchmarkTreeKeyWithEvaluation(b *testing.B) {
// Initialize the IPA settings which can be pretty expensive.
verkle.GetConfig()
addr := []byte{0x01}
eval := evaluateAddressPoint(addr)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
BalanceKeyWithEvaluatedAddress(eval)
}
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cerc-io/ipld-eth-statedb/trie_by_cid/trie/utils
// cpu: VirtualApple @ 2.50GHz
// BenchmarkStorageKey
// BenchmarkStorageKey-8 230516 4584 ns/op 96 B/op 3 allocs/op
func BenchmarkStorageKey(b *testing.B) {
// Initialize the IPA settings which can be pretty expensive.
verkle.GetConfig()
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
StorageSlotKey([]byte{0x01}, bytes.Repeat([]byte{0xff}, 32))
}
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cerc-io/ipld-eth-statedb/trie_by_cid/trie/utils
// cpu: VirtualApple @ 2.50GHz
// BenchmarkStorageKeyWithEvaluation
// BenchmarkStorageKeyWithEvaluation-8 320125 3753 ns/op 96 B/op 3 allocs/op
func BenchmarkStorageKeyWithEvaluation(b *testing.B) {
// Initialize the IPA settings which can be pretty expensive.
verkle.GetConfig()
addr := []byte{0x01}
eval := evaluateAddressPoint(addr)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
StorageSlotKeyWithEvaluatedAddress(eval, bytes.Repeat([]byte{0xff}, 32))
}
}