// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package snap

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"math/big"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/trienode"
	"golang.org/x/crypto/sha3"
	"golang.org/x/exp/slices"
)
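
// TestHashing cross-checks the legacy hasher.Sum code path against the
// crypto.KeccakState Read path and verifies both produce identical digests.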
func TestHashing(t *testing.T) {
	t.Parallel()

	var bytecodes = make([][]byte, 10)
	for i := 0; i < len(bytecodes); i++ {
		buf := make([]byte, 100)
		rand.Read(buf)
		bytecodes[i] = buf
	}
	var want, got string
	var old = func() {
		hasher := sha3.NewLegacyKeccak256()
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hash := hasher.Sum(nil)
			got = fmt.Sprintf("%v\n%v", got, hash)
		}
	}
	var new = func() {
		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
		var hash = make([]byte, 32)
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Read(hash)
			want = fmt.Sprintf("%v\n%v", want, hash)
		}
	}
	old()
	new()
	if want != got {
		t.Errorf("want\n%v\ngot\n%v\n", want, got)
	}
}

func BenchmarkHashing(b *testing.B) {
	var bytecodes = make([][]byte, 10000)
	for i := 0; i < len(bytecodes); i++ {
		buf := make([]byte, 100)
		rand.Read(buf)
		bytecodes[i] = buf
	}
	var old = func() {
		hasher := sha3.NewLegacyKeccak256()
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Sum(nil)
		}
	}
	var new = func() {
		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
		var hash = make([]byte, 32)
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Read(hash)
		}
	}
	b.Run("old", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			old()
		}
	})
	b.Run("new", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			new()
		}
	})
}
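
// Handler signatures through which testPeer delegates incoming account, storage,
// trie-node and bytecode requests, so individual tests can plug in custom behaviour.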
type (
	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
)
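
// testPeer is a fake remote peer: it answers the syncer's requests straight out
// of in-memory tries via the pluggable handlers above.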
type testPeer struct {
	id            string
	test          *testing.T
	remote        *Syncer
	logger        log.Logger
	accountTrie   *trie.Trie
	accountValues []*kv
	storageTries  map[common.Hash]*trie.Trie
	storageValues map[common.Hash][]*kv

	accountRequestHandler accountHandlerFunc
	storageRequestHandler storageHandlerFunc
	trieRequestHandler    trieHandlerFunc
	codeRequestHandler    codeHandlerFunc

	term func()

	// counters
	nAccountRequests  int
	nStorageRequests  int
	nBytecodeRequests int
	nTrienodeRequests int
}
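
// newTestPeer creates a testPeer wired up with the default well-behaving
// request handlers; tests override individual handlers as needed.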
func newTestPeer(id string, t *testing.T, term func()) *testPeer {
	peer := &testPeer{
		id:                    id,
		test:                  t,
		logger:                log.New("id", id),
		accountRequestHandler: defaultAccountRequestHandler,
		trieRequestHandler:    defaultTrieRequestHandler,
		storageRequestHandler: defaultStorageRequestHandler,
		codeRequestHandler:    defaultCodeRequestHandler,
		term:                  term,
	}
	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
	//peer.logger.SetHandler(stderrHandler)
	return peer
}

func (t *testPeer) setStorageTries(tries map[common.Hash]*trie.Trie) {
	t.storageTries = make(map[common.Hash]*trie.Trie)
	for root, trie := range tries {
		t.storageTries[root] = trie.Copy()
	}
}

func (t *testPeer) ID() string      { return t.id }
func (t *testPeer) Log() log.Logger { return t.logger }

func (t *testPeer) Stats() string {
	return fmt.Sprintf(`Account requests: %d
Storage requests: %d
Bytecode requests: %d
Trienode requests: %d
`, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
}

func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
	t.nAccountRequests++
	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
	return nil
}

func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
	t.nTrienodeRequests++
	go t.trieRequestHandler(t, id, root, paths, bytes)
	return nil
}

func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	t.nStorageRequests++
	if len(accounts) == 1 && origin != nil {
		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
	} else {
		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
	}
	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
	return nil
}

func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	t.nBytecodeRequests++
	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
	go t.codeRequestHandler(t, id, hashes, bytes)
	return nil
}

// defaultTrieRequestHandler is a well-behaving handler for trie healing requests
func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	// Pass the response
	var nodes [][]byte
	for _, pathset := range paths {
		switch len(pathset) {
		case 1:
			blob, _, err := t.accountTrie.GetNode(pathset[0])
			if err != nil {
				t.logger.Info("Error handling req", "error", err)
				break
			}
			nodes = append(nodes, blob)
		default:
			account := t.storageTries[(common.BytesToHash(pathset[0]))]
			for _, path := range pathset[1:] {
				blob, _, err := account.GetNode(path)
				if err != nil {
					t.logger.Info("Error handling req", "error", err)
					break
				}
				nodes = append(nodes, blob)
			}
		}
	}
	t.remote.OnTrieNodes(t, requestId, nodes)
	return nil
}

// defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
		return err
	}
	return nil
}

func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
	var size uint64
	if limit == (common.Hash{}) {
		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	}
	for _, entry := range t.accountValues {
		if size > cap {
			break
		}
		if bytes.Compare(origin[:], entry.k) <= 0 {
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
		}
		// If we've exceeded the request threshold, abort
		if bytes.Compare(entry.k, limit[:]) >= 0 {
			break
		}
	}
	// Unless we send the entire trie, we need to supply proofs
	// Actually, we need to supply proofs either way! This seems to be an implementation
	// quirk in go-ethereum
	proof := light.NewNodeSet()
	if err := t.accountTrie.Prove(origin[:], proof); err != nil {
		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
	}
	if len(keys) > 0 {
		lastK := (keys[len(keys)-1])[:]
		if err := t.accountTrie.Prove(lastK, proof); err != nil {
			t.logger.Error("Could not prove last item", "error", err)
		}
	}
	for _, blob := range proof.NodeList() {
		proofs = append(proofs, blob)
	}
	return keys, vals, proofs
}

// defaultStorageRequestHandler is a well-behaving storage request handler
func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}
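
// defaultCodeRequestHandler is a well-behaving handler for bytecode requests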
func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes {
		bytecodes = append(bytecodes, getCodeByHash(h))
	}
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}

func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	for _, account := range accounts {
		// The first account might start from a different origin and end sooner
		var originHash common.Hash
		if len(origin) > 0 {
			originHash = common.BytesToHash(origin)
		}
		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		if len(limit) > 0 {
			limitHash = common.BytesToHash(limit)
		}
		var (
			keys  []common.Hash
			vals  [][]byte
			abort bool
		)
		for _, entry := range t.storageValues[account] {
			if size >= max {
				abort = true
				break
			}
			if bytes.Compare(entry.k, originHash[:]) < 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
				break
			}
		}
		if len(keys) > 0 {
			hashes = append(hashes, keys)
			slots = append(slots, vals)
		}
		// Generate the Merkle proofs for the first and last storage slot, but
		// only if the response was capped. If the entire storage trie is included
		// in the response, there is no need for any proofs.
		if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]
			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(originHash[:], proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}

// createStorageRequestResponseAlwaysProve tests a cornercase, where the peer always
// supplies the proof for the last account, even if it is 'complete'.
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	max = max * 3 / 4

	var origin common.Hash
	if len(bOrigin) > 0 {
		origin = common.BytesToHash(bOrigin)
	}
	var exit bool
	for i, account := range accounts {
		var keys []common.Hash
		var vals [][]byte
		for _, entry := range t.storageValues[account] {
			if bytes.Compare(entry.k, origin[:]) < 0 {
				exit = true
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if size > max {
				exit = true
			}
		}
		if i == len(accounts)-1 {
			exit = true
		}
		hashes = append(hashes, keys)
		slots = append(slots, vals)

		if exit {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]
			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(origin[:], proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
					"error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}

// emptyRequestAccountRangeFn is a handler that rejects AccountRangeRequests
func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	t.remote.OnAccounts(t, requestId, nil, nil, nil)
	return nil
}

func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return nil
}

func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	t.remote.OnTrieNodes(t, requestId, nil)
	return nil
}

func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	return nil
}

func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	t.remote.OnStorage(t, requestId, nil, nil, nil)
	return nil
}

func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return nil
}

func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}
//func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
// var bytecodes [][]byte
// t.remote.OnByteCodes(t, id, bytecodes)
// return nil
//}
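
// corruptCodeRequestHandler replies with the code hashes themselves instead of
// the actual bytecodes, which the syncer is expected to reject.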
func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes {
		// Send back the hashes
		bytecodes = append(bytecodes, h[:])
	}
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}
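
// cappedCodeRequestHandler delivers only the first requested bytecode, leaving
// the rest to be re-requested in later rounds.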
func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes[:1] {
		bytecodes = append(bytecodes, getCodeByHash(h))
	}
	// Missing bytecode can be retrieved again, no error expected
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}

// starvingStorageRequestHandler is a somewhat well-behaving storage handler, but it caps the returned results to be very small
func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
}

func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
}
//func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
// return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
//}

func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

// corruptStorageRequestHandler doesn't provide good proofs
func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

// TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
// also ship the entire trie inside the proof. If the attack is successful,
// the remote side does not do any follow-up requests
func TestSyncBloatedProof(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
	source := newTestPeer("source", t, term)
	source.accountTrie = sourceAccountTrie.Copy()
	source.accountValues = elems

	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
		var (
			proofs [][]byte
			keys   []common.Hash
			vals   [][]byte
		)
		// The values
		for _, entry := range t.accountValues {
			if bytes.Compare(entry.k, origin[:]) < 0 {
				continue
			}
			if bytes.Compare(entry.k, limit[:]) > 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
		}
		// The proofs
		proof := light.NewNodeSet()
		if err := t.accountTrie.Prove(origin[:], proof); err != nil {
			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
		}
		// The bloat: add proof of every single element
		for _, entry := range t.accountValues {
			if err := t.accountTrie.Prove(entry.k, proof); err != nil {
				t.logger.Error("Could not prove item", "error", err)
			}
		}
		// And remove one item from the elements
		if len(keys) > 2 {
			keys = append(keys[:1], keys[2:]...)
			vals = append(vals[:1], vals[2:]...)
		}
		for _, blob := range proof.NodeList() {
			proofs = append(proofs, blob)
		}
		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
			t.logger.Info("remote error on delivery (as expected)", "error", err)
			t.term()
			// This is actually correct, signal to exit the test successfully
		}
		return nil
	}
	syncer := setupSyncer(nodeScheme, source)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
		t.Fatal("No error returned from incomplete/cancelled sync")
	}
}
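
// setupSyncer creates a syncer backed by a fresh in-memory database and
// registers the given test peers with it.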
func setupSyncer(scheme string, peers ...*testPeer) *Syncer {
	stateDb := rawdb.NewMemoryDatabase()
	syncer := NewSyncer(stateDb, scheme)
	for _, peer := range peers {
		syncer.Register(peer)
		peer.remote = syncer
	}
	return syncer
}

// TestSync tests a basic sync with one peer
func TestSync(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(nodeScheme, mkSource("source"))
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
// panic within the prover
func TestSyncTinyTriePanic(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(nodeScheme, mkSource("source"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSync tests a basic sync with multiple peers
func TestMultiSync(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(nodeScheme, mkSource("sourceA"), mkSource("sourceB"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorage tests basic sync using accounts + storage + code
func TestSyncWithStorage(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.setStorageTries(storageTries)
		source.storageValues = storageElems
		return source
	}
	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
func TestMultiSyncManyUseless(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.setStorageTries(storageTries)
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		nodeScheme,
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which
// don't return anything valuable at all
func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.setStorageTries(storageTries)
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		nodeScheme,
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	// We're setting the timeout to very low, to increase the chance of the timeout
	// being triggered. This was previously a cause of panic, when a response
	// arrived simultaneously as a timeout was triggered.
	syncer.rates.OverrideTTLLimit = time.Millisecond

	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
func TestMultiSyncManyUnresponsive(t *testing.T) {
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.setStorageTries(storageTries)
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = nonResponsiveStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = nonResponsiveTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		nodeScheme,
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	// We're setting the timeout to very low, to make the test run a bit faster
	syncer.rates.OverrideTTLLimit = time.Millisecond

	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
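
// checkStall spawns a watchdog that terminates the test (via term) if the sync
// hasn't completed within a minute; the returned channel must be closed once
// the sync is done.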
func checkStall(t *testing.T, term func()) chan struct{} {
	testDone := make(chan struct{})
	go func() {
		select {
		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
			t.Log("Sync stalled")
			term()
		case <-testDone:
			return
		}
	}()
	return testDone
}

// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
// account trie has a few boundary elements.
func TestSyncBoundaryAccountTrie(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(
		nodeScheme,
		mkSource("peer-a"),
		mkSource("peer-b"),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
// consistently returning very small results
func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, slow bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		if slow {
			source.accountRequestHandler = starvingAccountRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		nodeScheme,
		mkSource("nice-a", false),
		mkSource("nice-b", false),
		mkSource("nice-c", false),
		mkSource("capped", true),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
// code requests properly.
func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.codeRequestHandler = codeFn
		return source
	}
	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of codes requested are sent only to the
	// non-corrupt peer, which delivers everything in one go, and makes the
	// test moot
	syncer := setupSyncer(
		nodeScheme,
		mkSource("capped", cappedCodeRequestHandler),
		mkSource("corrupt", corruptCodeRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
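
// TestSyncNoStorageAndOneAccountCorruptPeer has one peer which doesn't deliver
// account range responses properly.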
func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.accountRequestHandler = accFn
		return source
	}
	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of codes requested are sent only to the
	// non-corrupt peer, which delivers everything in one go, and makes the
	// test moot
	syncer := setupSyncer(
		nodeScheme,
		mkSource("capped", defaultAccountRequestHandler),
		mkSource("corrupt", corruptAccountRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
// one by one
func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.codeRequestHandler = codeFn
		return source
	}
	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
	// so it shouldn't be more than that
	var counter int
	syncer := setupSyncer(
		nodeScheme,
		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
			counter++
			return cappedCodeRequestHandler(t, id, hashes, max)
		}),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)

	// There are only 8 unique hashes, and 3K accounts. However, the code
	// deduplication is per request batch. If it were a perfect global dedup,
	// we would expect only 8 requests. If there were no dedup, there would be
	// 3k requests.
	// We expect somewhere below 100 requests for these 8 unique hashes. But
	// the number can be flaky, so don't limit it so strictly.
	if threshold := 100; counter > threshold {
		t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
// storage trie has a few boundary elements.
func TestSyncBoundaryStorageTrie(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.setStorageTries(storageTries)
		source.storageValues = storageElems
		return source
	}
	syncer := setupSyncer(
		nodeScheme,
		mkSource("peer-a"),
		mkSource("peer-b"),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
// consistently returning very small results
func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)

	mkSource := func(name string, slow bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.setStorageTries(storageTries)
		source.storageValues = storageElems
		if slow {
			source.storageRequestHandler = starvingStorageRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		nodeScheme,
		mkSource("nice-a", false),
		mkSource("slow", true),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
// sometimes sending bad proofs
func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.setStorageTries(storageTries)
		source.storageValues = storageElems
		source.storageRequestHandler = handler
		return source
	}
	syncer := setupSyncer(
		nodeScheme,
		mkSource("nice-a", defaultStorageRequestHandler),
		mkSource("nice-b", defaultStorageRequestHandler),
		mkSource("nice-c", defaultStorageRequestHandler),
		mkSource("corrupt", corruptStorageRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
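
// TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage, where
// one peer doesn't supply any proofs with its storage responses.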
func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.setStorageTries(storageTries)
		source.storageValues = storageElems
		source.storageRequestHandler = handler
		return source
	}
	syncer := setupSyncer(
		nodeScheme,
		mkSource("nice-a", defaultStorageRequestHandler),
		mkSource("nice-b", defaultStorageRequestHandler),
		mkSource("nice-c", defaultStorageRequestHandler),
		mkSource("corrupt", noProofStorageRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code,
// against a peer who insists on delivering full storage sets _and_ proofs. This triggered
// an error, where the recipient erroneously clipped the boundary nodes, but
// did not mark the account for healing.
func TestSyncWithStorageMisbehavingProve(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie.Copy()
		source.accountValues = elems
		source.setStorageTries(storageTries)
		source.storageValues = storageElems
		source.storageRequestHandler = proofHappyStorageRequestHandler
		return source
	}
	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
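
// kv is a key-value tuple used to describe trie leaves in the tests; less
// orders two tuples by key.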
type kv struct {
	k, v []byte
}

func (k *kv) less(other *kv) bool {
	return bytes.Compare(k.k, other.k) < 0
}
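
// key32 converts i into a 32-byte, little-endian padded trie key.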
func key32(i uint64) []byte {
	key := make([]byte, 32)
	binary.LittleEndian.PutUint64(key, i)
	return key
}

var (
	codehashes = []common.Hash{
		crypto.Keccak256Hash([]byte{0}),
		crypto.Keccak256Hash([]byte{1}),
		crypto.Keccak256Hash([]byte{2}),
		crypto.Keccak256Hash([]byte{3}),
		crypto.Keccak256Hash([]byte{4}),
		crypto.Keccak256Hash([]byte{5}),
		crypto.Keccak256Hash([]byte{6}),
		crypto.Keccak256Hash([]byte{7}),
	}
)

// getCodeHash returns a pseudo-random code hash
func getCodeHash(i uint64) []byte {
	h := codehashes[int(i)%len(codehashes)]
	return common.CopyBytes(h[:])
}

// getCodeByHash is a convenience function to look up the code from the code hash
func getCodeByHash(hash common.Hash) []byte {
	if hash == types.EmptyCodeHash {
		return nil
	}
	for i, h := range codehashes {
		if h == hash {
			return []byte{byte(i)}
		}
	}
	return nil
}

// makeAccountTrieNoStorage spits out a trie, along with the leaves
func makeAccountTrieNoStorage(n int) (string, *trie.Trie, []*kv) {
	var (
		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie = trie.NewEmpty(db)
		entries []*kv
	)
	for i := uint64(1); i <= uint64(n); i++ {
		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     types.EmptyRootHash,
			CodeHash: getCodeHash(i),
		})
		key := key32(i)
		elem := &kv{key, value}
		accTrie.MustUpdate(elem.k, elem.v)
		entries = append(entries, elem)
	}
	slices.SortFunc(entries, (*kv).less)

	// Commit the state changes into db and re-create the trie
	// for accessing later.
	root, nodes, _ := accTrie.Commit(false)
	db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
	accTrie, _ = trie.New(trie.StateTrieID(root), db)
	return db.Scheme(), accTrie, entries
}

// makeBoundaryAccountTrie constructs an account trie. Instead of filling
// accounts normally, this function will fill a few accounts which have
// boundary hash.
func makeBoundaryAccountTrie(n int) (string, *trie.Trie, []*kv) {
	var (
		entries    []*kv
		boundaries []common.Hash

		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie = trie.NewEmpty(db)
	)
	// Initialize boundaries
	var next common.Hash
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(int64(accountConcurrency)),
		), common.Big1,
	)
	for i := 0; i < accountConcurrency; i++ {
		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
		if i == accountConcurrency-1 {
			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		}
		boundaries = append(boundaries, last)
		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
	}
	// Fill boundary accounts
	for i := 0; i < len(boundaries); i++ {
		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce:    uint64(0),
			Balance:  big.NewInt(int64(i)),
			Root:     types.EmptyRootHash,
			CodeHash: getCodeHash(uint64(i)),
		})
		elem := &kv{boundaries[i].Bytes(), value}
		accTrie.MustUpdate(elem.k, elem.v)
		entries = append(entries, elem)
	}
	// Fill other accounts if required
	for i := uint64(1); i <= uint64(n); i++ {
		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     types.EmptyRootHash,
			CodeHash: getCodeHash(i),
		})
		elem := &kv{key32(i), value}
		accTrie.MustUpdate(elem.k, elem.v)
		entries = append(entries, elem)
	}
	slices.SortFunc(entries, (*kv).less)

	// Commit the state changes into db and re-create the trie
	// for accessing later.
	root, nodes, _ := accTrie.Commit(false)
	db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
2022-08-04 08:03:20 +00:00
cmd, core, eth, les, light: track deleted nodes (#25757)
* cmd, core, eth, les, light: track deleted nodes
* trie: add docs
* trie: address comments
* cmd, core, eth, les, light, trie: trie id
* trie: add tests
* trie, core: updates
* trie: fix imports
* trie: add utility print-method for nodeset
* trie: import err
* trie: fix go vet warnings
Co-authored-by: Martin Holst Swende <martin@swende.se>
2022-09-27 08:01:02 +00:00
accTrie , _ = trie . New ( trie . StateTrieID ( root ) , db )
2022-11-28 13:31:28 +00:00
return db . Scheme ( ) , accTrie , entries
2021-03-24 14:33:34 +00:00
}
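
// Note on the boundaries above: step is (2^256 / accountConcurrency) - 1, so each
// boundary is the last hash of one of the concurrent account-sync ranges. As a
// rough illustration (assuming accountConcurrency is 16), the first boundary sits
// at the 1/16th mark of the hash space, i.e. the 0x0fff...ff style value that also
// shows up in TestSlotEstimation at the end of this file.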

// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
// has a unique storage set.
func makeAccountTrieWithStorageWithUniqueStorage ( accounts , slots int , code bool ) ( string , * trie . Trie , [ ] * kv , map [ common . Hash ] * trie . Trie , map [ common . Hash ] [ ] * kv ) {
var (
db = trie . NewDatabase ( rawdb . NewMemoryDatabase ( ) )
accTrie = trie . NewEmpty ( db )
entries [ ] * kv
storageRoots = make ( map [ common . Hash ] common . Hash )
storageTries = make ( map [ common . Hash ] * trie . Trie )
storageEntries = make ( map [ common . Hash ] [ ] * kv )
nodes = trienode . NewMergedNodeSet ( )
)
// Create n accounts in the trie
for i := uint64 ( 1 ) ; i <= uint64 ( accounts ) ; i ++ {
key := key32 ( i )
codehash := types . EmptyCodeHash . Bytes ( )
if code {
codehash = getCodeHash ( i )
}
// Create a storage trie
stRoot , stNodes , stEntries := makeStorageTrieWithSeed ( common . BytesToHash ( key ) , uint64 ( slots ) , i , db )
nodes . Merge ( stNodes )

value , _ := rlp . EncodeToBytes ( & types . StateAccount {
Nonce : i ,
Balance : big . NewInt ( int64 ( i ) ) ,
Root : stRoot ,
CodeHash : codehash ,
} )
elem := & kv { key , value }
accTrie . MustUpdate ( elem . k , elem . v )
entries = append ( entries , elem )

storageRoots [ common . BytesToHash ( key ) ] = stRoot
storageEntries [ common . BytesToHash ( key ) ] = stEntries
}
slices . SortFunc ( entries , ( * kv ) . less )

// Commit account trie
root , set , _ := accTrie . Commit ( true )
nodes . Merge ( set )

// Commit gathered dirty nodes into database
db . Update ( root , types . EmptyRootHash , nodes , nil )

// Re-create tries with new root
accTrie , _ = trie . New ( trie . StateTrieID ( root ) , db )
for i := uint64 ( 1 ) ; i <= uint64 ( accounts ) ; i ++ {
key := key32 ( i )
id := trie . StorageTrieID ( root , common . BytesToHash ( key ) , storageRoots [ common . BytesToHash ( key ) ] )
trie , _ := trie . New ( id , db )
storageTries [ common . BytesToHash ( key ) ] = trie
}
return db . Scheme ( ) , accTrie , entries , storageTries , storageEntries
}
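
// A minimal usage sketch (illustrative only): the two extra return values are keyed
// by the account key converted to a hash (common.BytesToHash), mapping each account
// to its storage trie and to its sorted storage leaf entries; the counts below are
// arbitrary.
//
//	scheme, accTrie, accounts, storageTries, storageElems :=
//		makeAccountTrieWithStorageWithUniqueStorage(10, 5, false)
//	_ = storageTries[common.BytesToHash(accounts[0].k)] // storage trie of the first account
//	_ = storageElems[common.BytesToHash(accounts[0].k)] // its sorted slots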

// makeAccountTrieWithStorage spits out an account trie with storage tries attached,
// along with the sorted leaf entries.
func makeAccountTrieWithStorage ( accounts , slots int , code , boundary bool ) ( string , * trie . Trie , [ ] * kv , map [ common . Hash ] * trie . Trie , map [ common . Hash ] [ ] * kv ) {
var (
db = trie . NewDatabase ( rawdb . NewMemoryDatabase ( ) )
accTrie = trie . NewEmpty ( db )
entries [ ] * kv
storageRoots = make ( map [ common . Hash ] common . Hash )
storageTries = make ( map [ common . Hash ] * trie . Trie )
storageEntries = make ( map [ common . Hash ] [ ] * kv )
nodes = trienode . NewMergedNodeSet ( )
)
// Create n accounts in the trie
for i := uint64 ( 1 ) ; i <= uint64 ( accounts ) ; i ++ {
key := key32 ( i )
codehash := types . EmptyCodeHash . Bytes ( )
if code {
codehash = getCodeHash ( i )
}
// Make a storage trie
var (
stRoot common . Hash
stNodes * trienode . NodeSet
stEntries [ ] * kv
)
if boundary {
stRoot , stNodes , stEntries = makeBoundaryStorageTrie ( common . BytesToHash ( key ) , slots , db )
} else {
stRoot , stNodes , stEntries = makeStorageTrieWithSeed ( common . BytesToHash ( key ) , uint64 ( slots ) , 0 , db )
}
nodes . Merge ( stNodes )

value , _ := rlp . EncodeToBytes ( & types . StateAccount {
Nonce : i ,
Balance : big . NewInt ( int64 ( i ) ) ,
Root : stRoot ,
CodeHash : codehash ,
} )
elem := & kv { key , value }
accTrie . MustUpdate ( elem . k , elem . v )
entries = append ( entries , elem )

// we reuse the same storage trie for all accounts
storageRoots [ common . BytesToHash ( key ) ] = stRoot
storageEntries [ common . BytesToHash ( key ) ] = stEntries
}
slices . SortFunc ( entries , ( * kv ) . less )

// Commit account trie
root , set , _ := accTrie . Commit ( true )
nodes . Merge ( set )

// Commit gathered dirty nodes into database
db . Update ( root , types . EmptyRootHash , nodes , nil )

// Re-create tries with new root
accTrie , err := trie . New ( trie . StateTrieID ( root ) , db )
if err != nil {
panic ( err )
}
for i := uint64 ( 1 ) ; i <= uint64 ( accounts ) ; i ++ {
key := key32 ( i )
id := trie . StorageTrieID ( root , common . BytesToHash ( key ) , storageRoots [ common . BytesToHash ( key ) ] )
trie , err := trie . New ( id , db )
if err != nil {
panic ( err )
}
storageTries [ common . BytesToHash ( key ) ] = trie
}
return db . Scheme ( ) , accTrie , entries , storageTries , storageEntries
}

// makeStorageTrieWithSeed fills a storage trie with n items, returning the
// committed root, the collected trie nodes and the sorted entries. The seed can
// be used to ensure that tries are unique.
func makeStorageTrieWithSeed ( owner common . Hash , n , seed uint64 , db * trie . Database ) ( common . Hash , * trienode . NodeSet , [ ] * kv ) {
trie , _ := trie . New ( trie . StorageTrieID ( types . EmptyRootHash , owner , types . EmptyRootHash ) , db )
var entries [ ] * kv
for i := uint64 ( 1 ) ; i <= n ; i ++ {
// store 'x' + seed at slot 'x'
slotValue := key32 ( i + seed )
rlpSlotValue , _ := rlp . EncodeToBytes ( common . TrimLeftZeroes ( slotValue [ : ] ) )
slotKey := key32 ( i )
key := crypto . Keccak256Hash ( slotKey [ : ] )
elem := & kv { key [ : ] , rlpSlotValue }
trie . MustUpdate ( elem . k , elem . v )
entries = append ( entries , elem )
}
slices . SortFunc ( entries , ( * kv ) . less )
root , nodes , _ := trie . Commit ( false )
return root , nodes , entries
}
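
// A minimal sketch (illustrative only, assuming owner/db are set up as above):
// two tries built with different seeds store different values at the same slots
// and so end up with different roots, which is what the unique-storage helper
// above relies on.
//
//	rootA, _, _ := makeStorageTrieWithSeed(owner, 8, 1, db)
//	rootB, _, _ := makeStorageTrieWithSeed(owner, 8, 2, db)
//	// rootA != rootB, since slot i holds i+seed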

// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hashes.
func makeBoundaryStorageTrie ( owner common . Hash , n int , db * trie . Database ) ( common . Hash , * trienode . NodeSet , [ ] * kv ) {
var (
entries [ ] * kv
boundaries [ ] common . Hash
trie , _ = trie . New ( trie . StorageTrieID ( types . EmptyRootHash , owner , types . EmptyRootHash ) , db )
)
// Initialize boundaries
var next common . Hash
step := new ( big . Int ) . Sub (
new ( big . Int ) . Div (
new ( big . Int ) . Exp ( common . Big2 , common . Big256 , nil ) ,
big . NewInt ( int64 ( accountConcurrency ) ) ,
) , common . Big1 ,
)
for i := 0 ; i < accountConcurrency ; i ++ {
last := common . BigToHash ( new ( big . Int ) . Add ( next . Big ( ) , step ) )
if i == accountConcurrency - 1 {
last = common . HexToHash ( "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" )
}
boundaries = append ( boundaries , last )
next = common . BigToHash ( new ( big . Int ) . Add ( last . Big ( ) , common . Big1 ) )
}
// Fill boundary slots
for i := 0 ; i < len ( boundaries ) ; i ++ {
key := boundaries [ i ]
val := [ ] byte { 0xde , 0xad , 0xbe , 0xef }
elem := & kv { key [ : ] , val }
trie . MustUpdate ( elem . k , elem . v )
entries = append ( entries , elem )
}
// Fill other slots if required
for i := uint64 ( 1 ) ; i <= uint64 ( n ) ; i ++ {
slotKey := key32 ( i )
key := crypto . Keccak256Hash ( slotKey [ : ] )
slotValue := key32 ( i )
rlpSlotValue , _ := rlp . EncodeToBytes ( common . TrimLeftZeroes ( slotValue [ : ] ) )
elem := & kv { key [ : ] , rlpSlotValue }
trie . MustUpdate ( elem . k , elem . v )
entries = append ( entries , elem )
}
slices . SortFunc ( entries , ( * kv ) . less )
root , nodes , _ := trie . Commit ( false )
return root , nodes , entries
}

func verifyTrie ( db ethdb . KeyValueStore , root common . Hash , t * testing . T ) {
t . Helper ( )
triedb := trie . NewDatabase ( rawdb . NewDatabase ( db ) )
accTrie , err := trie . New ( trie . StateTrieID ( root ) , triedb )
if err != nil {
t . Fatal ( err )
}
accounts , slots := 0 , 0
accIt := trie . NewIterator ( accTrie . MustNodeIterator ( nil ) )
for accIt . Next ( ) {
var acc struct {
Nonce uint64
Balance * big . Int
Root common . Hash
CodeHash [ ] byte
}
if err := rlp . DecodeBytes ( accIt . Value , & acc ) ; err != nil {
log . Crit ( "Invalid account encountered during snapshot creation" , "err" , err )
}
accounts ++
if acc . Root != types . EmptyRootHash {
id := trie . StorageTrieID ( root , common . BytesToHash ( accIt . Key ) , acc . Root )
storeTrie , err := trie . NewStateTrie ( id , triedb )
if err != nil {
t . Fatal ( err )
}
storeIt := trie . NewIterator ( storeTrie . MustNodeIterator ( nil ) )
for storeIt . Next ( ) {
slots ++
}
if err := storeIt . Err ; err != nil {
t . Fatal ( err )
}
}
}
if err := accIt . Err ; err != nil {
t . Fatal ( err )
}
t . Logf ( "accounts: %d, slots: %d" , accounts , slots )
}
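
// Illustrative usage (mirroring the tests in this file): after a successful sync,
// the syncer's backing database is expected to contain the complete state, e.g.
//
//	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)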

// TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
// state healing
func TestSyncAccountPerformance ( t * testing . T ) {
// Set the account concurrency to 1. This _should_ result in the
// range root becoming correct, and there should be no healing needed
defer func ( old int ) { accountConcurrency = old } ( accountConcurrency )
accountConcurrency = 1
var (
once sync . Once
cancel = make ( chan struct { } )
term = func ( ) {
once . Do ( func ( ) {
close ( cancel )
} )
}
)
nodeScheme , sourceAccountTrie , elems := makeAccountTrieNoStorage ( 100 )

mkSource := func ( name string ) * testPeer {
source := newTestPeer ( name , t , term )
source . accountTrie = sourceAccountTrie . Copy ( )
source . accountValues = elems
return source
}
src := mkSource ( "source" )
syncer := setupSyncer ( nodeScheme , src )
if err := syncer . Sync ( sourceAccountTrie . Hash ( ) , cancel ) ; err != nil {
t . Fatalf ( "sync failed: %v" , err )
}
verifyTrie ( syncer . db , sourceAccountTrie . Hash ( ) , t )

// The trie root will always be requested, since it is added when the snap
// sync cycle starts. When popping the queue, we do not look it up again.
// Doing so would bring this number down to zero in this artificial testcase,
// but only add extra IO for no reason in practice.
if have , want := src . nTrienodeRequests , 1 ; have != want {
fmt . Print ( src . Stats ( ) )
t . Errorf ( "trie node heal requests wrong, want %d, have %d" , want , have )
}
}
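
// The table below exercises estimateRemainingSlots. The expected values follow from
// a simple linear extrapolation over the unvisited hash space (stated here as an
// assumption about the estimator, not taken from its implementation): finding 100
// slots in half of the space leaves roughly another 100, while finding 100 slots in
// 1/16th of it leaves roughly 15 * 100 = 1500.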
func TestSlotEstimation ( t * testing . T ) {
for i , tc := range [ ] struct {
last common . Hash
count int
want uint64
} {
{
// Half the space
common . HexToHash ( "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" ) ,
100 ,
100 ,
} ,
{
// 1 / 16th
common . HexToHash ( "0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" ) ,
100 ,
1500 ,
} ,
{
// Bit more than 1 / 16th
common . HexToHash ( "0x1000000000000000000000000000000000000000000000000000000000000000" ) ,
100 ,
1499 ,
} ,
{
// Almost everything
common . HexToHash ( "0xF000000000000000000000000000000000000000000000000000000000000000" ) ,
100 ,
6 ,
} ,
{
// Almost nothing -- should lead to error
common . HexToHash ( "0x0000000000000000000000000000000000000000000000000000000000000001" ) ,
1 ,
0 ,
} ,
{
// Nothing -- should lead to error
common . Hash { } ,
100 ,
0 ,
} ,
} {
have , _ := estimateRemainingSlots ( tc . count , tc . last )
if want := tc . want ; have != want {
t . Errorf ( "test %d: have %d want %d" , i , have , want )
}
}
}