eth/protocols/snap: fix snap sync failure on empty storage range (#28306)
This change addresses an issue in snap sync where the entire sync process can be halted by an empty storage range. Currently, the snap sync client discards the response to an empty (partial) storage range as a non-delivery. However, such a response can be perfectly valid when the requested range simply contains no slots. For instance, consider a large contract whose key space is divided into 16 chunks and whose last chunk, [0xf] -> [end], holds no slots. When a node receives a request for this particular range, its response includes:

- the proof with origin [0xf]
- a nil storage slot set

If this response is simply discarded, the finalization of the last range is skipped, halting the entire sync process indefinitely. The test case TestSyncWithUnevenStorage reproduces the scenario described above.

In addition, this change also defines the common variables MaxAddress and MaxHash.
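For illustration, the following is a minimal, self-contained Go sketch of the client-side decision described above. The names here (storageResponse, classify) are invented for this example and are not go-ethereum identifiers; the actual change lives in Syncer.OnStorage, shown in the diff below.

```go
package main

import "fmt"

// storageResponse models the two parts of a storage range reply that matter
// here: the returned slot hashes (one set per requested account) and the
// boundary proof attached to the last, partially-covered range.
type storageResponse struct {
	hashes [][]string // slot key hashes per account; empty when the range has no slots
	proof  [][]byte   // Merkle proof nodes for the range boundary
}

// classify mirrors the corrected behaviour: an empty reply without a proof is
// still treated as a rejection, while an empty reply that carries a proof is a
// valid "no slots in this range" answer and must be finalized so the sync can
// move past the range instead of stalling.
func classify(res storageResponse) string {
	switch {
	case len(res.hashes) == 0 && len(res.proof) == 0:
		return "rejection: mark the peer stateless and reschedule the request"
	case len(res.hashes) == 0 && len(res.proof) > 0:
		return "empty range: synthesize an empty slot set and finalize the range"
	default:
		return "delivery: verify the proof and persist the slots"
	}
}

func main() {
	// The last chunk [0xf] -> [end] of a large contract may legitimately be
	// empty; the peer proves that by returning only the boundary proof.
	fmt.Println(classify(storageResponse{proof: [][]byte{{0x01}}}))
}
```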
Parent: 2f66d7c47c
Commit: 1cb3b6aee4
@@ -1206,7 +1206,7 @@ func GenDoc(ctx *cli.Context) error {
 				URL: accounts.URL{Path: ".. ignored .."},
 			},
 			{
-				Address: common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"),
+				Address: common.MaxAddress,
 			},
 		}})
 	}
@@ -58,7 +58,7 @@ type accRangeTest struct {
 func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
 	var (
 		root = s.chain.RootAt(999)
-		ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+		ffHash = common.MaxHash
 		zero = common.Hash{}
 		firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
 		firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")

@@ -125,7 +125,7 @@ type stRangesTest struct {
 // TestSnapGetStorageRanges various forms of GetStorageRanges requests.
 func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
 	var (
-		ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+		ffHash = common.MaxHash
 		zero = common.Hash{}
 		firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
 		secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
@@ -44,6 +44,12 @@ const (
 var (
 	hashT = reflect.TypeOf(Hash{})
 	addressT = reflect.TypeOf(Address{})
+
+	// MaxAddress represents the maximum possible address value.
+	MaxAddress = HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")
+
+	// MaxHash represents the maximum possible hash value.
+	MaxHash = HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
 )

 // Hash represents the 32 byte Keccak256 hash of arbitrary data.
@@ -132,7 +132,7 @@ func TestStateProcessorErrors(t *testing.T) {
 	)

 	defer blockchain.Stop()
-	bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))
+	bigNumber := new(big.Int).SetBytes(common.MaxHash.Bytes())
 	tooBigNumber := new(big.Int).Set(bigNumber)
 	tooBigNumber.Add(tooBigNumber, common.Big1)
 	for i, tt := range []struct {
@@ -367,7 +367,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
 	if len(req.Origin) > 0 {
 		origin, req.Origin = common.BytesToHash(req.Origin), nil
 	}
-	var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+	var limit = common.MaxHash
 	if len(req.Limit) > 0 {
 		limit, req.Limit = common.BytesToHash(req.Limit), nil
 	}
@@ -67,7 +67,7 @@ func (r *hashRange) End() common.Hash {
 	// If the end overflows (non divisible range), return a shorter interval
 	next, overflow := new(uint256.Int).AddOverflow(r.current, r.step)
 	if overflow {
-		return common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+		return common.MaxHash
 	}
 	return next.SubUint64(next, 1).Bytes32()
 }
@@ -45,7 +45,7 @@ func TestHashRanges(t *testing.T) {
 				common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
 				common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
 				common.HexToHash("0xbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
-				common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+				common.MaxHash,
 			},
 		},
 		// Split a divisible part of the hash range up into 2 chunks

@@ -58,7 +58,7 @@ func TestHashRanges(t *testing.T) {
 			},
 			ends: []common.Hash{
 				common.HexToHash("0x8fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
-				common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+				common.MaxHash,
 			},
 		},
 		// Split the entire hash range into a non divisible 3 chunks

@@ -73,7 +73,7 @@ func TestHashRanges(t *testing.T) {
 			ends: []common.Hash{
 				common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
 				common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"),
-				common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+				common.MaxHash,
 			},
 		},
 		// Split a part of hash range into a non divisible 3 chunks

@@ -88,7 +88,7 @@ func TestHashRanges(t *testing.T) {
 			ends: []common.Hash{
 				common.HexToHash("0x6aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
 				common.HexToHash("0xb555555555555555555555555555555555555555555555555555555555555555"),
-				common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+				common.MaxHash,
 			},
 		},
 		// Split a part of hash range into a non divisible 3 chunks, but with a

@@ -108,7 +108,7 @@ func TestHashRanges(t *testing.T) {
 			ends: []common.Hash{
 				common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5"),
 				common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb"),
-				common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+				common.MaxHash,
 			},
 		},
 	}
@@ -798,7 +798,7 @@ func (s *Syncer) loadSyncStatus() {
 			last := common.BigToHash(new(big.Int).Add(next.Big(), step))
 			if i == accountConcurrency-1 {
 				// Make sure we don't overflow if the step is not a proper divisor
-				last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+				last = common.MaxHash
 			}
 			batch := ethdb.HookedBatch{
 				Batch: s.db.NewBatch(),
@@ -1874,7 +1874,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
 		return
 	}
 	// Some accounts are incomplete, leave as is for the storage and contract
-	// task assigners to pick up and fill.
+	// task assigners to pick up and fill
 }

 // processBytecodeResponse integrates an already validated bytecode response
@@ -2624,7 +2624,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
 	// the requested data. For storage range queries that means the state being
 	// retrieved was either already pruned remotely, or the peer is not yet
 	// synced to our head.
-	if len(hashes) == 0 {
+	if len(hashes) == 0 && len(proof) == 0 {
 		logger.Debug("Peer rejected storage request")
 		s.statelessPeers[peer.ID()] = struct{}{}
 		s.lock.Unlock()

@@ -2636,6 +2636,13 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
 	// Reconstruct the partial tries from the response and verify them
 	var cont bool

+	// If a proof was attached while the response is empty, it indicates that the
+	// requested range specified with 'origin' is empty. Construct an empty state
+	// response locally to finalize the range.
+	if len(hashes) == 0 && len(proof) > 0 {
+		hashes = append(hashes, []common.Hash{})
+		slots = append(slots, [][]byte{})
+	}
 	for i := 0; i < len(hashes); i++ {
 		// Convert the keys and proofs into an internal format
 		keys := make([][]byte, len(hashes[i]))
@@ -22,6 +22,7 @@ import (
 	"encoding/binary"
 	"fmt"
 	"math/big"
+	mrand "math/rand"
 	"sync"
 	"testing"
 	"time"

@@ -34,6 +35,7 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
+	"github.com/ethereum/go-ethereum/trie/testutil"
 	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
 	"github.com/ethereum/go-ethereum/trie/trienode"
 	"golang.org/x/crypto/sha3"
@@ -253,7 +255,7 @@ func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, orig
 func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
 	var size uint64
 	if limit == (common.Hash{}) {
-		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+		limit = common.MaxHash
 	}
 	for _, entry := range t.accountValues {
 		if size > cap {

@@ -318,7 +320,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
 	if len(origin) > 0 {
 		originHash = common.BytesToHash(origin)
 	}
-	var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+	var limitHash = common.MaxHash
 	if len(limit) > 0 {
 		limitHash = common.BytesToHash(limit)
 	}
@@ -762,7 +764,7 @@ func testSyncWithStorage(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false, false)

 	mkSource := func(name string) *testPeer {
 		source := newTestPeer(name, t, term)

@@ -772,7 +774,7 @@ func testSyncWithStorage(t *testing.T, scheme string) {
 		source.storageValues = storageElems
 		return source
 	}
-	syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
+	syncer := setupSyncer(scheme, mkSource("sourceA"))
 	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
@@ -799,7 +801,7 @@ func testMultiSyncManyUseless(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)

 	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
 		source := newTestPeer(name, t, term)

@@ -821,7 +823,7 @@ func testMultiSyncManyUseless(t *testing.T, scheme string) {
 	}

 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("full", true, true, true),
 		mkSource("noAccounts", false, true, true),
 		mkSource("noStorage", true, false, true),
@@ -853,7 +855,7 @@ func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)

 	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
 		source := newTestPeer(name, t, term)

@@ -875,7 +877,7 @@ func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
 	}

 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("full", true, true, true),
 		mkSource("noAccounts", false, true, true),
 		mkSource("noStorage", true, false, true),
@@ -912,7 +914,7 @@ func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)

 	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
 		source := newTestPeer(name, t, term)

@@ -934,7 +936,7 @@ func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
 	}

 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("full", true, true, true),
 		mkSource("noAccounts", false, true, true),
 		mkSource("noStorage", true, false, true),
@@ -1215,7 +1217,7 @@ func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true, false)

 	mkSource := func(name string) *testPeer {
 		source := newTestPeer(name, t, term)

@@ -1226,7 +1228,7 @@ func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
 		return source
 	}
 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("peer-a"),
 		mkSource("peer-b"),
 	)
@@ -1257,7 +1259,7 @@ func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false, false)

 	mkSource := func(name string, slow bool) *testPeer {
 		source := newTestPeer(name, t, term)

@@ -1273,7 +1275,7 @@ func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
 	}

 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("nice-a", false),
 		mkSource("slow", true),
 	)
@@ -1304,7 +1306,7 @@ func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)

 	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
 		source := newTestPeer(name, t, term)

@@ -1317,7 +1319,7 @@ func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
 	}

 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("nice-a", defaultStorageRequestHandler),
 		mkSource("nice-b", defaultStorageRequestHandler),
 		mkSource("nice-c", defaultStorageRequestHandler),
@@ -1348,7 +1350,7 @@ func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)

 	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
 		source := newTestPeer(name, t, term)

@@ -1360,7 +1362,7 @@ func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
 		return source
 	}
 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("nice-a", defaultStorageRequestHandler),
 		mkSource("nice-b", defaultStorageRequestHandler),
 		mkSource("nice-c", defaultStorageRequestHandler),
@@ -1413,6 +1415,45 @@ func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) {
 	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
 }
+
+// TestSyncWithUnevenStorage tests sync where the storage trie is not even
+// and with a few empty ranges.
+func TestSyncWithUnevenStorage(t *testing.T) {
+	t.Parallel()
+
+	testSyncWithUnevenStorage(t, rawdb.HashScheme)
+	testSyncWithUnevenStorage(t, rawdb.PathScheme)
+}
+
+func testSyncWithUnevenStorage(t *testing.T, scheme string) {
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	accountTrie, accounts, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 256, false, false, true)
+
+	mkSource := func(name string) *testPeer {
+		source := newTestPeer(name, t, term)
+		source.accountTrie = accountTrie.Copy()
+		source.accountValues = accounts
+		source.setStorageTries(storageTries)
+		source.storageValues = storageElems
+		source.storageRequestHandler = func(t *testPeer, reqId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
+			return defaultStorageRequestHandler(t, reqId, root, accounts, origin, limit, 128) // retrieve storage in large mode
+		}
+		return source
+	}
+	syncer := setupSyncer(scheme, mkSource("source"))
+	if err := syncer.Sync(accountTrie.Hash(), cancel); err != nil {
+		t.Fatalf("sync failed: %v", err)
+	}
+	verifyTrie(scheme, syncer.db, accountTrie.Hash(), t)
+}

 type kv struct {
 	k, v []byte
 }
@@ -1511,7 +1552,7 @@ func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
 	for i := 0; i < accountConcurrency; i++ {
 		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
 		if i == accountConcurrency-1 {
-			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+			last = common.MaxHash
 		}
 		boundaries = append(boundaries, last)
 		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))

@@ -1608,7 +1649,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots
 }

 // makeAccountTrieWithStorage spits out a trie, along with the leafs
-func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
+func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool, uneven bool) (*trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
 	var (
 		db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
 		accTrie = trie.NewEmpty(db)
@@ -1633,6 +1674,8 @@ func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, bounda
 		)
 		if boundary {
 			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
+		} else if uneven {
+			stRoot, stNodes, stEntries = makeUnevenStorageTrie(common.BytesToHash(key), slots, db)
 		} else {
 			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
 		}

@@ -1675,7 +1718,7 @@ func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, bounda
 		}
 		storageTries[common.BytesToHash(key)] = trie
 	}
-	return db.Scheme(), accTrie, entries, storageTries, storageEntries
+	return accTrie, entries, storageTries, storageEntries
 }

 // makeStorageTrieWithSeed fills a storage trie with n items, returning the
@@ -1721,7 +1764,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
 	for i := 0; i < accountConcurrency; i++ {
 		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
 		if i == accountConcurrency-1 {
-			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+			last = common.MaxHash
 		}
 		boundaries = append(boundaries, last)
 		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
@@ -1752,6 +1795,38 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
 	return root, nodes, entries
 }
+
+// makeUnevenStorageTrie constructs a storage tries will states distributed in
+// different range unevenly.
+func makeUnevenStorageTrie(owner common.Hash, slots int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
+	var (
+		entries []*kv
+		tr, _ = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
+		chosen = make(map[byte]struct{})
+	)
+	for i := 0; i < 3; i++ {
+		var n int
+		for {
+			n = mrand.Intn(15) // the last range is set empty deliberately
+			if _, ok := chosen[byte(n)]; ok {
+				continue
+			}
+			chosen[byte(n)] = struct{}{}
+			break
+		}
+		for j := 0; j < slots/3; j++ {
+			key := append([]byte{byte(n)}, testutil.RandBytes(31)...)
+			val, _ := rlp.EncodeToBytes(testutil.RandBytes(32))
+
+			elem := &kv{key, val}
+			tr.MustUpdate(elem.k, elem.v)
+			entries = append(entries, elem)
+		}
+	}
+	slices.SortFunc(entries, (*kv).cmp)
+	root, nodes, _ := tr.Commit(false)
+	return root, nodes, entries
+}

 func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
 	t.Helper()
 	triedb := trie.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))
@@ -250,7 +250,7 @@ func TestRangeProofWithNonExistentProof(t *testing.T) {
 	// Special case, two edge proofs for two edge key.
 	proof := memorydb.New()
 	first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
-	last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
+	last := common.MaxHash.Bytes()
 	if err := trie.Prove(first, proof); err != nil {
 		t.Fatalf("Failed to prove the first node %v", err)
 	}

@@ -451,7 +451,7 @@ func TestAllElementsProof(t *testing.T) {
 	// Even with non-existent edge proofs, it should still work.
 	proof = memorydb.New()
 	first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
-	last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
+	last := common.MaxHash.Bytes()
 	if err := trie.Prove(first, proof); err != nil {
 		t.Fatalf("Failed to prove the first node %v", err)
 	}

@@ -517,7 +517,7 @@ func TestReverseSingleSideRangeProof(t *testing.T) {
 		if err := trie.Prove(entries[pos].k, proof); err != nil {
 			t.Fatalf("Failed to prove the first node %v", err)
 		}
-		last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+		last := common.MaxHash
 		if err := trie.Prove(last.Bytes(), proof); err != nil {
 			t.Fatalf("Failed to prove the last node %v", err)
 		}

@@ -728,7 +728,7 @@ func TestHasRightElement(t *testing.T) {
 			}
 		}
 		if c.end == -1 {
-			lastKey, end = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), len(entries)
+			lastKey, end = common.MaxHash.Bytes(), len(entries)
 			if err := trie.Prove(lastKey, proof); err != nil {
 				t.Fatalf("Failed to prove the first node %v", err)
 			}