eth/protocols/snap: fix the flaws in the snap sync (#22553)

* eth/protocols/snap: fix snap sync

* eth/protocols/snap: fix tests

* eth: fix tiny

* eth: update tests

* eth: update tests

* core/state/snapshot: testcase for #22534

* eth/protocols/snap: fix boundary loss on full-but-proven range

* core/state/snapshot: lintfix

* eth: address comment

* eth: fix handler

Co-authored-by: Martin Holst Swende <martin@swende.se>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
gary rong 2021-03-24 22:33:34 +08:00 committed by GitHub
parent ab8fd4d005
commit c5df05b9a9
3 changed files with 703 additions and 191 deletions


@@ -256,8 +256,13 @@ func handleMessage(backend Backend, peer *Peer) error {
 		var (
 			storage []*StorageData
 			last    common.Hash
+			abort   bool
 		)
-		for it.Next() && size < hardLimit {
+		for it.Next() {
+			if size >= hardLimit {
+				abort = true
+				break
+			}
 			hash, slot := it.Hash(), common.CopyBytes(it.Slot())

 			// Track the returned interval for the Merkle proofs
@@ -280,7 +285,7 @@ func handleMessage(backend Backend, peer *Peer) error {
 		// Generate the Merkle proofs for the first and last storage slot, but
 		// only if the response was capped. If the entire storage trie included
 		// in the response, no need for any proofs.
-		if origin != (common.Hash{}) || size >= hardLimit {
+		if origin != (common.Hash{}) || abort {
			// Request started at a non-zero hash or was capped prematurely, add
			// the endpoint Merkle proofs
 			accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
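The two hunks above replace the inline `size < hardLimit` loop condition with an explicit `abort` flag: the flag records whether iteration actually stopped early, and the proof-generation branch keys off that flag instead of re-checking `size >= hardLimit` after the loop. A minimal, self-contained sketch of that pattern, using hypothetical types and names rather than the handler's real ones:

```go
package main

import "fmt"

// item is a hypothetical stand-in for one storage slot yielded by the iterator.
type item struct {
	key  string
	size int
}

// collect gathers items until the hard limit is exceeded. It returns the kept
// items plus an "aborted" flag telling the caller whether the result was
// capped and therefore needs boundary proofs attached.
func collect(items []item, hardLimit int) (out []item, aborted bool) {
	size := 0
	for _, it := range items {
		// Check the limit before consuming the next item, mirroring the
		// "if size >= hardLimit { abort = true; break }" shape above.
		if size >= hardLimit {
			aborted = true
			break
		}
		out = append(out, it)
		size += it.size
	}
	return out, aborted
}

func main() {
	items := []item{{"a", 40}, {"b", 40}, {"c", 40}}
	out, aborted := collect(items, 64)
	fmt.Println(len(out), aborted) // 2 true -> response was capped, proofs required
}
```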


@@ -1551,7 +1551,14 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
 	// Ensure that the response doesn't overflow into the subsequent task
 	last := res.task.Last.Big()
 	for i, hash := range res.hashes {
-		if hash.Big().Cmp(last) > 0 {
+		// Mark the range complete if the last is already included.
+		// Keep iteration to delete the extra states if exists.
+		cmp := hash.Big().Cmp(last)
+		if cmp == 0 {
+			res.cont = false
+			continue
+		}
+		if cmp > 0 {
 			// Chunk overflown, cut off excess, but also update the boundary nodes
 			for j := i; j < len(res.hashes); j++ {
 				if err := res.trie.Prove(res.hashes[j][:], 0, res.overflow); err != nil {
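The new `cmp == 0` branch marks the account range as complete when the response ends exactly on the task boundary, while iteration continues so any entries beyond the boundary still get detected and trimmed. A rough standalone illustration of that comparison logic, with hypothetical names and plain `big.Int` values standing in for account hashes:

```go
package main

import (
	"fmt"
	"math/big"
)

// splitAtLast mimics the boundary handling above with hypothetical inputs:
// an entry equal to the task's last hash marks the range as fully covered
// (cont = false) but stays in the result, while anything past it is dropped.
func splitAtLast(hashes []*big.Int, last *big.Int) (kept []*big.Int, cont bool) {
	cont = true
	for _, h := range hashes {
		switch h.Cmp(last) {
		case 0:
			// Exactly on the boundary: range complete, keep the entry.
			cont = false
			kept = append(kept, h)
		case 1:
			// Overflow into the next task: cut it off (the real code also
			// records its proof nodes in the overflow set).
			continue
		default:
			kept = append(kept, h)
		}
	}
	return kept, cont
}

func main() {
	last := big.NewInt(100)
	hashes := []*big.Int{big.NewInt(50), big.NewInt(100), big.NewInt(120)}
	kept, cont := splitAtLast(hashes, last)
	fmt.Println(len(kept), cont) // 2 false -> range complete, overflow trimmed
}
```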
@@ -1758,7 +1765,14 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 			// Ensure the response doesn't overflow into the subsequent task
 			last := res.subTask.Last.Big()
 			for k, hash := range res.hashes[i] {
-				if hash.Big().Cmp(last) > 0 {
+				// Mark the range complete if the last is already included.
+				// Keep iteration to delete the extra states if exists.
+				cmp := hash.Big().Cmp(last)
+				if cmp == 0 {
+					res.cont = false
+					continue
+				}
+				if cmp > 0 {
 					// Chunk overflown, cut off excess, but also update the boundary
 					for l := k; l < len(res.hashes[i]); l++ {
 						if err := res.tries[i].Prove(res.hashes[i][l][:], 0, res.overflow); err != nil {
@@ -1785,11 +1799,15 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 		it := res.nodes[i].NewIterator(nil, nil)
 		for it.Next() {
 			// Boundary nodes are not written for the last result, since they are incomplete
-			if i == len(res.hashes)-1 {
+			if i == len(res.hashes)-1 && res.subTask != nil {
 				if _, ok := res.bounds[common.BytesToHash(it.Key())]; ok {
					skipped++
					continue
				}
+				if _, err := res.overflow.Get(it.Key()); err == nil {
+					skipped++
+					continue
+				}
 			}
 			// Node is not a boundary, persist to disk
 			batch.Put(it.Key(), it.Value())
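The added `res.overflow.Get` check also skips trie nodes that were only proven for trimmed-off entries, so incomplete boundary data is not persisted; such nodes are filled in later rather than written here. A simplified sketch of that filtering step, using plain maps as hypothetical stand-ins for the node iterator, the bounds set, and the overflow database:

```go
package main

import "fmt"

// persist writes trie nodes into a batch-like map, skipping nodes that are
// known to be incomplete: entries recorded as range boundaries and entries
// kept only in the overflow set. Hypothetical types; the real code iterates a
// memory node database and writes into an ethdb batch.
func persist(nodes map[string][]byte, bounds, overflow map[string]bool) (written, skipped int) {
	batch := make(map[string][]byte)
	for key, value := range nodes {
		if bounds[key] || overflow[key] {
			// Boundary/overflow nodes are incomplete and must not be persisted.
			skipped++
			continue
		}
		batch[key] = value
		written++
	}
	return written, skipped
}

func main() {
	nodes := map[string][]byte{"a": {1}, "b": {2}, "c": {3}}
	bounds := map[string]bool{"a": true}
	overflow := map[string]bool{"c": true}
	fmt.Println(persist(nodes, bounds, overflow)) // 1 2
}
```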

File diff suppressed because it is too large.