eth/protocols/snap, eth/downloader: don't use bloom filter in snap sync

This commit is contained in:
Martin Holst Swende 2021-03-17 09:36:34 +01:00
parent 91726e8aad
commit 410089afea
No known key found for this signature in database
GPG Key ID: 683B438C05A5DDF0
4 changed files with 11 additions and 12 deletions

View File

@ -240,7 +240,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
headerProcCh: make(chan []*types.Header, 1), headerProcCh: make(chan []*types.Header, 1),
quitCh: make(chan struct{}), quitCh: make(chan struct{}),
stateCh: make(chan dataPack), stateCh: make(chan dataPack),
SnapSyncer: snap.NewSyncer(stateDb, stateBloom), SnapSyncer: snap.NewSyncer(stateDb),
stateSyncStart: make(chan *stateSync), stateSyncStart: make(chan *stateSync),
syncStatsState: stateSyncStats{ syncStatsState: stateSyncStats{
processed: rawdb.ReadFastTrieProgress(stateDb), processed: rawdb.ReadFastTrieProgress(stateDb),

View File

@ -177,7 +177,11 @@ func newHandler(config *handlerConfig) (*handler, error) {
// Construct the downloader (long sync) and its backing state bloom if fast // Construct the downloader (long sync) and its backing state bloom if fast
// sync is requested. The downloader is responsible for deallocating the state // sync is requested. The downloader is responsible for deallocating the state
// bloom when it's done. // bloom when it's done.
if atomic.LoadUint32(&h.fastSync) == 1 { // Note: we don't enable it if snap-sync is performed, since it's very heavy
// and the heal-portion of the snap sync is much lighter than fast. What we particularly
// want to avoid is a 90%-finished (but restarted) snap-sync beginning to
// index the entire trie.
if atomic.LoadUint32(&h.fastSync) == 1 && atomic.LoadUint32(&h.snapSync) == 0 {
h.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database) h.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database)
} }
h.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer) h.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer)

View File

@ -376,8 +376,7 @@ type SyncPeer interface {
// - The peer delivers a stale response after a previous timeout // - The peer delivers a stale response after a previous timeout
// - The peer delivers a refusal to serve the requested state // - The peer delivers a refusal to serve the requested state
type Syncer struct { type Syncer struct {
db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup) db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
bloom *trie.SyncBloom // Bloom filter to deduplicate nodes for state fixup
root common.Hash // Current state trie root being synced root common.Hash // Current state trie root being synced
tasks []*accountTask // Current account task set being synced tasks []*accountTask // Current account task set being synced
@ -446,10 +445,9 @@ type Syncer struct {
// NewSyncer creates a new snapshot syncer to download the Ethereum state over the // NewSyncer creates a new snapshot syncer to download the Ethereum state over the
// snap protocol. // snap protocol.
func NewSyncer(db ethdb.KeyValueStore, bloom *trie.SyncBloom) *Syncer { func NewSyncer(db ethdb.KeyValueStore) *Syncer {
return &Syncer{ return &Syncer{
db: db, db: db,
bloom: bloom,
peers: make(map[string]SyncPeer), peers: make(map[string]SyncPeer),
peerJoin: new(event.Feed), peerJoin: new(event.Feed),
@ -546,7 +544,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
s.lock.Lock() s.lock.Lock()
s.root = root s.root = root
s.healer = &healTask{ s.healer = &healTask{
scheduler: state.NewStateSync(root, s.db, s.bloom), scheduler: state.NewStateSync(root, s.db, nil),
trieTasks: make(map[common.Hash]trie.SyncPath), trieTasks: make(map[common.Hash]trie.SyncPath),
codeTasks: make(map[common.Hash]struct{}), codeTasks: make(map[common.Hash]struct{}),
} }
@ -1660,7 +1658,6 @@ func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) {
bytes += common.StorageSize(len(code)) bytes += common.StorageSize(len(code))
rawdb.WriteCode(batch, hash, code) rawdb.WriteCode(batch, hash, code)
s.bloom.Add(hash[:])
} }
if err := batch.Write(); err != nil { if err := batch.Write(); err != nil {
log.Crit("Failed to persist bytecodes", "err", err) log.Crit("Failed to persist bytecodes", "err", err)
@ -1796,7 +1793,6 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
} }
// Node is not a boundary, persist to disk // Node is not a boundary, persist to disk
batch.Put(it.Key(), it.Value()) batch.Put(it.Key(), it.Value())
s.bloom.Add(it.Key())
bytes += common.StorageSize(common.HashLength + len(it.Value())) bytes += common.StorageSize(common.HashLength + len(it.Value()))
nodes++ nodes++
@ -1953,7 +1949,6 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
} }
// Node is neither a boundary, not an incomplete account, persist to disk // Node is neither a boundary, not an incomplete account, persist to disk
batch.Put(it.Key(), it.Value()) batch.Put(it.Key(), it.Value())
s.bloom.Add(it.Key())
bytes += common.StorageSize(common.HashLength + len(it.Value())) bytes += common.StorageSize(common.HashLength + len(it.Value()))
nodes++ nodes++

View File

@ -525,7 +525,7 @@ func TestSyncBloatedProof(t *testing.T) {
func setupSyncer(peers ...*testPeer) *Syncer { func setupSyncer(peers ...*testPeer) *Syncer {
stateDb := rawdb.NewMemoryDatabase() stateDb := rawdb.NewMemoryDatabase()
syncer := NewSyncer(stateDb, trie.NewSyncBloom(1, stateDb)) syncer := NewSyncer(stateDb)
for _, peer := range peers { for _, peer := range peers {
syncer.Register(peer) syncer.Register(peer)
peer.remote = syncer peer.remote = syncer