handle updated accounts in shared map
There's a possibility (though as yet unseen) that an updated account could be moved outside the domain of a bounded iterator, in which case it would not see the update after traversal. Previously this would have caused an error; this change should prevent it from happening.
parent: 6ebf8471fb
commit: 1d84d12f75
builder.go (101 changed lines)
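The diff below collects both halves of each account update (the old storage root seen while walking trie A, and the new account seen while walking trie B) into one map shared by all subtrie workers, guarded by a mutex, and only pairs them up after errgroup.Wait() returns. The following is a minimal, standalone sketch of that pattern, not the builder.go code itself: the real sdtypes account structures are replaced by plain strings, and the record helper, the key "acct1", and the root values are illustrative placeholders.

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

// Simplified stand-ins for the account/root types used in builder.go.
type accountUpdate struct {
	oldRoot string // old storage root, recorded by whichever worker sees the old leaf
	newRoot string // new storage root, recorded by whichever worker sees the new leaf
}

type accountUpdateMap map[string]*accountUpdate

// accountUpdateLens guards the shared map so concurrent workers can each
// record their half of an update without racing.
type accountUpdateLens struct {
	state accountUpdateMap
	sync.Mutex
}

func (l *accountUpdateLens) update(fn func(accountUpdateMap)) {
	l.Lock()
	defer l.Unlock()
	fn(l.state)
}

// record is a hypothetical helper: it upserts the entry for key under the lock.
func record(lens *accountUpdateLens, key string, set func(*accountUpdate)) {
	lens.update(func(m accountUpdateMap) {
		u, ok := m[key]
		if !ok {
			u = &accountUpdate{}
			m[key] = u
		}
		set(u)
	})
}

func main() {
	lens := accountUpdateLens{state: make(accountUpdateMap)}
	var g errgroup.Group

	// Two workers observe the two halves of the same account's update; in the
	// real builder these are bounded subtrie iterators over tries A and B.
	g.Go(func() error {
		record(&lens, "acct1", func(u *accountUpdate) { u.oldRoot = "rootA" })
		return nil
	})
	g.Go(func() error {
		record(&lens, "acct1", func(u *accountUpdate) { u.newRoot = "rootB" })
		return nil
	})

	if err := g.Wait(); err != nil {
		panic(err)
	}
	// Only after every worker has finished are the paired updates flushed,
	// so an update split across workers is still emitted exactly once.
	for key, u := range lens.state {
		fmt.Printf("%s: %s -> %s\n", key, u.oldRoot, u.newRoot)
	}
}

Because both halves of an update land in the same guarded map regardless of which worker observed them, it no longer matters if an account leaf sits near a subtrie boundary and its old and new states are visited by different bounded iterators.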
@@ -69,8 +69,20 @@ type accountUpdate struct {
 	new sdtypes.AccountWrapper
 	oldRoot common.Hash
 }
 
 type accountUpdateMap map[string]*accountUpdate
 
+type accountUpdateLens struct {
+	state accountUpdateMap
+	sync.Mutex
+}
+
+func (l *accountUpdateLens) update(fn func(accountUpdateMap)) {
+	l.Lock()
+	defer l.Unlock()
+	fn(l.state)
+}
+
 func appender[T any](to *[]T) func(T) error {
 	return func(a T) error {
 		*to = append(*to, a)
@@ -141,8 +153,11 @@ func (sdb *builder) WriteStateDiff(
 	subitersA := iterutils.SubtrieIterators(triea.NodeIterator, uint(sdb.subtrieWorkers))
 	subitersB := iterutils.SubtrieIterators(trieb.NodeIterator, uint(sdb.subtrieWorkers))
 
+	updates := accountUpdateLens{
+		state: make(accountUpdateMap),
+	}
 	logger := log.New("hash", args.BlockHash, "number", args.BlockNumber)
-	// errgroup will cancel if any group fails
+	// errgroup will cancel if any worker fails
 	g, ctx := errgroup.WithContext(context.Background())
 	for i := uint(0); i < sdb.subtrieWorkers; i++ {
 		func(subdiv uint) {
@@ -152,12 +167,35 @@ func (sdb *builder) WriteStateDiff(
 				return sdb.processAccounts(ctx,
 					it, &it.SymmDiffState,
 					params.watchedAddressesLeafPaths,
-					nodeSink, ipldSink, logger,
+					nodeSink, ipldSink, &updates,
+					logger,
 				)
 			})
 		}(i)
 	}
-	return g.Wait()
+	if err = g.Wait(); err != nil {
+		return err
+	}
+
+	for key, update := range updates.state {
+		var storageDiff []sdtypes.StorageLeafNode
+		err := sdb.processStorageUpdates(
+			update.oldRoot, update.new.Account.Root,
+			appender(&storageDiff), ipldSink,
+		)
+		if err != nil {
+			return fmt.Errorf("error processing incremental storage diffs for account with leafkey %x\r\nerror: %w", key, err)
+		}
+
+		if err = nodeSink(sdtypes.StateLeafNode{
+			AccountWrapper: update.new,
+			StorageDiff: storageDiff,
+		}); err != nil {
+			return err
+		}
+	}
+	return nil
 }
 
 // WriteStateDiff writes a statediff object to output sinks
@@ -191,7 +229,11 @@ func (sdb *builder) WriteStateSnapshot(
 			subiters[i] = tracker.Tracked(subiters[i])
 		}
 	}
-	// errgroup will cancel if any group fails
+	updates := accountUpdateLens{
+		state: make(accountUpdateMap),
+	}
+
+	// errgroup will cancel if any worker fails
 	g, ctx := errgroup.WithContext(context.Background())
 	for i := range subiters {
 		func(subdiv uint) {
@@ -200,7 +242,8 @@ func (sdb *builder) WriteStateSnapshot(
 				return sdb.processAccounts(ctx,
 					subiters[subdiv], &symdiff,
 					params.watchedAddressesLeafPaths,
-					nodeSink, ipldSink, log.DefaultLogger,
+					nodeSink, ipldSink, &updates,
+					log.DefaultLogger,
 				)
 			})
 		}(uint(i))
@@ -215,13 +258,13 @@ func (sdb *builder) processAccounts(
 	it trie.NodeIterator, symdiff *utils.SymmDiffState,
 	watchedAddressesLeafPaths [][]byte,
 	nodeSink sdtypes.StateNodeSink, ipldSink sdtypes.IPLDSink,
+	updateLens *accountUpdateLens,
 	logger log.Logger,
 ) error {
 	logger.Trace("statediff/processAccounts BEGIN")
 	defer metrics.ReportAndUpdateDuration("statediff/processAccounts END",
 		time.Now(), logger, metrics.IndexerMetrics.ProcessAccountsTimer)
 
-	updates := make(accountUpdateMap)
 	// Cache the RLP of the previous node. When we hit a value node this will be the parent blob.
 	var prevBlob = it.NodeBlob()
 	for it.Next(true) {
@@ -245,12 +288,14 @@ func (sdb *builder) processAccounts(
 			copy(leafKey, it.LeafKey())
 
 			if symdiff.CommonPath() {
-				// If B also contains this leaf node, this is the old state of an updated account.
-				if update, ok := updates[string(leafKey)]; ok {
-					update.oldRoot = account.Root
-				} else {
-					updates[string(leafKey)] = &accountUpdate{oldRoot: account.Root}
-				}
+				updateLens.update(func(updates accountUpdateMap) {
+					// If B also contains this leaf node, this is the old state of an updated account.
+					if update, ok := updates[string(leafKey)]; ok {
+						update.oldRoot = account.Root
+					} else {
+						updates[string(leafKey)] = &accountUpdate{oldRoot: account.Root}
+					}
+				})
 			} else {
 				// This node was removed, meaning the account was deleted. Emit empty
 				// "removed" records for the state node and all storage all storage slots.
@@ -270,12 +315,14 @@ func (sdb *builder) processAccounts(
 			}
 
 			if symdiff.CommonPath() {
-				// If A also contains this leaf node, this is the new state of an updated account.
-				if update, ok := updates[string(accountW.LeafKey)]; ok {
-					update.new = *accountW
-				} else {
-					updates[string(accountW.LeafKey)] = &accountUpdate{new: *accountW}
-				}
+				updateLens.update(func(updates accountUpdateMap) {
+					// If A also contains this leaf node, this is the new state of an updated account.
+					if update, ok := updates[string(accountW.LeafKey)]; ok {
+						update.new = *accountW
+					} else {
+						updates[string(accountW.LeafKey)] = &accountUpdate{new: *accountW}
+					}
+				})
 			} else { // account was created
 				err := sdb.processAccountCreation(accountW, ipldSink, nodeSink)
 				if err != nil {
@@ -318,24 +365,6 @@ func (sdb *builder) processAccounts(
 		}
 		prevBlob = nodeVal
 	}
 
-	for key, update := range updates {
-		var storageDiff []sdtypes.StorageLeafNode
-		err := sdb.processStorageUpdates(
-			update.oldRoot, update.new.Account.Root,
-			appender(&storageDiff), ipldSink,
-		)
-		if err != nil {
-			return fmt.Errorf("error processing incremental storage diffs for account with leafkey %x\r\nerror: %w", key, err)
-		}
-
-		if err = nodeSink(sdtypes.StateLeafNode{
-			AccountWrapper: update.new,
-			StorageDiff: storageDiff,
-		}); err != nil {
-			return err
-		}
-	}
 	return it.Error()
 }