Index Removed storage diffs on contract destruction
parent 2aaf6bcda3
commit 4855d9304e
@@ -202,7 +202,8 @@ func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args types2.StateRo
     // a map of their leafkey to all the accounts that were touched and exist at A
     diffAccountsAtA, err := sdb.deletedOrUpdatedState(
         oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
-        diffPathsAtB, params.watchedAddressesLeafKeys, output)
+        diffPathsAtB, params.watchedAddressesLeafKeys,
+        params.IntermediateStorageNodes, output)
     if err != nil {
         return fmt.Errorf("error collecting deletedOrUpdatedNodes: %v", err)
     }
@@ -256,7 +257,8 @@ func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args types2.Stat
     // a map of their leafkey to all the accounts that were touched and exist at A
     diffAccountsAtA, err := sdb.deletedOrUpdatedState(
         oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
-        diffPathsAtB, params.watchedAddressesLeafKeys, output)
+        diffPathsAtB, params.watchedAddressesLeafKeys,
+        params.IntermediateStorageNodes, output)
     if err != nil {
         return fmt.Errorf("error collecting deletedOrUpdatedNodes: %v", err)
     }
@@ -386,7 +388,7 @@ func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIt
 
 // deletedOrUpdatedState returns a slice of all the pathes that are emptied at B
 // and a mapping of their leafkeys to all the accounts that exist in a different state at A than B
-func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB map[string]bool, watchedAddressesLeafKeys map[common.Hash]struct{}, output types2.StateNodeSink) (types2.AccountMap, error) {
+func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB map[string]bool, watchedAddressesLeafKeys map[common.Hash]struct{}, intermediateStorageNodes bool, output types2.StateNodeSink) (types2.AccountMap, error) {
     diffAccountAtA := make(types2.AccountMap)
     it, _ := trie.NewDifferenceIterator(b, a)
     for it.Next(true) {
@@ -420,13 +422,23 @@ func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB m
             // if this node's path did not show up in diffPathsAtB
             // that means the node at this path was deleted (or moved) in B
             // emit an empty "removed" diff to signify as such
+            // emit emtpy "removed" diff for all storage nodes
             if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
-                if err := output(types2.StateNode{
-                    Path:      node.Path,
-                    NodeValue: []byte{},
+                diff := types2.StateNode{
                     NodeType:  types2.Removed,
+                    Path:      node.Path,
                     LeafKey:   leafKey,
-                }); err != nil {
+                    NodeValue: []byte{},
+                }
+
+                var storageDiffs []types2.StorageNode
+                err := sdb.buildRemovedAccountStorageNodes(account.Root, intermediateStorageNodes, storageNodeAppender(&storageDiffs))
+                if err != nil {
+                    return nil, fmt.Errorf("failed building storage diffs for removed node %x\r\nerror: %v", node.Path, err)
+                }
+                diff.StorageNodes = storageDiffs
+
+                if err := output(diff); err != nil {
                     return nil, err
                 }
             }
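The Removed state diff above now carries the destroyed account's storage diffs, gathered through a types2.StorageNodeSink. The storageNodeAppender helper passed to buildRemovedAccountStorageNodes is not shown in this diff; a minimal, self-contained sketch of a slice-backed sink of that shape (using simplified stand-in types rather than the real types2 definitions) looks like:

package main

import "fmt"

// Simplified stand-ins for the statediff storage node types (sketch only).
type StorageNode struct {
    NodeType  string
    Path      []byte
    LeafKey   []byte
    NodeValue []byte
}

type StorageNodeSink func(node StorageNode) error

// appendTo mirrors what a storageNodeAppender-style helper presumably does:
// every node emitted into the sink is collected into the backing slice.
func appendTo(nodes *[]StorageNode) StorageNodeSink {
    return func(node StorageNode) error {
        *nodes = append(*nodes, node)
        return nil
    }
}

func main() {
    var storageDiffs []StorageNode
    sink := appendTo(&storageDiffs)
    _ = sink(StorageNode{NodeType: "Removed", Path: []byte{0x02}, NodeValue: []byte{}})
    fmt.Println(len(storageDiffs)) // 1
}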
@@ -548,7 +560,6 @@ func (sdb *builder) buildStorageNodesEventual(sr common.Hash, intermediateNodes
 }
 
 // buildStorageNodesFromTrie returns all the storage diff node objects in the provided node interator
-// if any storage keys are provided it will only return those leaf nodes
 // including intermediate nodes can be turned on or off
 func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, intermediateNodes bool, output types2.StorageNodeSink) error {
     for it.Next(true) {
@@ -591,6 +602,68 @@ func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, intermediate
     return it.Error()
 }
 
+// buildRemovedAccountStorageNodes builds the "removed" diffs for all the storage nodes for a destroyed account
+func (sdb *builder) buildRemovedAccountStorageNodes(sr common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
+    if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
+        return nil
+    }
+    log.Debug("Storage Root For Removed Diffs", "root", sr.Hex())
+    sTrie, err := sdb.stateCache.OpenTrie(sr)
+    if err != nil {
+        log.Info("error in build removed account storage diffs", "error", err)
+        return err
+    }
+    it := sTrie.NodeIterator(make([]byte, 0))
+    err = sdb.buildRemovedStorageNodesFromTrie(it, intermediateNodes, output)
+    if err != nil {
+        return err
+    }
+    return nil
+}
+
+// buildRemovedStorageNodesFromTrie returns diffs for all the storage nodes in the provided node interator
+// including intermediate nodes can be turned on or off
+func (sdb *builder) buildRemovedStorageNodesFromTrie(it trie.NodeIterator, intermediateNodes bool, output types2.StorageNodeSink) error {
+    for it.Next(true) {
+        // skip value nodes
+        if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
+            continue
+        }
+        node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
+        if err != nil {
+            return err
+        }
+        switch node.NodeType {
+        case types2.Leaf:
+            partialPath := trie.CompactToHex(nodeElements[0].([]byte))
+            valueNodePath := append(node.Path, partialPath...)
+            encodedPath := trie.HexToCompact(valueNodePath)
+            leafKey := encodedPath[1:]
+            if err := output(types2.StorageNode{
+                NodeType:  types2.Removed,
+                Path:      node.Path,
+                NodeValue: []byte{},
+                LeafKey:   leafKey,
+            }); err != nil {
+                return err
+            }
+        case types2.Extension, types2.Branch:
+            if intermediateNodes {
+                if err := output(types2.StorageNode{
+                    NodeType:  types2.Removed,
+                    Path:      node.Path,
+                    NodeValue: []byte{},
+                }); err != nil {
+                    return err
+                }
+            }
+        default:
+            return fmt.Errorf("unexpected node type %s", node.NodeType)
+        }
+    }
+    return it.Error()
+}
+
 // buildStorageNodesIncremental builds the storage diff node objects for all nodes that exist in a different state at B than A
 func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
     if bytes.Equal(newSR.Bytes(), oldSR.Bytes()) {
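buildRemovedAccountStorageNodes skips accounts whose storage root equals emptyContractRoot, so accounts that never held storage produce no removed storage diffs. emptyContractRoot is defined elsewhere in the package; assuming it is the root hash of an empty trie, i.e. keccak256 of the RLP encoding of an empty byte string, the sketch below reproduces the expected value with go-ethereum's crypto and rlp packages:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/rlp"
)

func main() {
    // Root of an empty merkle-patricia trie: keccak256(rlp("")) == keccak256(0x80).
    // Assumed to match the emptyContractRoot short-circuit in buildRemovedAccountStorageNodes.
    emptyRLP, err := rlp.EncodeToBytes([]byte{})
    if err != nil {
        panic(err)
    }
    fmt.Println(crypto.Keccak256Hash(emptyRLP).Hex())
    // 0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421
}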
@@ -382,10 +382,11 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
         return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
     }
     // publish the state node
+    var stateModel models.StateNodeModel
     if stateNode.NodeType == sdtypes.Removed {
         // short circuit if it is a Removed node
         // this assumes the db has been initialized and a public.blocks entry for the Removed node is present
-        stateModel := models.StateNodeModel{
+        stateModel = models.StateNodeModel{
             HeaderID: headerID,
             Path:     stateNode.Path,
             StateKey: common.BytesToHash(stateNode.LeafKey).String(),
@@ -393,14 +394,12 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
             MhKey:    shared.RemovedNodeMhKey,
             NodeType: stateNode.NodeType.Int(),
         }
-        _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", stateModel)
-        return err
-    }
+    } else {
         stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
         if err != nil {
             return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
         }
-        stateModel := models.StateNodeModel{
+        stateModel = models.StateNodeModel{
             HeaderID: headerID,
             Path:     stateNode.Path,
             StateKey: common.BytesToHash(stateNode.LeafKey).String(),
@@ -408,10 +407,13 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
             MhKey:    stateMhKey,
             NodeType: stateNode.NodeType.Int(),
         }
+    }
+
     // index the state node, collect the stateID to reference by FK
     if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", stateModel); err != nil {
         return err
     }
+
     // if we have a leaf, decode and index the account data
     if stateNode.NodeType == sdtypes.Leaf {
         var i []interface{}
@@ -437,6 +439,7 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
             return err
         }
     }
+
     // if there are any storage nodes associated with this node, publish and index them
     for _, storageNode := range stateNode.StorageNodes {
         if storageNode.NodeType == sdtypes.Removed {
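The dump indexer hunks above, and the file and SQL indexer hunks that follow, all apply the same refactor: instead of returning early for Removed state nodes, stateModel is declared before the branch and assigned in both arms, so the shared indexing step and the storage-node loop that follow now also run for destroyed accounts. A minimal, generic sketch of the control-flow change (names and types here are illustrative, not the indexer's API):

package main

import "fmt"

// Illustrative stand-in for models.StateNodeModel (not the real struct).
type stateNodeModel struct {
    nodeType string
    mhKey    string
}

// pushStateNode sketches the new control flow: build the model in either branch,
// then fall through to the shared indexing code that an early return previously
// skipped in the Removed case.
func pushStateNode(nodeType string) error {
    var stateModel stateNodeModel
    if nodeType == "Removed" {
        // placeholder row pointing at the pre-seeded removed-node entry
        stateModel = stateNodeModel{nodeType: "Removed", mhKey: "removed-node-mh-key"}
    } else {
        // full row built from the node's RLP (IPLD caching elided in this sketch)
        stateModel = stateNodeModel{nodeType: nodeType, mhKey: "real-mh-key"}
    }
    // shared path: index the state node, then its storage nodes
    fmt.Printf("indexing %+v\n", stateModel)
    return nil
}

func main() {
    _ = pushStateNode("Removed")
    _ = pushStateNode("Leaf")
}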
@@ -392,10 +392,11 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
 // PushStateNode writes a state diff node object (including any child storage nodes) IPLD insert SQL stmt to a file
 func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
     // publish the state node
+    var stateModel models.StateNodeModel
     if stateNode.NodeType == sdtypes.Removed {
         // short circuit if it is a Removed node
         // this assumes the db has been initialized and a public.blocks entry for the Removed node is present
-        stateModel := models.StateNodeModel{
+        stateModel = models.StateNodeModel{
             HeaderID: headerID,
             Path:     stateNode.Path,
             StateKey: common.BytesToHash(stateNode.LeafKey).String(),
@@ -403,14 +404,12 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
             MhKey:    shared.RemovedNodeMhKey,
             NodeType: stateNode.NodeType.Int(),
         }
-        sdi.fileWriter.upsertStateCID(stateModel)
-        return nil
-    }
+    } else {
         stateCIDStr, stateMhKey, err := sdi.fileWriter.upsertIPLDRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
         if err != nil {
             return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
         }
-        stateModel := models.StateNodeModel{
+        stateModel = models.StateNodeModel{
             HeaderID: headerID,
             Path:     stateNode.Path,
             StateKey: common.BytesToHash(stateNode.LeafKey).String(),
@@ -418,8 +417,11 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
             MhKey:    stateMhKey,
             NodeType: stateNode.NodeType.Int(),
         }
+    }
+
     // index the state node
     sdi.fileWriter.upsertStateCID(stateModel)
+
     // if we have a leaf, decode and index the account data
     if stateNode.NodeType == sdtypes.Leaf {
         var i []interface{}
@@ -443,6 +445,7 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
         }
         sdi.fileWriter.upsertStateAccount(accountModel)
     }
+
     // if there are any storage nodes associated with this node, publish and index them
     for _, storageNode := range stateNode.StorageNodes {
         if storageNode.NodeType == sdtypes.Removed {
@@ -440,10 +440,11 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
         return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
     }
     // publish the state node
+    var stateModel models.StateNodeModel
     if stateNode.NodeType == sdtypes.Removed {
         // short circuit if it is a Removed node
         // this assumes the db has been initialized and a public.blocks entry for the Removed node is present
-        stateModel := models.StateNodeModel{
+        stateModel = models.StateNodeModel{
             HeaderID: headerID,
             Path:     stateNode.Path,
             StateKey: common.BytesToHash(stateNode.LeafKey).String(),
@@ -451,13 +452,12 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
             MhKey:    shared.RemovedNodeMhKey,
             NodeType: stateNode.NodeType.Int(),
         }
-        return sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel)
-    }
+    } else {
         stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
         if err != nil {
             return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
         }
-        stateModel := models.StateNodeModel{
+        stateModel = models.StateNodeModel{
             HeaderID: headerID,
             Path:     stateNode.Path,
             StateKey: common.BytesToHash(stateNode.LeafKey).String(),
@@ -465,10 +465,13 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
             MhKey:    stateMhKey,
             NodeType: stateNode.NodeType.Int(),
         }
+    }
+
     // index the state node
     if err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel); err != nil {
         return err
     }
+
     // if we have a leaf, decode and index the account data
     if stateNode.NodeType == sdtypes.Leaf {
         var i []interface{}
@@ -494,6 +497,7 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
             return err
         }
     }
+
     // if there are any storage nodes associated with this node, publish and index them
     for _, storageNode := range stateNode.StorageNodes {
         if storageNode.NodeType == sdtypes.Removed {