Merge pull request #210 from deep-stack/pm-removed-storage-nodes
Add diffs for storage nodes of destroyed contracts
Commit: a9ab76cb33
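In short, the behavioral change: previously, when a contract account was destroyed the builder emitted only an empty "removed" diff for the state leaf itself; with this PR it also walks the contract's storage trie at the old root and emits empty "removed" diffs for every storage slot, and it distinguishes a truly deleted account from one that merely moved in the trie. The sketch below illustrates the shape of the emitted diff using simplified stand-in types and a plain map in place of the real trie iterator; the actual code uses types2.StateNode / types2.StorageNode and the helpers shown in the diff that follows.

package main

import "fmt"

// Simplified stand-ins for the statediff types used in this PR.
type NodeType string

const Removed NodeType = "Removed"

type StorageNode struct {
	Path      []byte
	NodeType  NodeType
	LeafKey   []byte
	NodeValue []byte
}

type StateNode struct {
	Path         []byte
	NodeType     NodeType
	LeafKey      []byte
	NodeValue    []byte
	StorageNodes []StorageNode
}

// buildRemovedAccountStorageNodes mirrors the idea of the new helper added in
// this PR: for a destroyed contract, every storage slot that existed at the old
// root is reported as an empty "removed" diff. Here the old storage trie is
// modeled as a path->leafKey map rather than a real trie iterator.
func buildRemovedAccountStorageNodes(oldStorage map[string][]byte) []StorageNode {
	out := make([]StorageNode, 0, len(oldStorage))
	for path, leafKey := range oldStorage {
		out = append(out, StorageNode{
			Path:      []byte(path),
			NodeType:  Removed,
			LeafKey:   leafKey,
			NodeValue: []byte{},
		})
	}
	return out
}

func main() {
	// Two slots that existed in the contract's storage before self-destruct
	// (hypothetical keys, for illustration only).
	oldStorage := map[string][]byte{
		"\x02": []byte("slot0-leaf-key"),
		"\x0c": []byte("slot3-leaf-key"),
	}
	diff := StateNode{
		Path:         []byte{0x06},
		NodeType:     Removed,
		LeafKey:      []byte("contract-leaf-key"),
		NodeValue:    []byte{},
		StorageNodes: buildRemovedAccountStorageNodes(oldStorage),
	}
	fmt.Printf("%+v\n", diff)
}

The full diff of the change follows.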
@@ -202,7 +202,8 @@ func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args types2.StateRo
 // a map of their leafkey to all the accounts that were touched and exist at A
 diffAccountsAtA, err := sdb.deletedOrUpdatedState(
 oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
-diffPathsAtB, params.watchedAddressesLeafKeys, output)
+diffAccountsAtB, diffPathsAtB, params.watchedAddressesLeafKeys,
+params.IntermediateStorageNodes, output)
 if err != nil {
 return fmt.Errorf("error collecting deletedOrUpdatedNodes: %v", err)
 }
@@ -256,7 +257,8 @@ func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args types2.Stat
 // a map of their leafkey to all the accounts that were touched and exist at A
 diffAccountsAtA, err := sdb.deletedOrUpdatedState(
 oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
-diffPathsAtB, params.watchedAddressesLeafKeys, output)
+diffAccountsAtB, diffPathsAtB, params.watchedAddressesLeafKeys,
+params.IntermediateStorageNodes, output)
 if err != nil {
 return fmt.Errorf("error collecting deletedOrUpdatedNodes: %v", err)
 }
@@ -386,7 +388,7 @@ func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIt
 
 // deletedOrUpdatedState returns a slice of all the pathes that are emptied at B
 // and a mapping of their leafkeys to all the accounts that exist in a different state at A than B
-func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB map[string]bool, watchedAddressesLeafKeys map[common.Hash]struct{}, output types2.StateNodeSink) (types2.AccountMap, error) {
+func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffAccountsAtB types2.AccountMap, diffPathsAtB map[string]bool, watchedAddressesLeafKeys map[common.Hash]struct{}, intermediateStorageNodes bool, output types2.StateNodeSink) (types2.AccountMap, error) {
 diffAccountAtA := make(types2.AccountMap)
 it, _ := trie.NewDifferenceIterator(b, a)
 for it.Next(true) {
@@ -419,14 +421,36 @@ func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB m
 }
 // if this node's path did not show up in diffPathsAtB
 // that means the node at this path was deleted (or moved) in B
-// emit an empty "removed" diff to signify as such
 if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
-if err := output(types2.StateNode{
-Path: node.Path,
-NodeValue: []byte{},
-NodeType: types2.Removed,
-LeafKey: leafKey,
-}); err != nil {
+var diff types2.StateNode
+// if this node's leaf key also did not show up in diffAccountsAtB
+// that means the node was deleted
+// in that case, emit an empty "removed" diff state node
+// include empty "removed" diff storage nodes for all the storage slots
+if _, ok := diffAccountsAtB[common.Bytes2Hex(leafKey)]; !ok {
+diff = types2.StateNode{
+NodeType: types2.Removed,
+Path: node.Path,
+LeafKey: leafKey,
+NodeValue: []byte{},
+}
+
+var storageDiffs []types2.StorageNode
+err := sdb.buildRemovedAccountStorageNodes(account.Root, intermediateStorageNodes, storageNodeAppender(&storageDiffs))
+if err != nil {
+return nil, fmt.Errorf("failed building storage diffs for removed node %x\r\nerror: %v", node.Path, err)
+}
+diff.StorageNodes = storageDiffs
+} else {
+// emit an empty "removed" diff with empty leaf key if the account was moved
+diff = types2.StateNode{
+NodeType: types2.Removed,
+Path: node.Path,
+NodeValue: []byte{},
+}
+}
+
+if err := output(diff); err != nil {
 return nil, err
 }
 }
@@ -548,7 +572,6 @@ func (sdb *builder) buildStorageNodesEventual(sr common.Hash, intermediateNodes
 }
 
 // buildStorageNodesFromTrie returns all the storage diff node objects in the provided node interator
-// if any storage keys are provided it will only return those leaf nodes
 // including intermediate nodes can be turned on or off
 func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, intermediateNodes bool, output types2.StorageNodeSink) error {
 for it.Next(true) {
@@ -591,81 +614,28 @@ func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, intermediate
 return it.Error()
 }
 
-// buildStorageNodesIncremental builds the storage diff node objects for all nodes that exist in a different state at B than A
-func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
-if bytes.Equal(newSR.Bytes(), oldSR.Bytes()) {
+// buildRemovedAccountStorageNodes builds the "removed" diffs for all the storage nodes for a destroyed account
+func (sdb *builder) buildRemovedAccountStorageNodes(sr common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
+if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
 return nil
 }
-log.Debug("Storage Roots for Incremental Diff", "old", oldSR.Hex(), "new", newSR.Hex())
-oldTrie, err := sdb.stateCache.OpenTrie(oldSR)
+log.Debug("Storage Root For Removed Diffs", "root", sr.Hex())
+sTrie, err := sdb.stateCache.OpenTrie(sr)
 if err != nil {
+log.Info("error in build removed account storage diffs", "error", err)
 return err
 }
-newTrie, err := sdb.stateCache.OpenTrie(newSR)
-if err != nil {
-return err
-}
-
-diffPathsAtB, err := sdb.createdAndUpdatedStorage(
-oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
-intermediateNodes, output)
-if err != nil {
-return err
-}
-err = sdb.deletedOrUpdatedStorage(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
-diffPathsAtB, intermediateNodes, output)
+it := sTrie.NodeIterator(make([]byte, 0))
+err = sdb.buildRemovedStorageNodesFromTrie(it, intermediateNodes, output)
 if err != nil {
 return err
 }
 return nil
 }
 
-func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, intermediateNodes bool, output types2.StorageNodeSink) (map[string]bool, error) {
-diffPathsAtB := make(map[string]bool)
-it, _ := trie.NewDifferenceIterator(a, b)
-for it.Next(true) {
-// skip value nodes
-if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
-continue
-}
-node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
-if err != nil {
-return nil, err
-}
-switch node.NodeType {
-case types2.Leaf:
-partialPath := trie.CompactToHex(nodeElements[0].([]byte))
-valueNodePath := append(node.Path, partialPath...)
-encodedPath := trie.HexToCompact(valueNodePath)
-leafKey := encodedPath[1:]
-if err := output(types2.StorageNode{
-NodeType: node.NodeType,
-Path: node.Path,
-NodeValue: node.NodeValue,
-LeafKey: leafKey,
-}); err != nil {
-return nil, err
-}
-case types2.Extension, types2.Branch:
-if intermediateNodes {
-if err := output(types2.StorageNode{
-NodeType: node.NodeType,
-Path: node.Path,
-NodeValue: node.NodeValue,
-}); err != nil {
-return nil, err
-}
-}
-default:
-return nil, fmt.Errorf("unexpected node type %s", node.NodeType)
-}
-diffPathsAtB[common.Bytes2Hex(node.Path)] = true
-}
-return diffPathsAtB, it.Error()
-}
-
-func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB map[string]bool, intermediateNodes bool, output types2.StorageNodeSink) error {
-it, _ := trie.NewDifferenceIterator(b, a)
+// buildRemovedStorageNodesFromTrie returns diffs for all the storage nodes in the provided node interator
+// including intermediate nodes can be turned on or off
+func (sdb *builder) buildRemovedStorageNodesFromTrie(it trie.NodeIterator, intermediateNodes bool, output types2.StorageNodeSink) error {
 for it.Next(true) {
 // skip value nodes
 if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
@@ -675,12 +645,6 @@ func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB
 if err != nil {
 return err
 }
-// if this node path showed up in diffPathsAtB
-// that means this node was updated at B and we already have the updated diff for it
-// otherwise that means this node was deleted in B and we need to add a "removed" diff to represent that event
-if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; ok {
-continue
-}
 switch node.NodeType {
 case types2.Leaf:
 partialPath := trie.CompactToHex(nodeElements[0].([]byte))
@@ -712,6 +676,148 @@ func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB
 return it.Error()
 }
 
+// buildStorageNodesIncremental builds the storage diff node objects for all nodes that exist in a different state at B than A
+func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
+if bytes.Equal(newSR.Bytes(), oldSR.Bytes()) {
+return nil
+}
+log.Debug("Storage Roots for Incremental Diff", "old", oldSR.Hex(), "new", newSR.Hex())
+oldTrie, err := sdb.stateCache.OpenTrie(oldSR)
+if err != nil {
+return err
+}
+newTrie, err := sdb.stateCache.OpenTrie(newSR)
+if err != nil {
+return err
+}
+
+diffSlotsAtB, diffPathsAtB, err := sdb.createdAndUpdatedStorage(
+oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
+intermediateNodes, output)
+if err != nil {
+return err
+}
+err = sdb.deletedOrUpdatedStorage(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
+diffSlotsAtB, diffPathsAtB, intermediateNodes, output)
+if err != nil {
+return err
+}
+return nil
+}
+
+func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, intermediateNodes bool, output types2.StorageNodeSink) (map[string]bool, map[string]bool, error) {
+diffPathsAtB := make(map[string]bool)
+diffSlotsAtB := make(map[string]bool)
+it, _ := trie.NewDifferenceIterator(a, b)
+for it.Next(true) {
+// skip value nodes
+if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
+continue
+}
+node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
+if err != nil {
+return nil, nil, err
+}
+switch node.NodeType {
+case types2.Leaf:
+partialPath := trie.CompactToHex(nodeElements[0].([]byte))
+valueNodePath := append(node.Path, partialPath...)
+encodedPath := trie.HexToCompact(valueNodePath)
+leafKey := encodedPath[1:]
+diffSlotsAtB[common.Bytes2Hex(leafKey)] = true
+if err := output(types2.StorageNode{
+NodeType: node.NodeType,
+Path: node.Path,
+NodeValue: node.NodeValue,
+LeafKey: leafKey,
+}); err != nil {
+return nil, nil, err
+}
+case types2.Extension, types2.Branch:
+if intermediateNodes {
+if err := output(types2.StorageNode{
+NodeType: node.NodeType,
+Path: node.Path,
+NodeValue: node.NodeValue,
+}); err != nil {
+return nil, nil, err
+}
+}
+default:
+return nil, nil, fmt.Errorf("unexpected node type %s", node.NodeType)
+}
+diffPathsAtB[common.Bytes2Hex(node.Path)] = true
+}
+return diffSlotsAtB, diffPathsAtB, it.Error()
+}
+
+func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffSlotsAtB, diffPathsAtB map[string]bool, intermediateNodes bool, output types2.StorageNodeSink) error {
+it, _ := trie.NewDifferenceIterator(b, a)
+for it.Next(true) {
+// skip value nodes
+if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
+continue
+}
+node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
+if err != nil {
+return err
+}
+
+switch node.NodeType {
+case types2.Leaf:
+partialPath := trie.CompactToHex(nodeElements[0].([]byte))
+valueNodePath := append(node.Path, partialPath...)
+encodedPath := trie.HexToCompact(valueNodePath)
+leafKey := encodedPath[1:]
+
+// if this node's path did not show up in diffPathsAtB
+// that means the node at this path was deleted (or moved) in B
+if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
+// if this node's leaf key also did not show up in diffSlotsAtB
+// that means the node was deleted
+// in that case, emit an empty "removed" diff storage node
+if _, ok := diffSlotsAtB[common.Bytes2Hex(leafKey)]; !ok {
+if err := output(types2.StorageNode{
+NodeType: types2.Removed,
+Path: node.Path,
+NodeValue: []byte{},
+LeafKey: leafKey,
+}); err != nil {
+return err
+}
+} else {
+// emit an empty "removed" diff with empty leaf key if the account was moved
+if err := output(types2.StorageNode{
+NodeType: types2.Removed,
+Path: node.Path,
+NodeValue: []byte{},
+}); err != nil {
+return err
+}
+}
+}
+case types2.Extension, types2.Branch:
+// if this node's path did not show up in diffPathsAtB
+// that means the node at this path was deleted in B
+// in that case, emit an empty "removed" diff storage node
+if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
+if intermediateNodes {
+if err := output(types2.StorageNode{
+NodeType: types2.Removed,
+Path: node.Path,
+NodeValue: []byte{},
+}); err != nil {
+return err
+}
+}
+}
+default:
+return fmt.Errorf("unexpected node type %s", node.NodeType)
+}
+}
+return it.Error()
+}
+
 // isWatchedAddress is used to check if a state account corresponds to one of the addresses the builder is configured to watch
 func isWatchedAddress(watchedAddressesLeafKeys map[common.Hash]struct{}, stateLeafKey []byte) bool {
 // If we aren't watching any specific addresses, we are watching everything
@@ -74,10 +74,6 @@ var (
 common.Hex2Bytes("32575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b"),
 slot3StorageValue,
 })
-slot0StorageLeafRootNode, _ = rlp.EncodeToBytes([]interface{}{
-common.Hex2Bytes("20290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
-slot0StorageValue,
-})
 
 contractAccountAtBlock2, _ = rlp.EncodeToBytes(types.StateAccount{
 Nonce: 1,
@@ -113,7 +109,7 @@ var (
 Nonce: 1,
 Balance: big.NewInt(0),
 CodeHash: common.HexToHash("0xaaea5efba4fd7b45d7ec03918ac5d8b31aa93b48986af0e6b591f0f087c80127").Bytes(),
-Root: crypto.Keccak256Hash(slot0StorageLeafRootNode),
+Root: crypto.Keccak256Hash(block5StorageBranchRootNode),
 })
 contractAccountAtBlock5LeafNode, _ = rlp.EncodeToBytes([]interface{}{
 common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45"),
@@ -163,7 +159,7 @@ var (
 })
 account1AtBlock5, _ = rlp.EncodeToBytes(types.StateAccount{
 Nonce: 2,
-Balance: big.NewInt(2999566008847709960),
+Balance: big.NewInt(2999586469962854280),
 CodeHash: test_helpers.NullCodeHash.Bytes(),
 Root: test_helpers.EmptyContractRoot,
 })
@@ -173,7 +169,7 @@ var (
 })
 account1AtBlock6, _ = rlp.EncodeToBytes(types.StateAccount{
 Nonce: 3,
-Balance: big.NewInt(2999537516847709960),
+Balance: big.NewInt(2999557977962854280),
 CodeHash: test_helpers.NullCodeHash.Bytes(),
 Root: test_helpers.EmptyContractRoot,
 })
@@ -214,7 +210,7 @@ var (
 })
 account2AtBlock6, _ = rlp.EncodeToBytes(types.StateAccount{
 Nonce: 0,
-Balance: big.NewInt(6000063293259748636),
+Balance: big.NewInt(6000063258066544204),
 CodeHash: test_helpers.NullCodeHash.Bytes(),
 Root: test_helpers.EmptyContractRoot,
 })
@@ -278,8 +274,8 @@ var (
 bankAccountAtBlock4,
 })
 bankAccountAtBlock5, _ = rlp.EncodeToBytes(types.StateAccount{
-Nonce: 7,
-Balance: big.NewInt(999805027999990000),
+Nonce: 8,
+Balance: big.NewInt(999761283999990000),
 CodeHash: test_helpers.NullCodeHash.Bytes(),
 Root: test_helpers.EmptyContractRoot,
 })
@@ -460,6 +456,25 @@ var (
 []byte{},
 []byte{},
 })
+block5StorageBranchRootNode, _ = rlp.EncodeToBytes([]interface{}{
+[]byte{},
+[]byte{},
+crypto.Keccak256(slot0StorageLeafNode),
+[]byte{},
+[]byte{},
+[]byte{},
+[]byte{},
+[]byte{},
+[]byte{},
+[]byte{},
+[]byte{},
+[]byte{},
+crypto.Keccak256(slot3StorageLeafNode),
+[]byte{},
+[]byte{},
+[]byte{},
+[]byte{},
+})
 )
 
 func init() {
@@ -1272,15 +1287,14 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
 StorageNodes: []types2.StorageNode{
 {
 Path: []byte{},
-NodeType: types2.Leaf,
-NodeValue: slot0StorageLeafRootNode,
-LeafKey: slot0StorageKey.Bytes(),
+NodeType: types2.Branch,
+NodeValue: block5StorageBranchRootNode,
 },
 {
-Path: []byte{'\x02'},
-NodeType: types2.Removed,
-LeafKey: slot0StorageKey.Bytes(),
-NodeValue: []byte{},
+Path: []byte{'\x0c'},
+NodeType: types2.Leaf,
+LeafKey: slot3StorageKey.Bytes(),
+NodeValue: slot3StorageLeafNode,
 },
 {
 Path: []byte{'\x04'},
@@ -1319,11 +1333,29 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
 StorageNodes: emptyStorage,
 },
 {
 Path: []byte{'\x06'},
 NodeType: types2.Removed,
 LeafKey: contractLeafKey,
 NodeValue: []byte{},
-StorageNodes: emptyStorage,
+StorageNodes: []types2.StorageNode{
+{
+Path: []byte{},
+NodeType: types2.Removed,
+NodeValue: []byte{},
+},
+{
+Path: []byte{'\x02'},
+NodeType: types2.Removed,
+LeafKey: slot0StorageKey.Bytes(),
+NodeValue: []byte{},
+},
+{
+Path: []byte{'\x0c'},
+NodeType: types2.Removed,
+LeafKey: slot3StorageKey.Bytes(),
+NodeValue: []byte{},
+},
+},
 },
 {
 Path: []byte{'\x0c'},
@@ -1467,16 +1499,10 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.
 NodeValue: contractAccountAtBlock5LeafNode,
 StorageNodes: []types2.StorageNode{
 {
-Path: []byte{},
+Path: []byte{'\x0c'},
 NodeType: types2.Leaf,
-LeafKey: slot0StorageKey.Bytes(),
-NodeValue: slot0StorageLeafRootNode,
-},
-{
-Path: []byte{'\x02'},
-NodeType: types2.Removed,
-LeafKey: slot0StorageKey.Bytes(),
-NodeValue: []byte{},
+LeafKey: slot3StorageKey.Bytes(),
+NodeValue: slot3StorageLeafNode,
 },
 {
 Path: []byte{'\x04'},
@@ -1513,6 +1539,20 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.
 NodeType: types2.Removed,
 LeafKey: contractLeafKey,
 NodeValue: []byte{},
+StorageNodes: []types2.StorageNode{
+{
+Path: []byte{'\x02'},
+NodeType: types2.Removed,
+LeafKey: slot0StorageKey.Bytes(),
+NodeValue: []byte{},
+},
+{
+Path: []byte{'\x0c'},
+NodeType: types2.Removed,
+LeafKey: slot3StorageKey.Bytes(),
+NodeValue: []byte{},
+},
+},
 },
 {
 Path: []byte{'\x0c'},
@@ -1754,16 +1794,10 @@ func TestBuilderWithRemovedWatchedAccount(t *testing.T) {
 NodeValue: contractAccountAtBlock5LeafNode,
 StorageNodes: []types2.StorageNode{
 {
-Path: []byte{},
+Path: []byte{'\x0c'},
 NodeType: types2.Leaf,
-LeafKey: slot0StorageKey.Bytes(),
-NodeValue: slot0StorageLeafRootNode,
-},
-{
-Path: []byte{'\x02'},
-NodeType: types2.Removed,
-LeafKey: slot0StorageKey.Bytes(),
-NodeValue: []byte{},
+LeafKey: slot3StorageKey.Bytes(),
+NodeValue: slot3StorageLeafNode,
 },
 {
 Path: []byte{'\x04'},
@@ -1800,6 +1834,20 @@ func TestBuilderWithRemovedWatchedAccount(t *testing.T) {
 NodeType: types2.Removed,
 LeafKey: contractLeafKey,
 NodeValue: []byte{},
+StorageNodes: []types2.StorageNode{
+{
+Path: []byte{'\x02'},
+NodeType: types2.Removed,
+LeafKey: slot0StorageKey.Bytes(),
+NodeValue: []byte{},
+},
+{
+Path: []byte{'\x0c'},
+NodeType: types2.Removed,
+LeafKey: slot3StorageKey.Bytes(),
+NodeValue: []byte{},
+},
+},
 },
 {
 Path: []byte{'\x0e'},
@@ -2019,11 +2067,26 @@ func TestBuilderWithMovedAccount(t *testing.T) {
 NodeType: types2.Removed,
 LeafKey: contractLeafKey,
 NodeValue: []byte{},
+StorageNodes: []types2.StorageNode{
+{
+Path: []byte{},
+NodeType: types2.Removed,
+},
+{
+Path: []byte{'\x02'},
+NodeType: types2.Removed,
+LeafKey: slot0StorageKey.Bytes(),
+},
+{
+Path: []byte{'\x0b'},
+NodeType: types2.Removed,
+LeafKey: slot1StorageKey.Bytes(),
+},
+},
 },
 {
 Path: []byte{'\x00'},
 NodeType: types2.Removed,
-LeafKey: test_helpers.BankLeafKey,
 NodeValue: []byte{},
 },
 },
@@ -2144,11 +2207,22 @@ func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) {
 NodeType: types2.Removed,
 LeafKey: contractLeafKey,
 NodeValue: []byte{},
+StorageNodes: []types2.StorageNode{
+{
+Path: []byte{'\x02'},
+NodeType: types2.Removed,
+LeafKey: slot0StorageKey.Bytes(),
+},
+{
+Path: []byte{'\x0b'},
+NodeType: types2.Removed,
+LeafKey: slot1StorageKey.Bytes(),
+},
+},
 },
 {
 Path: []byte{'\x00'},
 NodeType: types2.Removed,
-LeafKey: test_helpers.BankLeafKey,
 NodeValue: []byte{},
 },
 },
@@ -382,10 +382,11 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
 return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
 }
 // publish the state node
+var stateModel models.StateNodeModel
 if stateNode.NodeType == sdtypes.Removed {
 // short circuit if it is a Removed node
 // this assumes the db has been initialized and a public.blocks entry for the Removed node is present
-stateModel := models.StateNodeModel{
+stateModel = models.StateNodeModel{
 HeaderID: headerID,
 Path: stateNode.Path,
 StateKey: common.BytesToHash(stateNode.LeafKey).String(),
@@ -393,25 +394,26 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
 MhKey: shared.RemovedNodeMhKey,
 NodeType: stateNode.NodeType.Int(),
 }
-_, err := fmt.Fprintf(sdi.dump, "%+v\r\n", stateModel)
-return err
-}
-stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
-if err != nil {
-return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
-}
-stateModel := models.StateNodeModel{
-HeaderID: headerID,
-Path: stateNode.Path,
-StateKey: common.BytesToHash(stateNode.LeafKey).String(),
-CID: stateCIDStr,
-MhKey: stateMhKey,
-NodeType: stateNode.NodeType.Int(),
+} else {
+stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
+if err != nil {
+return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
+}
+stateModel = models.StateNodeModel{
+HeaderID: headerID,
+Path: stateNode.Path,
+StateKey: common.BytesToHash(stateNode.LeafKey).String(),
+CID: stateCIDStr,
+MhKey: stateMhKey,
+NodeType: stateNode.NodeType.Int(),
+}
 }
+
 // index the state node, collect the stateID to reference by FK
 if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", stateModel); err != nil {
 return err
 }
+
 // if we have a leaf, decode and index the account data
 if stateNode.NodeType == sdtypes.Leaf {
 var i []interface{}
@@ -437,6 +439,7 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
 return err
 }
 }
+
 // if there are any storage nodes associated with this node, publish and index them
 for _, storageNode := range stateNode.StorageNodes {
 if storageNode.NodeType == sdtypes.Removed {
@@ -392,10 +392,11 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
 // PushStateNode writes a state diff node object (including any child storage nodes) IPLD insert SQL stmt to a file
 func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
 // publish the state node
+var stateModel models.StateNodeModel
 if stateNode.NodeType == sdtypes.Removed {
 // short circuit if it is a Removed node
 // this assumes the db has been initialized and a public.blocks entry for the Removed node is present
-stateModel := models.StateNodeModel{
+stateModel = models.StateNodeModel{
 HeaderID: headerID,
 Path: stateNode.Path,
 StateKey: common.BytesToHash(stateNode.LeafKey).String(),
@@ -403,23 +404,24 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
 MhKey: shared.RemovedNodeMhKey,
 NodeType: stateNode.NodeType.Int(),
 }
-sdi.fileWriter.upsertStateCID(stateModel)
-return nil
-}
-stateCIDStr, stateMhKey, err := sdi.fileWriter.upsertIPLDRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
-if err != nil {
-return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
-}
-stateModel := models.StateNodeModel{
-HeaderID: headerID,
-Path: stateNode.Path,
-StateKey: common.BytesToHash(stateNode.LeafKey).String(),
-CID: stateCIDStr,
-MhKey: stateMhKey,
-NodeType: stateNode.NodeType.Int(),
+} else {
+stateCIDStr, stateMhKey, err := sdi.fileWriter.upsertIPLDRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
+if err != nil {
+return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
+}
+stateModel = models.StateNodeModel{
+HeaderID: headerID,
+Path: stateNode.Path,
+StateKey: common.BytesToHash(stateNode.LeafKey).String(),
+CID: stateCIDStr,
+MhKey: stateMhKey,
+NodeType: stateNode.NodeType.Int(),
+}
 }
+
 // index the state node
 sdi.fileWriter.upsertStateCID(stateModel)
+
 // if we have a leaf, decode and index the account data
 if stateNode.NodeType == sdtypes.Leaf {
 var i []interface{}
@@ -443,6 +445,7 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
 }
 sdi.fileWriter.upsertStateAccount(accountModel)
 }
+
 // if there are any storage nodes associated with this node, publish and index them
 for _, storageNode := range stateNode.StorageNodes {
 if storageNode.NodeType == sdtypes.Removed {
@@ -625,23 +625,34 @@ func TestFileIndexer(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-test_helpers.ExpectEqual(t, len(stateNodes), 1)
-stateNode := stateNodes[0]
+test_helpers.ExpectEqual(t, len(stateNodes), 2)
+for idx, stateNode := range stateNodes {
 var data []byte
 dc, err := cid.Decode(stateNode.CID)
 if err != nil {
 t.Fatal(err)
+}
+mhKey := dshelp.MultihashToDsKey(dc.Hash())
+prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
+if err != nil {
+t.Fatal(err)
+}
+
+if idx == 0 {
+test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
+test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.RemovedLeafKey).Hex())
+test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
+test_helpers.ExpectEqual(t, data, []byte{})
+}
+if idx == 1 {
+test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
+test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.Contract2LeafKey).Hex())
+test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x07'})
+test_helpers.ExpectEqual(t, data, []byte{})
+}
 }
-mhKey := dshelp.MultihashToDsKey(dc.Hash())
-prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
-test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
-err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
-if err != nil {
-t.Fatal(err)
-}
-test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
-test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
-test_helpers.ExpectEqual(t, data, []byte{})
 })
 
 t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
@@ -694,26 +705,45 @@ func TestFileIndexer(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-test_helpers.ExpectEqual(t, len(storageNodes), 1)
-test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
-CID: shared.RemovedNodeStorageCID,
-NodeType: 3,
-StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
-StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
-Path: []byte{'\x03'},
-})
-dc, err = cid.Decode(storageNodes[0].CID)
-if err != nil {
-t.Fatal(err)
+test_helpers.ExpectEqual(t, len(storageNodes), 3)
+expectedStorageNodes := []models.StorageNodeWithStateKeyModel{
+{
+CID: shared.RemovedNodeStorageCID,
+NodeType: 3,
+StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
+StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
+Path: []byte{'\x03'},
+},
+{
+CID: shared.RemovedNodeStorageCID,
+NodeType: 3,
+StorageKey: common.BytesToHash(mocks.Storage2LeafKey).Hex(),
+StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
+Path: []byte{'\x0e'},
+},
+{
+CID: shared.RemovedNodeStorageCID,
+NodeType: 3,
+StorageKey: common.BytesToHash(mocks.Storage3LeafKey).Hex(),
+StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
+Path: []byte{'\x0f'},
+},
 }
-mhKey = dshelp.MultihashToDsKey(dc.Hash())
-prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
-test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
-err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
-if err != nil {
-t.Fatal(err)
+for idx, storageNode := range storageNodes {
+test_helpers.ExpectEqual(t, storageNode, expectedStorageNodes[idx])
+dc, err = cid.Decode(storageNode.CID)
+if err != nil {
+t.Fatal(err)
+}
+mhKey = dshelp.MultihashToDsKey(dc.Hash())
+prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
+test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
+if err != nil {
+t.Fatal(err)
+}
+test_helpers.ExpectEqual(t, data, []byte{})
 }
-test_helpers.ExpectEqual(t, data, []byte{})
 })
 }
 
@@ -440,10 +440,11 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
 return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
 }
 // publish the state node
+var stateModel models.StateNodeModel
 if stateNode.NodeType == sdtypes.Removed {
 // short circuit if it is a Removed node
 // this assumes the db has been initialized and a public.blocks entry for the Removed node is present
-stateModel := models.StateNodeModel{
+stateModel = models.StateNodeModel{
 HeaderID: headerID,
 Path: stateNode.Path,
 StateKey: common.BytesToHash(stateNode.LeafKey).String(),
@@ -451,24 +452,26 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
 MhKey: shared.RemovedNodeMhKey,
 NodeType: stateNode.NodeType.Int(),
 }
-return sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel)
-}
-stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
-if err != nil {
-return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
-}
-stateModel := models.StateNodeModel{
-HeaderID: headerID,
-Path: stateNode.Path,
-StateKey: common.BytesToHash(stateNode.LeafKey).String(),
-CID: stateCIDStr,
-MhKey: stateMhKey,
-NodeType: stateNode.NodeType.Int(),
+} else {
+stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
+if err != nil {
+return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
+}
+stateModel = models.StateNodeModel{
+HeaderID: headerID,
+Path: stateNode.Path,
+StateKey: common.BytesToHash(stateNode.LeafKey).String(),
+CID: stateCIDStr,
+MhKey: stateMhKey,
+NodeType: stateNode.NodeType.Int(),
+}
 }
+
 // index the state node
 if err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel); err != nil {
 return err
 }
+
 // if we have a leaf, decode and index the account data
 if stateNode.NodeType == sdtypes.Leaf {
 var i []interface{}
@@ -494,6 +497,7 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
 return err
 }
 }
+
 // if there are any storage nodes associated with this node, publish and index them
 for _, storageNode := range stateNode.StorageNodes {
 if storageNode.NodeType == sdtypes.Removed {
@@ -473,23 +473,34 @@ func TestPGXIndexer(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-test_helpers.ExpectEqual(t, len(stateNodes), 1)
-stateNode := stateNodes[0]
+test_helpers.ExpectEqual(t, len(stateNodes), 2)
+for idx, stateNode := range stateNodes {
 var data []byte
 dc, err := cid.Decode(stateNode.CID)
 if err != nil {
 t.Fatal(err)
+}
+mhKey := dshelp.MultihashToDsKey(dc.Hash())
+prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+if err != nil {
+t.Fatal(err)
+}
+
+if idx == 0 {
+test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
+test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.RemovedLeafKey).Hex())
+test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
+test_helpers.ExpectEqual(t, data, []byte{})
+}
+if idx == 1 {
+test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
+test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.Contract2LeafKey).Hex())
+test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x07'})
+test_helpers.ExpectEqual(t, data, []byte{})
+}
 }
-mhKey := dshelp.MultihashToDsKey(dc.Hash())
-prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
-test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
-err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
-if err != nil {
-t.Fatal(err)
-}
-test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
-test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
-test_helpers.ExpectEqual(t, data, []byte{})
 })
 
 t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
@@ -541,26 +552,45 @@ func TestPGXIndexer(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-test_helpers.ExpectEqual(t, len(storageNodes), 1)
-test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
-CID: shared.RemovedNodeStorageCID,
-NodeType: 3,
-StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
-StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
-Path: []byte{'\x03'},
-})
-dc, err = cid.Decode(storageNodes[0].CID)
-if err != nil {
-t.Fatal(err)
+test_helpers.ExpectEqual(t, len(storageNodes), 3)
+expectedStorageNodes := []models.StorageNodeWithStateKeyModel{
+{
+CID: shared.RemovedNodeStorageCID,
+NodeType: 3,
+StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
+StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
+Path: []byte{'\x03'},
+},
+{
+CID: shared.RemovedNodeStorageCID,
+NodeType: 3,
+StorageKey: common.BytesToHash(mocks.Storage2LeafKey).Hex(),
+StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
+Path: []byte{'\x0e'},
+},
+{
+CID: shared.RemovedNodeStorageCID,
+NodeType: 3,
+StorageKey: common.BytesToHash(mocks.Storage3LeafKey).Hex(),
+StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
+Path: []byte{'\x0f'},
+},
 }
-mhKey = dshelp.MultihashToDsKey(dc.Hash())
-prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
-test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
-err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
-if err != nil {
-t.Fatal(err)
+for idx, storageNode := range storageNodes {
+test_helpers.ExpectEqual(t, storageNode, expectedStorageNodes[idx])
+dc, err = cid.Decode(storageNode.CID)
+if err != nil {
+t.Fatal(err)
+}
+mhKey = dshelp.MultihashToDsKey(dc.Hash())
+prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
+test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+if err != nil {
+t.Fatal(err)
+}
+test_helpers.ExpectEqual(t, data, []byte{})
 }
-test_helpers.ExpectEqual(t, data, []byte{})
 })
 }
 
@@ -466,23 +466,34 @@ func TestSQLXIndexer(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-test_helpers.ExpectEqual(t, len(stateNodes), 1)
-stateNode := stateNodes[0]
+test_helpers.ExpectEqual(t, len(stateNodes), 2)
+for idx, stateNode := range stateNodes {
 var data []byte
 dc, err := cid.Decode(stateNode.CID)
 if err != nil {
 t.Fatal(err)
+}
+mhKey := dshelp.MultihashToDsKey(dc.Hash())
+prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+if err != nil {
+t.Fatal(err)
+}
+
+if idx == 0 {
+test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
+test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.RemovedLeafKey).Hex())
+test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
+test_helpers.ExpectEqual(t, data, []byte{})
+}
+if idx == 1 {
+test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
+test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.Contract2LeafKey).Hex())
+test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x07'})
+test_helpers.ExpectEqual(t, data, []byte{})
+}
 }
-mhKey := dshelp.MultihashToDsKey(dc.Hash())
-prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
-test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
-err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
-if err != nil {
-t.Fatal(err)
-}
-test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
-test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
-test_helpers.ExpectEqual(t, data, []byte{})
 })
 
 t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
@@ -534,26 +545,45 @@ func TestSQLXIndexer(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-test_helpers.ExpectEqual(t, len(storageNodes), 1)
-test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
-CID: shared.RemovedNodeStorageCID,
-NodeType: 3,
-StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
-StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
-Path: []byte{'\x03'},
-})
-dc, err = cid.Decode(storageNodes[0].CID)
-if err != nil {
-t.Fatal(err)
+test_helpers.ExpectEqual(t, len(storageNodes), 3)
+expectedStorageNodes := []models.StorageNodeWithStateKeyModel{
+{
+CID: shared.RemovedNodeStorageCID,
+NodeType: 3,
+StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
+StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
+Path: []byte{'\x03'},
+},
+{
+CID: shared.RemovedNodeStorageCID,
+NodeType: 3,
+StorageKey: common.BytesToHash(mocks.Storage2LeafKey).Hex(),
+StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
+Path: []byte{'\x0e'},
+},
+{
+CID: shared.RemovedNodeStorageCID,
+NodeType: 3,
+StorageKey: common.BytesToHash(mocks.Storage3LeafKey).Hex(),
+StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
+Path: []byte{'\x0f'},
+},
 }
-mhKey = dshelp.MultihashToDsKey(dc.Hash())
-prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
-test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
-err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
-if err != nil {
-t.Fatal(err)
+for idx, storageNode := range storageNodes {
+test_helpers.ExpectEqual(t, storageNode, expectedStorageNodes[idx])
+dc, err = cid.Decode(storageNode.CID)
+if err != nil {
+t.Fatal(err)
+}
+mhKey = dshelp.MultihashToDsKey(dc.Hash())
+prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
+test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+if err != nil {
+t.Fatal(err)
+}
+test_helpers.ExpectEqual(t, data, []byte{})
 }
-test_helpers.ExpectEqual(t, data, []byte{})
 })
 }
 
@@ -57,6 +57,7 @@ var (
 Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
 AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
 ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce())
+ContractAddress2 = crypto.CreateAddress(SenderAddr, MockTransactions[3].Nonce())
 MockContractByteCode = []byte{0, 1, 2, 3, 4, 5}
 mockTopic11 = common.HexToHash("0x04")
 mockTopic12 = common.HexToHash("0x06")
@@ -143,6 +144,12 @@ var (
 ContractAccount,
 })
 
+Contract2LeafKey = test_helpers.AddressToLeafKey(ContractAddress2)
+storage2Location = common.HexToHash("2")
+Storage2LeafKey = crypto.Keccak256Hash(storage2Location[:]).Bytes()
+storage3Location = common.HexToHash("3")
+Storage3LeafKey = crypto.Keccak256Hash(storage3Location[:]).Bytes()
+
 nonce0 = uint64(0)
 AccountRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
 AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
@@ -194,6 +201,26 @@ var (
 LeafKey: RemovedLeafKey,
 NodeValue: []byte{},
 },
+{
+Path: []byte{'\x07'},
+NodeType: sdtypes.Removed,
+LeafKey: Contract2LeafKey,
+NodeValue: []byte{},
+StorageNodes: []sdtypes.StorageNode{
+{
+Path: []byte{'\x0e'},
+NodeType: sdtypes.Removed,
+LeafKey: Storage2LeafKey,
+NodeValue: []byte{},
+},
+{
+Path: []byte{'\x0f'},
+NodeType: sdtypes.Removed,
+LeafKey: Storage3LeafKey,
+NodeValue: []byte{},
+},
+},
+},
 }
 )
 
@@ -402,7 +402,7 @@ func testGetSyncStatus(t *testing.T) {
 t.Fatal("Sync Failed")
 }
 
-time.Sleep(1 * time.Second)
+time.Sleep(2 * time.Second)
 
 // Make sure if syncStatus is false that WaitForSync has completed!
 if !syncStatus && len(checkSyncComplete) == 0 {
@@ -106,13 +106,17 @@ func TestChainGen(i int, block *core.BlockGen) {
 block.AddTx(tx3)
 case 4:
 // Block 5 has one tx from bankAccount to the contract, that transfers no value
-// It sets the remaining storage value to zero
+// It sets the one storage value to zero and the other to new value.
 // Block 5 is mined by Account1Addr
 block.SetCoinbase(Account1Addr)
-data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000")
+data1 := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000")
+data2 := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003")
 nonce := block.TxNonce(TestBankAddress)
-tx, _ := types.SignTx(types.NewTransaction(nonce, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data), signer, TestBankKey)
-block.AddTx(tx)
+tx1, _ := types.SignTx(types.NewTransaction(nonce, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data1), signer, TestBankKey)
+nonce++
+tx2, _ := types.SignTx(types.NewTransaction(nonce, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data2), signer, TestBankKey)
+block.AddTx(tx1)
+block.AddTx(tx2)
 case 5:
 // Block 6 has a tx from Account1Key which self-destructs the contract, it transfers no value
 // Block 6 is mined by Account2Addr