diff --git a/statediff/indexer/database/file/indexer.go b/statediff/indexer/database/file/indexer.go
index 5ddb3f5b9..11c02e7b6 100644
--- a/statediff/indexer/database/file/indexer.go
+++ b/statediff/indexer/database/file/indexer.go
@@ -27,7 +27,6 @@ import (
   "sync/atomic"
   "time"

-  "github.com/ipfs/go-cid"
   node "github.com/ipfs/go-ipld-format"
   "github.com/multiformats/go-multihash"
   pg_query "github.com/pganalyze/pg_query_go/v2"
@@ -124,16 +123,13 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
   }

   // Generate the block iplds
-  headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
+  headerNode, uncleNodes, txNodes, rctNodes, logNodes, err := ipld2.FromBlockAndReceipts(block, receipts)
   if err != nil {
     return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
   }
-  if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
-    return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
-  }
-  if len(txTrieNodes) != len(rctTrieNodes) {
-    return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
+  if len(txNodes) != len(rctNodes) {
+    return nil, fmt.Errorf("expected number of transactions (%d) and receipts (%d) to be equal", len(txNodes), len(rctNodes))
   }

   // Calculate reward
@@ -188,12 +184,8 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
     receipts: receipts,
     txs: transactions,
     rctNodes: rctNodes,
-    rctTrieNodes: rctTrieNodes,
     txNodes: txNodes,
-    txTrieNodes: txTrieNodes,
-    logTrieNodes: logTrieNodes,
-    logLeafNodeCIDs: logLeafNodeCIDs,
-    rctLeafNodeCIDs: rctLeafNodeCIDs,
+    logNodes: logNodes,
   })
   if err != nil {
     return nil, err
@@ -268,12 +260,8 @@ type processArgs struct {
   receipts types.Receipts
   txs types.Transactions
   rctNodes []*ipld2.EthReceipt
-  rctTrieNodes []*ipld2.EthRctTrie
   txNodes []*ipld2.EthTx
-  txTrieNodes []*ipld2.EthTxTrie
-  logTrieNodes [][]node.Node
-  logLeafNodeCIDs [][]cid.Cid
-  rctLeafNodeCIDs []cid.Cid
+  logNodes [][]*ipld2.EthLog
 }

 // processReceiptsAndTxs writes receipt and tx IPLD insert SQL stmts to a file
@@ -281,9 +269,6 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
   // Process receipts and txs
   signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
   for i, receipt := range args.receipts {
-    for _, logTrieNode := range args.logTrieNodes[i] {
-      sdi.fileWriter.upsertIPLDNode(sdi.blockNumber, logTrieNode)
-    }
     txNode := args.txNodes[i]
     sdi.fileWriter.upsertIPLDNode(sdi.blockNumber, txNode)
@@ -301,6 +286,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
     if err != nil {
       return fmt.Errorf("error deriving tx sender: %v", err)
     }
+    txCid := txNode.Cid()
     txModel := models.TxModel{
       BlockNumber: sdi.blockNumber,
       HeaderID: args.headerID,
@@ -309,8 +295,8 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
       TxHash: txID,
       Index: int64(i),
       Data: trx.Data(),
-      CID: txNode.Cid().String(),
-      MhKey: shared.MultihashKeyFromCID(txNode.Cid()),
+      CID: txCid.String(),
+      MhKey: shared.MultihashKeyFromCID(txCid),
       Type: trx.Type(),
       Value: val,
     }
@@ -338,20 +324,14 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
     if contract != "" {
       contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
     }
-
-    // index receipt
-    if !args.rctLeafNodeCIDs[i].Defined() {
-      return fmt.Errorf("invalid receipt leaf node cid")
-    }
-
+    rctCid := args.rctNodes[i].Cid()
     rctModel := &models.ReceiptModel{
       BlockNumber: sdi.blockNumber,
       TxID: txID,
       Contract: contract,
       ContractHash: contractHash,
-      LeafCID: args.rctLeafNodeCIDs[i].String(),
-      LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]),
-      LogRoot: args.rctNodes[i].LogRoot.String(),
+      CID: rctCid.String(),
+      MhKey: shared.MultihashKeyFromCID(rctCid),
     }
     if len(receipt.PostState) == 0 {
       rctModel.PostStatus = receipt.Status
@@ -367,19 +347,15 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
       for ti, topic := range l.Topics {
        topicSet[ti] = topic.Hex()
       }
-
-      if !args.logLeafNodeCIDs[i][idx].Defined() {
-        return fmt.Errorf("invalid log cid")
-      }
-
+      logCid := args.logNodes[i][idx].Cid()
       logDataSet[idx] = &models.LogsModel{
        BlockNumber: sdi.blockNumber,
        ReceiptID: txID,
        Address: l.Address.String(),
        Index: int64(l.Index),
        Data: l.Data,
-        LeafCID: args.logLeafNodeCIDs[i][idx].String(),
-        LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]),
+        CID: logCid.String(),
+        MhKey: shared.MultihashKeyFromCID(logCid),
        Topic0: topicSet[0],
        Topic1: topicSet[1],
        Topic2: topicSet[2],
@@ -389,54 +365,32 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
     sdi.fileWriter.upsertLogCID(logDataSet)
   }

-  // publish trie nodes, these aren't indexed directly
-  for i, n := range args.txTrieNodes {
-    sdi.fileWriter.upsertIPLDNode(sdi.blockNumber, n)
-    sdi.fileWriter.upsertIPLDNode(sdi.blockNumber, args.rctTrieNodes[i])
-  }
-
   return nil
 }

 // PushStateNode writes a state diff node object (including any child storage nodes) IPLD insert SQL stmt to a file
 func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
   // publish the state node
-  var stateModel models.StateNodeModel
+  var stateModel models.StateLeafModel
   if stateNode.NodeType == sdtypes.Removed {
+    // TODO: still need to handle removed leaves
     if atomic.LoadUint32(sdi.removedCacheFlag) == 0 {
       atomic.StoreUint32(sdi.removedCacheFlag, 1)
       sdi.fileWriter.upsertIPLDDirect(sdi.blockNumber, shared.RemovedNodeMhKey, []byte{})
     }
-    stateModel = models.StateNodeModel{
+    stateModel = models.StateLeafModel{
       BlockNumber: sdi.blockNumber,
       HeaderID: headerID,
       Path: stateNode.Path,
       StateKey: common.BytesToHash(stateNode.LeafKey).String(),
       CID: shared.RemovedNodeStateCID,
       MhKey: shared.RemovedNodeMhKey,
-      NodeType: stateNode.NodeType.Int(),
     }
   } else {
     stateCIDStr, stateMhKey, err := sdi.fileWriter.upsertIPLDRaw(sdi.blockNumber, ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
     if err != nil {
       return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
     }
-    stateModel = models.StateNodeModel{
-      BlockNumber: sdi.blockNumber,
-      HeaderID: headerID,
-      Path: stateNode.Path,
-      StateKey: common.BytesToHash(stateNode.LeafKey).String(),
-      CID: stateCIDStr,
-      MhKey: stateMhKey,
-      NodeType: stateNode.NodeType.Int(),
-    }
-  }
-
-  // index the state node
-  sdi.fileWriter.upsertStateCID(stateModel)
-
-  // if we have a leaf, decode and index the account data
-  if stateNode.NodeType == sdtypes.Leaf {
     var i []interface{}
     if err := rlp.DecodeBytes(stateNode.NodeValue, &i); err != nil {
       return fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
     }
@@ -448,26 +402,32 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
     if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
       return fmt.Errorf("error decoding state account rlp: %s", err.Error())
     }
-    accountModel := models.StateAccountModel{
+    stateModel = models.StateLeafModel{
       BlockNumber: sdi.blockNumber,
       HeaderID: headerID,
-      StatePath: stateNode.Path,
+      Path: stateNode.Path,
+      StateKey: common.BytesToHash(stateNode.LeafKey).String(),
+      CID: stateCIDStr,
+      MhKey: stateMhKey,
       Balance: account.Balance.String(),
       Nonce: account.Nonce,
       CodeHash: account.CodeHash,
       StorageRoot: account.Root.String(),
     }
-    sdi.fileWriter.upsertStateAccount(accountModel)
   }

+  // index the state node
+  sdi.fileWriter.upsertStateCID(stateModel)
+
   // if there are any storage nodes associated with this node, publish and index them
   for _, storageNode := range stateNode.StorageNodes {
     if storageNode.NodeType == sdtypes.Removed {
+      // TODO: still need to handle leaf deletions
       if atomic.LoadUint32(sdi.removedCacheFlag) == 0 {
        atomic.StoreUint32(sdi.removedCacheFlag, 1)
        sdi.fileWriter.upsertIPLDDirect(sdi.blockNumber, shared.RemovedNodeMhKey, []byte{})
       }
-      storageModel := models.StorageNodeModel{
+      storageModel := models.StorageLeafModel{
        BlockNumber: sdi.blockNumber,
        HeaderID: headerID,
        StatePath: stateNode.Path,
@@ -475,7 +435,6 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
        StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
        CID: shared.RemovedNodeStorageCID,
        MhKey: shared.RemovedNodeMhKey,
-        NodeType: storageNode.NodeType.Int(),
       }
       sdi.fileWriter.upsertStorageCID(storageModel)
       continue
@@ -484,7 +443,7 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
     if err != nil {
       return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
     }
-    storageModel := models.StorageNodeModel{
+    storageModel := models.StorageLeafModel{
       BlockNumber: sdi.blockNumber,
       HeaderID: headerID,
       StatePath: stateNode.Path,
@@ -492,7 +451,6 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
       StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
       CID: storageCIDStr,
       MhKey: storageMhKey,
-      NodeType: storageNode.NodeType.Int(),
     }
     sdi.fileWriter.upsertStorageCID(storageModel)
   }
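
Note: a minimal sketch of what a caller of the slimmed-down ipld2.FromBlockAndReceipts now receives. The import path and alias are assumed from this file's imports; error handling and the rest of PushBlock are omitted, so treat this as an illustration rather than the indexer's actual code.

    // Sketch only: the five node slices plus error returned after this change.
    // logNodes is indexed by receipt position, then by log position, so
    // logNodes[i][idx].Cid() takes the place of the old logLeafNodeCIDs[i][idx].
    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/core/types"
        ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld" // assumed import path
    )

    func printBlockCIDs(block *types.Block, receipts types.Receipts) error {
        _, _, txNodes, rctNodes, logNodes, err := ipld2.FromBlockAndReceipts(block, receipts)
        if err != nil {
            return err
        }
        if len(txNodes) != len(rctNodes) {
            return fmt.Errorf("expected number of transactions (%d) and receipts (%d) to be equal", len(txNodes), len(rctNodes))
        }
        for i := range rctNodes {
            // Receipt and log CIDs come straight from their own IPLD nodes now.
            fmt.Printf("tx %s rct %s logs %d\n", txNodes[i].Cid(), rctNodes[i].Cid(), len(logNodes[i]))
        }
        return nil
    }
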
diff --git a/statediff/indexer/database/file/writer.go b/statediff/indexer/database/file/writer.go
index 3c11b5eea..b4cd54a17 100644
--- a/statediff/indexer/database/file/writer.go
+++ b/statediff/indexer/database/file/writer.go
@@ -76,11 +76,11 @@ func (sqw *SQLWriter) Loop() {
       l = len(stmt)
       if sqw.collationIndex+l > writeBufferSize {
        if err := sqw.flush(); err != nil {
-          panic(fmt.Sprintf("error writing sql stmts buffer to file: %v", err))
+          panic((any)(fmt.Sprintf("error writing sql stmts buffer to file: %v", err)))
        }
        if l > writeBufferSize {
          if _, err := sqw.wc.Write(stmt); err != nil {
-            panic(fmt.Sprintf("error writing large sql stmt to file: %v", err))
+            panic((any)(fmt.Sprintf("error writing large sql stmt to file: %v", err)))
          }
          continue
        }
@@ -89,12 +89,12 @@
       sqw.collationIndex += l
     case <-sqw.quitChan:
       if err := sqw.flush(); err != nil {
-        panic(fmt.Sprintf("error writing sql stmts buffer to file: %v", err))
+        panic((any)(fmt.Sprintf("error writing sql stmts buffer to file: %v", err)))
       }
       return
     case <-sqw.flushChan:
       if err := sqw.flush(); err != nil {
-        panic(fmt.Sprintf("error writing sql stmts buffer to file: %v", err))
+        panic((any)(fmt.Sprintf("error writing sql stmts buffer to file: %v", err)))
       }
       sqw.flushFinished <- struct{}{}
     }
@@ -142,20 +142,17 @@ const (
   alInsert = "INSERT INTO eth.access_list_elements (block_number, tx_id, index, address, storage_keys) VALUES " +
     "('%s', '%s', %d, '%s', '%s');\n"

-  rctInsert = "INSERT INTO eth.receipt_cids (block_number, tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, " +
-    "post_status, log_root) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', %d, '%s');\n"
+  rctInsert = "INSERT INTO eth.receipt_cids (block_number, tx_id, cid, contract, contract_hash, mh_key, post_state, " +
+    "post_status) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', %d);\n"

-  logInsert = "INSERT INTO eth.log_cids (block_number, leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, " +
+  logInsert = "INSERT INTO eth.log_cids (block_number, cid, mh_key, rct_id, address, index, topic0, topic1, topic2, " +
     "topic3, log_data) VALUES ('%s', '%s', '%s', '%s', '%s', %d, '%s', '%s', '%s', '%s', '\\x%x');\n"

-  stateInsert = "INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) " +
-    "VALUES ('%s', '%s', '%s', '%s', '\\x%x', %d, %t, '%s');\n"
-
-  accountInsert = "INSERT INTO eth.state_accounts (block_number, header_id, state_path, balance, nonce, code_hash, storage_root) " +
-    "VALUES ('%s', '%s', '\\x%x', '%s', %d, '\\x%x', '%s');\n"
+  stateInsert = "INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, diff, mh_key, " +
+    "balance, nonce, code_hash, storage_root) VALUES ('%s', '%s', '%s', '%s', '\\x%x', %t, '%s', '%s', %d, '\\x%x', '%s');\n"

   storageInsert = "INSERT INTO eth.storage_cids (block_number, header_id, state_path, storage_leaf_key, cid, storage_path, " +
-    "node_type, diff, mh_key) VALUES ('%s', '%s', '\\x%x', '%s', '%s', '\\x%x', %d, %t, '%s');\n"
+    "diff, mh_key) VALUES ('%s', '%s', '\\x%x', '%s', '%s', '\\x%x', %t, '%s');\n"
 )

 func (sqw *SQLWriter) upsertNode(node nodeinfo.Info) {
@@ -222,38 +219,33 @@ func (sqw *SQLWriter) upsertAccessListElement(accessListElement models.AccessLis
 }

 func (sqw *SQLWriter) upsertReceiptCID(rct *models.ReceiptModel) {
-  sqw.stmts <- []byte(fmt.Sprintf(rctInsert, rct.BlockNumber, rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey,
-    rct.PostState, rct.PostStatus, rct.LogRoot))
+  sqw.stmts <- []byte(fmt.Sprintf(rctInsert, rct.BlockNumber, rct.TxID, rct.CID, rct.Contract, rct.ContractHash, rct.MhKey,
+    rct.PostState, rct.PostStatus))
   indexerMetrics.receipts.Inc(1)
 }

 func (sqw *SQLWriter) upsertLogCID(logs []*models.LogsModel) {
   for _, l := range logs {
-    sqw.stmts <- []byte(fmt.Sprintf(logInsert, l.BlockNumber, l.LeafCID, l.LeafMhKey, l.ReceiptID, l.Address, l.Index, l.Topic0,
+    sqw.stmts <- []byte(fmt.Sprintf(logInsert, l.BlockNumber, l.CID, l.MhKey, l.ReceiptID, l.Address, l.Index, l.Topic0,
       l.Topic1, l.Topic2, l.Topic3, l.Data))
     indexerMetrics.logs.Inc(1)
   }
 }

-func (sqw *SQLWriter) upsertStateCID(stateNode models.StateNodeModel) {
+func (sqw *SQLWriter) upsertStateCID(stateNode models.StateLeafModel) {
   var stateKey string
   if stateNode.StateKey != nullHash.String() {
     stateKey = stateNode.StateKey
   }
   sqw.stmts <- []byte(fmt.Sprintf(stateInsert, stateNode.BlockNumber, stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path,
-    stateNode.NodeType, true, stateNode.MhKey))
+    true, stateNode.MhKey, stateNode.Balance, stateNode.Nonce, stateNode.CodeHash, stateNode.StorageRoot))
 }

-func (sqw *SQLWriter) upsertStateAccount(stateAccount models.StateAccountModel) {
-  sqw.stmts <- []byte(fmt.Sprintf(accountInsert, stateAccount.BlockNumber, stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance,
-    stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot))
-}
-
-func (sqw *SQLWriter) upsertStorageCID(storageCID models.StorageNodeModel) {
+func (sqw *SQLWriter) upsertStorageCID(storageCID models.StorageLeafModel) {
   var storageKey string
   if storageCID.StorageKey != nullHash.String() {
     storageKey = storageCID.StorageKey
   }
   sqw.stmts <- []byte(fmt.Sprintf(storageInsert, storageCID.BlockNumber, storageCID.HeaderID, storageCID.StatePath, storageKey, storageCID.CID,
-    storageCID.Path, storageCID.NodeType, true, storageCID.MhKey))
+    storageCID.Path, true, storageCID.MhKey))
 }
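
Note: to sanity-check the reworked stateInsert layout, here is a standalone, hedged rendering of a single state_cids statement. The format string is copied from the const block above; every value is made up for illustration only.

    package main

    import "fmt"

    // stateInsert is copied verbatim from the diff above; balance, nonce,
    // code_hash and storage_root now live in eth.state_cids.
    const stateInsert = "INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, diff, mh_key, " +
        "balance, nonce, code_hash, storage_root) VALUES ('%s', '%s', '%s', '%s', '\\x%x', %t, '%s', '%s', %d, '\\x%x', '%s');\n"

    func main() {
        // All values below are hypothetical placeholders.
        fmt.Printf(stateInsert,
            "15000000",               // block_number
            "0xheaderhash",           // header_id
            "0xstateleafkey",         // state_leaf_key
            "bagacgzaexamplecid",     // cid
            []byte{0x06, 0x0c},       // state_path
            true,                     // diff
            "/blocks/examplemhkey",   // mh_key
            "1000000000000000000",    // balance
            uint64(7),                // nonce
            []byte{0xc5, 0xd2, 0x46}, // code_hash (truncated)
            "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", // storage_root
        )
    }
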
diff --git a/statediff/indexer/database/sql/indexer.go b/statediff/indexer/database/sql/indexer.go
index 74ce06745..e20b93d43 100644
--- a/statediff/indexer/database/sql/indexer.go
+++ b/statediff/indexer/database/sql/indexer.go
@@ -25,7 +25,6 @@ import (
   "math/big"
   "time"

-  "github.com/ipfs/go-cid"
   node "github.com/ipfs/go-ipld-format"
   "github.com/multiformats/go-multihash"

@@ -102,16 +101,13 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
   }

   // Generate the block iplds
-  headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
+  headerNode, uncleNodes, txNodes, rctNodes, logNodes, err := ipld2.FromBlockAndReceipts(block, receipts)
   if err != nil {
     return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
   }
-  if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
-    return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
-  }
-  if len(txTrieNodes) != len(rctTrieNodes) {
-    return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
+  if len(txNodes) != len(rctNodes) {
+    return nil, fmt.Errorf("expected number of transactions (%d) and receipts (%d) to be equal", len(txNodes), len(rctNodes))
   }

   // Calculate reward
@@ -130,7 +126,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
     return nil, err
   }
   defer func() {
-    if p := recover(); p != nil {
+    if p := recover(); p != (any)(nil) {
       rollback(sdi.ctx, tx)
       panic(p)
     } else if err != nil {
@@ -156,7 +152,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
       close(self.quit)
       close(self.iplds)
     }()
-    if p := recover(); p != nil {
+    if p := recover(); p != (any)(nil) {
       log.Info("panic detected before tx submission, rolling back the tx", "panic", p)
       rollback(sdi.ctx, tx)
       panic(p)
@@ -218,12 +214,8 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
     receipts: receipts,
     txs: transactions,
     rctNodes: rctNodes,
-    rctTrieNodes: rctTrieNodes,
     txNodes: txNodes,
-    txTrieNodes: txTrieNodes,
-    logTrieNodes: logTrieNodes,
-    logLeafNodeCIDs: logLeafNodeCIDs,
-    rctLeafNodeCIDs: rctLeafNodeCIDs,
+    logNodes: logNodes,
   })
   if err != nil {
     return nil, err
@@ -301,12 +293,8 @@ type processArgs struct {
   receipts types.Receipts
   txs types.Transactions
   rctNodes []*ipld2.EthReceipt
-  rctTrieNodes []*ipld2.EthRctTrie
   txNodes []*ipld2.EthTx
-  txTrieNodes []*ipld2.EthTxTrie
-  logTrieNodes [][]node.Node
-  logLeafNodeCIDs [][]cid.Cid
-  rctLeafNodeCIDs []cid.Cid
+  logNodes [][]*ipld2.EthLog
 }

 // processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres
@@ -314,9 +302,6 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
   // Process receipts and txs
   signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
   for i, receipt := range args.receipts {
-    for _, logTrieNode := range args.logTrieNodes[i] {
-      tx.cacheIPLD(logTrieNode)
-    }
     txNode := args.txNodes[i]
     tx.cacheIPLD(txNode)
@@ -334,6 +319,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
     if err != nil {
       return fmt.Errorf("error deriving tx sender: %v", err)
     }
+    txCid := txNode.Cid()
     txModel := models.TxModel{
       BlockNumber: sdi.blockNumber,
       HeaderID: args.headerID,
@@ -342,8 +328,8 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
       TxHash: txID,
       Index: int64(i),
       Data: trx.Data(),
-      CID: txNode.Cid().String(),
-      MhKey: shared.MultihashKeyFromCID(txNode.Cid()),
+      CID: txCid.String(),
+      MhKey: shared.MultihashKeyFromCID(txCid),
       Type: trx.Type(),
       Value: val,
     }
@@ -375,20 +361,14 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
     if contract != "" {
       contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
     }
-
-    // index receipt
-    if !args.rctLeafNodeCIDs[i].Defined() {
-      return fmt.Errorf("invalid receipt leaf node cid")
-    }
-
+    rctCid := args.rctNodes[i].Cid()
     rctModel := &models.ReceiptModel{
       BlockNumber: sdi.blockNumber,
       TxID: txID,
       Contract: contract,
       ContractHash: contractHash,
-      LeafCID: args.rctLeafNodeCIDs[i].String(),
-      LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]),
-      LogRoot: args.rctNodes[i].LogRoot.String(),
+      CID: rctCid.String(),
+      MhKey: shared.MultihashKeyFromCID(rctCid),
     }
     if len(receipt.PostState) == 0 {
       rctModel.PostStatus = receipt.Status
@@ -407,19 +387,15 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
       for ti, topic := range l.Topics {
        topicSet[ti] = topic.Hex()
       }
-
-      if !args.logLeafNodeCIDs[i][idx].Defined() {
-        return fmt.Errorf("invalid log cid")
-      }
-
+      logCid := args.logNodes[i][idx].Cid()
       logDataSet[idx] = &models.LogsModel{
        BlockNumber: sdi.blockNumber,
        ReceiptID: txID,
        Address: l.Address.String(),
        Index: int64(l.Index),
        Data: l.Data,
-        LeafCID: args.logLeafNodeCIDs[i][idx].String(),
-        LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]),
+        CID: logCid.String(),
+        MhKey: shared.MultihashKeyFromCID(logCid),
        Topic0: topicSet[0],
        Topic1: topicSet[1],
        Topic2: topicSet[2],
@@ -432,12 +408,6 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
     }
   }

-  // publish trie nodes, these aren't indexed directly
-  for i, n := range args.txTrieNodes {
-    tx.cacheIPLD(n)
-    tx.cacheIPLD(args.rctTrieNodes[i])
-  }
-
   return nil
 }

@@ -448,41 +418,23 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
     return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
   }
   // publish the state node
-  var stateModel models.StateNodeModel
+  var stateModel models.StateLeafModel
   if stateNode.NodeType == sdtypes.Removed {
+    // TODO: do we need to continue tracking removed nodes for leaves?
     tx.cacheRemoved(shared.RemovedNodeMhKey, []byte{})
-    stateModel = models.StateNodeModel{
+    stateModel = models.StateLeafModel{
       BlockNumber: sdi.blockNumber,
       HeaderID: headerID,
       Path: stateNode.Path,
       StateKey: common.BytesToHash(stateNode.LeafKey).String(),
       CID: shared.RemovedNodeStateCID,
       MhKey: shared.RemovedNodeMhKey,
-      NodeType: stateNode.NodeType.Int(),
     }
   } else {
     stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
     if err != nil {
       return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
     }
-    stateModel = models.StateNodeModel{
-      BlockNumber: sdi.blockNumber,
-      HeaderID: headerID,
-      Path: stateNode.Path,
-      StateKey: common.BytesToHash(stateNode.LeafKey).String(),
-      CID: stateCIDStr,
-      MhKey: stateMhKey,
-      NodeType: stateNode.NodeType.Int(),
-    }
-  }
-
-  // index the state node
-  if err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel); err != nil {
-    return err
-  }
-
-  // if we have a leaf, decode and index the account data
-  if stateNode.NodeType == sdtypes.Leaf {
     var i []interface{}
     if err := rlp.DecodeBytes(stateNode.NodeValue, &i); err != nil {
       return fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
     }
@@ -494,25 +446,31 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
     if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
       return fmt.Errorf("error decoding state account rlp: %s", err.Error())
     }
-    accountModel := models.StateAccountModel{
+    stateModel = models.StateLeafModel{
       BlockNumber: sdi.blockNumber,
       HeaderID: headerID,
-      StatePath: stateNode.Path,
+      Path: stateNode.Path,
+      StateKey: common.BytesToHash(stateNode.LeafKey).String(),
+      CID: stateCIDStr,
+      MhKey: stateMhKey,
       Balance: account.Balance.String(),
       Nonce: account.Nonce,
       CodeHash: account.CodeHash,
       StorageRoot: account.Root.String(),
     }
-    if err := sdi.dbWriter.upsertStateAccount(tx.dbtx, accountModel); err != nil {
-      return err
-    }
+  }
+
+  // index the state node
+  if err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel); err != nil {
+    return err
   }

   // if there are any storage nodes associated with this node, publish and index them
   for _, storageNode := range stateNode.StorageNodes {
+    // TODO: we still need to handle leaf deletions
     if storageNode.NodeType == sdtypes.Removed {
       tx.cacheRemoved(shared.RemovedNodeMhKey, []byte{})
-      storageModel := models.StorageNodeModel{
+      storageModel := models.StorageLeafModel{
        BlockNumber: sdi.blockNumber,
        HeaderID: headerID,
        StatePath: stateNode.Path,
@@ -520,7 +478,6 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
        StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
        CID: shared.RemovedNodeStorageCID,
        MhKey: shared.RemovedNodeMhKey,
-        NodeType: storageNode.NodeType.Int(),
       }
       if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel); err != nil {
        return err
@@ -531,7 +488,7 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
     if err != nil {
       return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
     }
-    storageModel := models.StorageNodeModel{
+    storageModel := models.StorageLeafModel{
       BlockNumber: sdi.blockNumber,
       HeaderID: headerID,
       StatePath: stateNode.Path,
@@ -539,7 +496,6 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
       StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
       CID: storageCIDStr,
       MhKey: storageMhKey,
-      NodeType: storageNode.NodeType.Int(),
     }
     if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel); err != nil {
       return err
@@ -595,7 +551,7 @@ func (sdi *StateDiffIndexer) InsertWatchedAddresses(args []sdtypes.WatchAddressA
     return err
   }
   defer func() {
-    if p := recover(); p != nil {
+    if p := recover(); p != (any)(nil) {
       rollback(sdi.ctx, tx)
       panic(p)
     } else if err != nil {
@@ -623,7 +579,7 @@ func (sdi *StateDiffIndexer) RemoveWatchedAddresses(args []sdtypes.WatchAddressA
     return err
   }
   defer func() {
-    if p := recover(); p != nil {
+    if p := recover(); p != (any)(nil) {
       rollback(sdi.ctx, tx)
       panic(p)
     } else if err != nil {
@@ -650,7 +606,7 @@ func (sdi *StateDiffIndexer) SetWatchedAddresses(args []sdtypes.WatchAddressArg,
     return err
   }
   defer func() {
-    if p := recover(); p != nil {
+    if p := recover(); p != (any)(nil) {
       rollback(sdi.ctx, tx)
       panic(p)
     } else if err != nil {
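
Note: the consolidation above replaces the old StateNodeModel + StateAccountModel pair with a single StateLeafModel row. A hedged sketch of that path pulled out of the indexer for readability; it assumes the rlp, common, models and sdtypes packages used throughout this file, and the decode into types.StateAccount mirrors the unshown context around the hunk rather than anything added by this diff.

    // stateLeafFromNode builds the consolidated row that replaces the old
    // StateNodeModel + StateAccountModel pair. cid and mhKey are whatever
    // tx.cacheRaw / upsertIPLDRaw returned for the leaf IPLD.
    func stateLeafFromNode(blockNumber, headerID string, stateNode sdtypes.StateNode, cid, mhKey string) (models.StateLeafModel, error) {
        var i []interface{}
        if err := rlp.DecodeBytes(stateNode.NodeValue, &i); err != nil {
            return models.StateLeafModel{}, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
        }
        if len(i) != 2 {
            return models.StateLeafModel{}, fmt.Errorf("expected a two-element leaf node, got %d elements", len(i))
        }
        var account types.StateAccount // assumed account type, per the surrounding (unshown) code
        if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
            return models.StateLeafModel{}, fmt.Errorf("error decoding state account rlp: %s", err.Error())
        }
        return models.StateLeafModel{
            BlockNumber: blockNumber,
            HeaderID:    headerID,
            Path:        stateNode.Path,
            StateKey:    common.BytesToHash(stateNode.LeafKey).String(),
            CID:         cid,
            MhKey:       mhKey,
            Balance:     account.Balance.String(),
            Nonce:       account.Nonce,
            CodeHash:    account.CodeHash,
            StorageRoot: account.Root.String(),
        }, nil
    }

The Removed branch keeps its sentinel CID/mh_key pair and simply leaves the account columns empty.
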
diff --git a/statediff/indexer/database/sql/postgres/database.go b/statediff/indexer/database/sql/postgres/database.go
index 35a9cbc82..bb6355af6 100644
--- a/statediff/indexer/database/sql/postgres/database.go
+++ b/statediff/indexer/database/sql/postgres/database.go
@@ -63,32 +63,26 @@ func (db *DB) InsertAccessListElementStm() string {

 // InsertRctStm satisfies the sql.Statements interface
 func (db *DB) InsertRctStm() string {
-  return `INSERT INTO eth.receipt_cids (block_number, tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
+  return `INSERT INTO eth.receipt_cids (block_number, tx_id, cid, contract, contract_hash, mh_key, post_state, post_status) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
   ON CONFLICT (tx_id, block_number) DO NOTHING`
 }

 // InsertLogStm satisfies the sql.Statements interface
 func (db *DB) InsertLogStm() string {
-  return `INSERT INTO eth.log_cids (block_number, leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
+  return `INSERT INTO eth.log_cids (block_number, cid, mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
   ON CONFLICT (rct_id, index, block_number) DO NOTHING`
 }

 // InsertStateStm satisfies the sql.Statements interface
 func (db *DB) InsertStateStm() string {
-  return `INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
-  ON CONFLICT (header_id, state_path, block_number) DO UPDATE SET (block_number, state_leaf_key, cid, node_type, diff, mh_key) = ($1, $3, $4, $6, $7, $8)`
-}
-
-// InsertAccountStm satisfies the sql.Statements interface
-func (db *DB) InsertAccountStm() string {
-  return `INSERT INTO eth.state_accounts (block_number, header_id, state_path, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6, $7)
-  ON CONFLICT (header_id, state_path, block_number) DO NOTHING`
+  return `INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, diff, mh_key, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
+  ON CONFLICT (header_id, state_path, block_number) DO UPDATE SET (block_number, state_leaf_key, cid, diff, mh_key, balance, nonce, code_hash, storage_root) = ($1, $3, $4, $6, $7, $8, $9, $10, $11)`
 }

 // InsertStorageStm satisfies the sql.Statements interface
 func (db *DB) InsertStorageStm() string {
-  return `INSERT INTO eth.storage_cids (block_number, header_id, state_path, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
-  ON CONFLICT (header_id, state_path, storage_path, block_number) DO UPDATE SET (block_number, storage_leaf_key, cid, node_type, diff, mh_key) = ($1, $4, $5, $7, $8, $9)`
+  return `INSERT INTO eth.storage_cids (block_number, header_id, state_path, storage_leaf_key, cid, storage_path, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+  ON CONFLICT (header_id, state_path, storage_path, block_number) DO UPDATE SET (block_number, storage_leaf_key, cid, diff, mh_key) = ($1, $4, $5, $7, $8)`
 }

 // InsertIPLDStm satisfies the sql.Statements interface
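
Note: the new InsertStateStm is positional, so its placeholders have to stay in lock-step with upsertStateCID in statediff/indexer/database/sql/writer.go (next file). A hedged sketch of the expected argument order; ctx, tx, db and stateNode are placeholder names, not identifiers from this diff.

    // Placeholder-to-column mapping for the eleven-argument state_cids upsert.
    _, err := tx.Exec(ctx, db.InsertStateStm(),
        stateNode.BlockNumber, // $1  block_number
        stateNode.HeaderID,    // $2  header_id
        stateKey,              // $3  state_leaf_key
        stateNode.CID,         // $4  cid
        stateNode.Path,        // $5  state_path
        true,                  // $6  diff
        stateNode.MhKey,       // $7  mh_key
        stateNode.Balance,     // $8  balance
        stateNode.Nonce,       // $9  nonce
        stateNode.CodeHash,    // $10 code_hash
        stateNode.StorageRoot, // $11 storage_root
    )
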
diff --git a/statediff/indexer/database/sql/writer.go b/statediff/indexer/database/sql/writer.go
index c6c378556..76ee3b29f 100644
--- a/statediff/indexer/database/sql/writer.go
+++ b/statediff/indexer/database/sql/writer.go
@@ -105,13 +105,13 @@ func (w *Writer) upsertAccessListElement(tx Tx, accessListElement models.AccessL
 }

 /*
-INSERT INTO eth.receipt_cids (block_number, tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
+INSERT INTO eth.receipt_cids (block_number, tx_id, cid, contract, contract_hash, mh_key, post_state, post_status) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
 ON CONFLICT (tx_id, block_number) DO NOTHING
 */
 func (w *Writer) upsertReceiptCID(tx Tx, rct *models.ReceiptModel) error {
   _, err := tx.Exec(w.db.Context(), w.db.InsertRctStm(),
-    rct.BlockNumber, rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState,
-    rct.PostStatus, rct.LogRoot)
+    rct.BlockNumber, rct.TxID, rct.CID, rct.Contract, rct.ContractHash, rct.MhKey, rct.PostState,
+    rct.PostStatus)
   if err != nil {
     return fmt.Errorf("error upserting receipt_cids entry: %w", err)
   }
@@ -120,13 +120,13 @@ func (w *Writer) upsertReceiptCID(tx Tx, rct *models.ReceiptModel) error {
 }

 /*
-INSERT INTO eth.log_cids (block_number, leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
+INSERT INTO eth.log_cids (block_number, cid, mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
 ON CONFLICT (rct_id, index, block_number) DO NOTHING
 */
 func (w *Writer) upsertLogCID(tx Tx, logs []*models.LogsModel) error {
   for _, log := range logs {
     _, err := tx.Exec(w.db.Context(), w.db.InsertLogStm(),
-      log.BlockNumber, log.LeafCID, log.LeafMhKey, log.ReceiptID, log.Address, log.Index, log.Topic0, log.Topic1,
+      log.BlockNumber, log.CID, log.MhKey, log.ReceiptID, log.Address, log.Index, log.Topic0, log.Topic1,
       log.Topic2, log.Topic3, log.Data)
     if err != nil {
       return fmt.Errorf("error upserting logs entry: %w", err)
@@ -137,17 +137,17 @@ func (w *Writer) upsertLogCID(tx Tx, logs []*models.LogsModel) error {
 }

 /*
-INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
-ON CONFLICT (header_id, state_path, block_number) DO UPDATE SET (block_number, state_leaf_key, cid, node_type, diff, mh_key) = ($1 $3, $4, $6, $7, $8)
+INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, diff, mh_key, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
+ON CONFLICT (header_id, state_path, block_number) DO UPDATE SET (block_number, state_leaf_key, cid, diff, mh_key, balance, nonce, code_hash, storage_root) = ($1, $3, $4, $6, $7, $8, $9, $10, $11)
 */
-func (w *Writer) upsertStateCID(tx Tx, stateNode models.StateNodeModel) error {
+func (w *Writer) upsertStateCID(tx Tx, stateNode models.StateLeafModel) error {
   var stateKey string
   if stateNode.StateKey != nullHash.String() {
     stateKey = stateNode.StateKey
   }
   _, err := tx.Exec(w.db.Context(), w.db.InsertStateStm(),
-    stateNode.BlockNumber, stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true,
-    stateNode.MhKey)
+    stateNode.BlockNumber, stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path, true,
+    stateNode.MhKey, stateNode.Balance, stateNode.Nonce, stateNode.CodeHash, stateNode.StorageRoot)
   if err != nil {
     return fmt.Errorf("error upserting state_cids entry: %v", err)
   }
@@ -155,31 +155,17 @@ func (w *Writer) upsertStateCID(tx Tx, stateNode models.StateNodeModel) error {
 }

 /*
-INSERT INTO eth.state_accounts (block_number, header_id, state_path, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6, $7)
-ON CONFLICT (header_id, state_path, block_number) DO NOTHING
+INSERT INTO eth.storage_cids (block_number, header_id, state_path, storage_leaf_key, cid, storage_path, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ON CONFLICT (header_id, state_path, storage_path, block_number) DO UPDATE SET (block_number, storage_leaf_key, cid, diff, mh_key) = ($1, $4, $5, $7, $8)
 */
-func (w *Writer) upsertStateAccount(tx Tx, stateAccount models.StateAccountModel) error {
-  _, err := tx.Exec(w.db.Context(), w.db.InsertAccountStm(),
-    stateAccount.BlockNumber, stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance,
-    stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot)
-  if err != nil {
-    return fmt.Errorf("error upserting state_accounts entry: %v", err)
-  }
-  return nil
-}
-
-/*
-INSERT INTO eth.storage_cids (block_number, header_id, state_path, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
-ON CONFLICT (header_id, state_path, storage_path, block_number) DO UPDATE SET (block_number, storage_leaf_key, cid, node_type, diff, mh_key) = ($1, $4, $5, $7, $8, $9)
-*/
-func (w *Writer) upsertStorageCID(tx Tx, storageCID models.StorageNodeModel) error {
+func (w *Writer) upsertStorageCID(tx Tx, storageCID models.StorageLeafModel) error {
   var storageKey string
   if storageCID.StorageKey != nullHash.String() {
     storageKey = storageCID.StorageKey
   }
   _, err := tx.Exec(w.db.Context(), w.db.InsertStorageStm(),
     storageCID.BlockNumber, storageCID.HeaderID, storageCID.StatePath, storageKey, storageCID.CID, storageCID.Path,
-    storageCID.NodeType, true, storageCID.MhKey)
+    true, storageCID.MhKey)
   if err != nil {
     return fmt.Errorf("error upserting storage_cids entry: %v", err)
   }
diff --git a/statediff/indexer/ipld/eth_header_test.go b/statediff/indexer/ipld/eth_header_test.go
index ebbab2129..cebc67371 100644
--- a/statediff/indexer/ipld/eth_header_test.go
+++ b/statediff/indexer/ipld/eth_header_test.go
@@ -430,10 +430,10 @@ func TestEthBlockCopy(t *testing.T) {

   defer func() {
     r := recover()
-    if r == nil {
+    if r == (any)(nil) {
       t.Fatal("Expected panic")
     }
-    if r != "implement me" {
+    if r != (any)("implement me") {
       t.Fatalf("Wrong panic message\r\nexpected %s\r\ngot %s", "'implement me'", r)
     }
   }()
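
Note: with upsertStateAccount gone, consumers that used to join eth.state_accounts can read the account columns straight off eth.state_cids. A hedged read-side sketch, written as a Go constant in the style of this package; the column names follow the statements above, but the query itself is an assumption and is not generated or used anywhere in this change.

    // Hypothetical query for the account fields of one state leaf at one block.
    const accountAtBlock = `SELECT state_leaf_key, cid, mh_key, balance, nonce, code_hash, storage_root
    FROM eth.state_cids
    WHERE block_number = $1
      AND header_id = $2
      AND state_leaf_key = $3`
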
diff --git a/statediff/indexer/ipld/eth_parser.go b/statediff/indexer/ipld/eth_parser.go
index 03061f828..9b33dd073 100644
--- a/statediff/indexer/ipld/eth_parser.go
+++ b/statediff/indexer/ipld/eth_parser.go
@@ -124,12 +124,12 @@ func FromBlockJSON(r io.Reader) (*EthHeader, []*EthTx, []*EthTxTrie, error) {
 }

 // FromBlockAndReceipts takes a block and processes it
-// to return it a set of IPLD nodes for further processing.
-func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthHeader, []*EthTx, []*EthTxTrie, []*EthReceipt, []*EthRctTrie, [][]node.Node, [][]cid.Cid, []cid.Cid, error) {
+// to return a set of IPLD nodes for further processing.
+func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthHeader, []*EthTx, []*EthReceipt, [][]*EthLog, error) {
   // Process the header
   headerNode, err := NewEthHeader(block.Header())
   if err != nil {
-    return nil, nil, nil, nil, nil, nil, nil, nil, nil, err
+    return nil, nil, nil, nil, nil, err
   }

   // Process the uncles
@@ -137,23 +137,71 @@ func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHe
   for i, uncle := range block.Uncles() {
     uncleNode, err := NewEthHeader(uncle)
     if err != nil {
-      return nil, nil, nil, nil, nil, nil, nil, nil, nil, err
+      return nil, nil, nil, nil, nil, err
     }
     uncleNodes[i] = uncleNode
   }

   // Process the txs
-  txNodes, txTrieNodes, err := processTransactions(block.Transactions(),
-    block.Header().TxHash[:])
+  txNodes, err := processTransactionsSuccinct(block.Transactions())
   if err != nil {
-    return nil, nil, nil, nil, nil, nil, nil, nil, nil, err
+    return nil, nil, nil, nil, nil, err
   }

   // Process the receipts and logs
-  rctNodes, tctTrieNodes, logTrieAndLogNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := processReceiptsAndLogs(receipts,
-    block.Header().ReceiptHash[:])
+  rctNodes, logNodes, err := processReceiptsAndLogsSuccinct(receipts)

-  return headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, tctTrieNodes, logTrieAndLogNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err
+  return headerNode, uncleNodes, txNodes, rctNodes, logNodes, err
+}
+
+// processTransactionsSuccinct converts txs to IPLDs
+func processTransactionsSuccinct(txs []*types.Transaction) ([]*EthTx, error) {
+  ethTxNodes := make([]*EthTx, len(txs))
+
+  for i, tx := range txs {
+    ethTx, err := NewEthTx(tx)
+    if err != nil {
+      return nil, err
+    }
+    ethTxNodes[i] = ethTx
+  }
+  return ethTxNodes, nil
+}
+
+// processReceiptsAndLogsSuccinct takes receipts and returns the IPLDs for the receipts and the logs contained within
+func processReceiptsAndLogsSuccinct(rcts []*types.Receipt) ([]*EthReceipt, [][]*EthLog, error) {
+  // Pre-allocate memory.
+  ethRctNodes := make([]*EthReceipt, len(rcts))
+  ethLogNodes := make([][]*EthLog, len(rcts))
+
+  for i, rct := range rcts {
+    // Process logs for each receipt.
+    ethLogs, err := processLogsSuccinct(rct.Logs)
+    if err != nil {
+      return nil, nil, err
+    }
+    ethRct, err := NewReceipt(rct)
+    if err != nil {
+      return nil, nil, err
+    }
+    ethRctNodes[i] = ethRct
+    ethLogNodes[i] = ethLogs
+  }
+
+  return ethRctNodes, ethLogNodes, nil
+}
+
+func processLogsSuccinct(logs []*types.Log) ([]*EthLog, error) {
+  ethLogs := make([]*EthLog, len(logs))
+  for i, log := range logs {
+    logNode, err := NewLog(log)
+    if err != nil {
+      return nil, err
+    }
+    ethLogs[i] = logNode
+  }
+
+  return ethLogs, nil
 }

 // processTransactions will take the found transactions in a parsed block body
diff --git a/statediff/indexer/models/batch.go b/statediff/indexer/models/batch.go
index 94e9b4e96..ceac81174 100644
--- a/statediff/indexer/models/batch.go
+++ b/statediff/indexer/models/batch.go
@@ -69,7 +69,6 @@ type ReceiptBatch struct {
   PostStates []string
   Contracts []string
   ContractHashes []string
-  LogRoots []string
 }

 // LogBatch holds the arguments for a batch insert of log data
@@ -87,37 +86,28 @@ type LogBatch struct {
   Topic3s []string
 }

-// StateBatch holds the arguments for a batch insert of state data
-type StateBatch struct {
+// StateLeafBatch holds the arguments for a batch insert of state data
+type StateLeafBatch struct {
   BlockNumbers []string
   HeaderID string
   Paths [][]byte
   StateKeys []string
-  NodeTypes []int
   CIDs []string
   MhKeys []string
   Diff bool
-}
-
-// AccountBatch holds the arguments for a batch insert of account data
-type AccountBatch struct {
-  BlockNumbers []string
-  HeaderID string
-  StatePaths [][]byte
   Balances []string
   Nonces []uint64
   CodeHashes [][]byte
   StorageRoots []string
 }

-// StorageBatch holds the arguments for a batch insert of storage data
-type StorageBatch struct {
+// StorageLeafBatch holds the arguments for a batch insert of storage data
+type StorageLeafBatch struct {
   BlockNumbers []string
   HeaderID string
   StatePaths [][]string
   Paths [][]byte
   StorageKeys []string
-  NodeTypes []int
   CIDs []string
   MhKeys []string
   Diff bool
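
Note: a hedged sketch of how the renamed StateLeafBatch might be filled from the StateLeafModel rows defined in models.go below. Field names come from this diff; the helper itself and any batch-writer wiring are assumptions for illustration.

    // appendStateLeaf copies one StateLeafModel into the columnar batch form,
    // including the account columns that now ride along with the leaf row.
    func appendStateLeaf(b *models.StateLeafBatch, m models.StateLeafModel) {
        b.BlockNumbers = append(b.BlockNumbers, m.BlockNumber)
        b.HeaderID = m.HeaderID
        b.Paths = append(b.Paths, m.Path)
        b.StateKeys = append(b.StateKeys, m.StateKey)
        b.CIDs = append(b.CIDs, m.CID)
        b.MhKeys = append(b.MhKeys, m.MhKey)
        b.Diff = m.Diff
        b.Balances = append(b.Balances, m.Balance)
        b.Nonces = append(b.Nonces, m.Nonce)
        b.CodeHashes = append(b.CodeHashes, m.CodeHash)
        b.StorageRoots = append(b.StorageRoots, m.StorageRoot)
    }
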
diff --git a/statediff/indexer/models/models.go b/statediff/indexer/models/models.go
index cbf17d977..67e981f9c 100644
--- a/statediff/indexer/models/models.go
+++ b/statediff/indexer/models/models.go
@@ -84,71 +84,60 @@ type AccessListElementModel struct {
 type ReceiptModel struct {
   BlockNumber string `db:"block_number"`
   TxID string `db:"tx_id"`
-  LeafCID string `db:"leaf_cid"`
-  LeafMhKey string `db:"leaf_mh_key"`
+  CID string `db:"cid"`
+  MhKey string `db:"mh_key"`
   PostStatus uint64 `db:"post_status"`
   PostState string `db:"post_state"`
   Contract string `db:"contract"`
   ContractHash string `db:"contract_hash"`
-  LogRoot string `db:"log_root"`
 }

-// StateNodeModel is the db model for eth.state_cids
-type StateNodeModel struct {
+// StateLeafModel is the db model for eth.state_cids
+type StateLeafModel struct {
   BlockNumber string `db:"block_number"`
   HeaderID string `db:"header_id"`
   Path []byte `db:"state_path"`
   StateKey string `db:"state_leaf_key"`
-  NodeType int `db:"node_type"`
   CID string `db:"cid"`
   MhKey string `db:"mh_key"`
   Diff bool `db:"diff"`
-}
-
-// StorageNodeModel is the db model for eth.storage_cids
-type StorageNodeModel struct {
-  BlockNumber string `db:"block_number"`
-  HeaderID string `db:"header_id"`
-  StatePath []byte `db:"state_path"`
-  Path []byte `db:"storage_path"`
-  StorageKey string `db:"storage_leaf_key"`
-  NodeType int `db:"node_type"`
-  CID string `db:"cid"`
-  MhKey string `db:"mh_key"`
-  Diff bool `db:"diff"`
-}
-
-// StorageNodeWithStateKeyModel is a db model for eth.storage_cids + eth.state_cids.state_key
-type StorageNodeWithStateKeyModel struct {
-  BlockNumber string `db:"block_number"`
-  HeaderID string `db:"header_id"`
-  StatePath []byte `db:"state_path"`
-  Path []byte `db:"storage_path"`
-  StateKey string `db:"state_leaf_key"`
-  StorageKey string `db:"storage_leaf_key"`
-  NodeType int `db:"node_type"`
-  CID string `db:"cid"`
-  MhKey string `db:"mh_key"`
-  Diff bool `db:"diff"`
-}
-
-// StateAccountModel is a db model for an eth state account (decoded value of state leaf node)
-type StateAccountModel struct {
-  BlockNumber string `db:"block_number"`
-  HeaderID string `db:"header_id"`
-  StatePath []byte `db:"state_path"`
   Balance string `db:"balance"`
   Nonce uint64 `db:"nonce"`
   CodeHash []byte `db:"code_hash"`
   StorageRoot string `db:"storage_root"`
 }

+// StorageLeafModel is the db model for eth.storage_cids
+type StorageLeafModel struct {
+  BlockNumber string `db:"block_number"`
+  HeaderID string `db:"header_id"`
+  StatePath []byte `db:"state_path"`
+  Path []byte `db:"storage_path"`
+  StorageKey string `db:"storage_leaf_key"`
+  CID string `db:"cid"`
+  MhKey string `db:"mh_key"`
+  Diff bool `db:"diff"`
+}
+
+// StorageLeafWithStateKeyModel is a db model for eth.storage_cids + eth.state_cids.state_key
+type StorageLeafWithStateKeyModel struct {
+  BlockNumber string `db:"block_number"`
+  HeaderID string `db:"header_id"`
+  StatePath []byte `db:"state_path"`
+  Path []byte `db:"storage_path"`
+  StateKey string `db:"state_leaf_key"`
+  StorageKey string `db:"storage_leaf_key"`
+  CID string `db:"cid"`
+  MhKey string `db:"mh_key"`
+  Diff bool `db:"diff"`
+}
+
 // LogsModel is the db model for eth.logs
 type LogsModel struct {
   BlockNumber string `db:"block_number"`
   ReceiptID string `db:"rct_id"`
-  LeafCID string `db:"leaf_cid"`
-  LeafMhKey string `db:"leaf_mh_key"`
+  CID string `db:"cid"`
+  MhKey string `db:"mh_key"`
   Address string `db:"address"`
   Index int64 `db:"index"`
   Data []byte `db:"log_data"`