write code to pg-ipfs table, for codehash lookups

This commit is contained in:
Ian Norden 2020-08-10 12:46:03 -05:00
parent 52b5c99760
commit 39354b2114
12 changed files with 627 additions and 663 deletions

View File

@ -8,6 +8,8 @@ CREATE TABLE eth.transaction_cids (
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
dst VARCHAR(66) NOT NULL, dst VARCHAR(66) NOT NULL,
src VARCHAR(66) NOT NULL, src VARCHAR(66) NOT NULL,
deployment BOOL NOT NULL,
data BYTEA,
UNIQUE (header_id, tx_hash) UNIQUE (header_id, tx_hash)
); );

View File

@ -415,7 +415,9 @@ CREATE TABLE eth.transaction_cids (
cid text NOT NULL, cid text NOT NULL,
mh_key text NOT NULL, mh_key text NOT NULL,
dst character varying(66) NOT NULL, dst character varying(66) NOT NULL,
src character varying(66) NOT NULL src character varying(66) NOT NULL,
deployment boolean NOT NULL,
data bytea
); );

View File

@ -17,42 +17,31 @@
package btc package btc
import ( import (
"context"
"errors"
"fmt" "fmt"
"github.com/ipfs/go-block-format" "github.com/jmoiron/sqlx"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
) )
var ( // IPLDPGFetcher satisfies the IPLDFetcher interface for ethereum
errUnexpectedNumberOfIPLDs = errors.New("ipfs batch fetch returned unexpected number of IPLDs") // it interfaces directly with PG-IPFS instead of going through a node-interface or remote node
) type IPLDPGFetcher struct {
db *postgres.DB
// IPLDFetcher satisfies the IPLDFetcher interface for ethereum
type IPLDFetcher struct {
BlockService blockservice.BlockService
} }
// NewIPLDFetcher creates a pointer to a new IPLDFetcher // NewIPLDPGFetcher creates a pointer to a new IPLDPGFetcher
// It interfaces with PG-IPFS through an internalized IPFS node interface func NewIPLDPGFetcher(db *postgres.DB) *IPLDPGFetcher {
func NewIPLDFetcher(ipfsPath string) (*IPLDFetcher, error) { return &IPLDPGFetcher{
blockService, err := ipfs.InitIPFSBlockService(ipfsPath) db: db,
if err != nil {
return nil, err
} }
return &IPLDFetcher{
BlockService: blockService,
}, nil
} }
// Fetch is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper // Fetch is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper
func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) { func (f *IPLDPGFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) {
cidWrapper, ok := cids.(*CIDWrapper) cidWrapper, ok := cids.(*CIDWrapper)
if !ok { if !ok {
return nil, fmt.Errorf("btc fetcher: expected cids type %T got %T", &CIDWrapper{}, cids) return nil, fmt.Errorf("btc fetcher: expected cids type %T got %T", &CIDWrapper{}, cids)
@ -60,76 +49,59 @@ func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) {
log.Debug("fetching iplds") log.Debug("fetching iplds")
iplds := IPLDs{} iplds := IPLDs{}
iplds.BlockNumber = cidWrapper.BlockNumber iplds.BlockNumber = cidWrapper.BlockNumber
var err error
iplds.Header, err = f.FetchHeader(cidWrapper.Header) tx, err := f.db.Beginx()
if err != nil { if err != nil {
return nil, err return nil, err
} }
iplds.Transactions, err = f.FetchTrxs(cidWrapper.Transactions) defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
iplds.Header, err = f.FetchHeader(tx, cidWrapper.Header)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("btc pg fetcher: header fetching error: %s", err.Error())
} }
return iplds, nil iplds.Transactions, err = f.FetchTrxs(tx, cidWrapper.Transactions)
if err != nil {
return nil, fmt.Errorf("btc pg fetcher: transaction fetching error: %s", err.Error())
}
return iplds, err
} }
// FetchHeaders fetches headers // FetchHeaders fetches headers
// It uses the f.fetch method func (f *IPLDPGFetcher) FetchHeader(tx *sqlx.Tx, c HeaderModel) (ipfs.BlockModel, error) {
func (f *IPLDFetcher) FetchHeader(c HeaderModel) (ipfs.BlockModel, error) {
log.Debug("fetching header ipld") log.Debug("fetching header ipld")
dc, err := cid.Decode(c.CID) headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil {
return ipfs.BlockModel{}, err
}
header, err := f.fetch(dc)
if err != nil { if err != nil {
return ipfs.BlockModel{}, err return ipfs.BlockModel{}, err
} }
return ipfs.BlockModel{ return ipfs.BlockModel{
Data: header.RawData(), Data: headerBytes,
CID: header.Cid().String(), CID: c.CID,
}, nil }, nil
} }
// FetchTrxs fetches transactions // FetchTrxs fetches transactions
// It uses the f.fetchBatch method func (f *IPLDPGFetcher) FetchTrxs(tx *sqlx.Tx, cids []TxModel) ([]ipfs.BlockModel, error) {
func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching transaction iplds") log.Debug("fetching transaction iplds")
trxCids := make([]cid.Cid, len(cids)) trxIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
dc, err := cid.Decode(c.CID) trxBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
trxCids[i] = dc
}
trxs := f.fetchBatch(trxCids)
trxIPLDs := make([]ipfs.BlockModel, len(trxs))
for i, trx := range trxs {
trxIPLDs[i] = ipfs.BlockModel{ trxIPLDs[i] = ipfs.BlockModel{
Data: trx.RawData(), Data: trxBytes,
CID: trx.Cid().String(), CID: c.CID,
} }
} }
if len(trxIPLDs) != len(trxCids) {
log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids))
return trxIPLDs, errUnexpectedNumberOfIPLDs
}
return trxIPLDs, nil return trxIPLDs, nil
} }
// fetch is used to fetch a single cid
func (f *IPLDFetcher) fetch(cid cid.Cid) (blocks.Block, error) {
return f.BlockService.GetBlock(context.Background(), cid)
}
// fetchBatch is used to fetch a batch of IPFS data blocks by cid
// There is no guarantee all are fetched, and no error in such a case, so
// downstream we will need to confirm which CIDs were fetched in the result set
func (f *IPLDFetcher) fetchBatch(cids []cid.Cid) []blocks.Block {
fetchedBlocks := make([]blocks.Block, 0, len(cids))
blockChan := f.BlockService.GetBlocks(context.Background(), cids)
for block := range blockChan {
fetchedBlocks = append(fetchedBlocks, block)
}
return fetchedBlocks
}

View File

@ -20,102 +20,107 @@ import (
"fmt" "fmt"
"strconv" "strconv"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/dag_putters"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
) )
// IPLDPublisher satisfies the IPLDPublisher for ethereum // IPLDPublisherAndIndexer satisfies the IPLDPublisher interface for bitcoin
type IPLDPublisher struct { // It interfaces directly with the public.blocks table of PG-IPFS rather than going through an ipfs intermediary
HeaderPutter ipfs.DagPutter // It publishes and indexes IPLDs together in a single sqlx.Tx
TransactionPutter ipfs.DagPutter type IPLDPublisherAndIndexer struct {
TransactionTriePutter ipfs.DagPutter indexer *CIDIndexer
} }
// NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface // NewIPLDPublisherAndIndexer creates a pointer to a new IPLDPublisherAndIndexer which satisfies the IPLDPublisher interface
func NewIPLDPublisher(ipfsPath string) (*IPLDPublisher, error) { func NewIPLDPublisherAndIndexer(db *postgres.DB) *IPLDPublisherAndIndexer {
node, err := ipfs.InitIPFSNode(ipfsPath) return &IPLDPublisherAndIndexer{
if err != nil { indexer: NewCIDIndexer(db),
return nil, err
} }
return &IPLDPublisher{
HeaderPutter: dag_putters.NewBtcHeaderDagPutter(node),
TransactionPutter: dag_putters.NewBtcTxDagPutter(node),
TransactionTriePutter: dag_putters.NewBtcTxTrieDagPutter(node),
}, nil
} }
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload // Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) { func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
ipldPayload, ok := payload.(ConvertedPayload) ipldPayload, ok := payload.(ConvertedPayload)
if !ok { if !ok {
return nil, fmt.Errorf("eth publisher expected payload type %T got %T", &ConvertedPayload{}, payload) return nil, fmt.Errorf("btc publisher expected payload type %T got %T", ConvertedPayload{}, payload)
} }
// Generate nodes // Generate the iplds
headerNode, txNodes, txTrieNodes, err := ipld.FromHeaderAndTxs(ipldPayload.Header, ipldPayload.Txs) headerNode, txNodes, txTrieNodes, err := ipld.FromHeaderAndTxs(ipldPayload.Header, ipldPayload.Txs)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Process and publish headers
headerCid, err := pub.publishHeader(headerNode) // Begin new db tx
tx, err := pub.indexer.db.Beginx()
if err != nil { if err != nil {
return nil, err return nil, err
} }
mhKey, _ := shared.MultihashKeyFromCIDString(headerCid) defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
// Publish trie nodes
for _, node := range txTrieNodes {
if err := shared.PublishIPLD(tx, node); err != nil {
return nil, err
}
}
// Publish and index header
if err := shared.PublishIPLD(tx, headerNode); err != nil {
return nil, err
}
header := HeaderModel{ header := HeaderModel{
CID: headerCid, CID: headerNode.Cid().String(),
MhKey: mhKey, MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: ipldPayload.Header.PrevBlock.String(), ParentHash: ipldPayload.Header.PrevBlock.String(),
BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)), BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)),
BlockHash: ipldPayload.Header.BlockHash().String(), BlockHash: ipldPayload.Header.BlockHash().String(),
Timestamp: ipldPayload.Header.Timestamp.UnixNano(), Timestamp: ipldPayload.Header.Timestamp.UnixNano(),
Bits: ipldPayload.Header.Bits, Bits: ipldPayload.Header.Bits,
} }
// Process and publish transactions headerID, err := pub.indexer.indexHeaderCID(tx, header)
transactionCids, err := pub.publishTransactions(txNodes, txTrieNodes, ipldPayload.TxMetaData)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Package CIDs and their metadata into a single struct
return &CIDPayload{
HeaderCID: header,
TransactionCIDs: transactionCids,
}, nil
}
func (pub *IPLDPublisher) publishHeader(header *ipld.BtcHeader) (string, error) { // Publish and index txs
cid, err := pub.HeaderPutter.DagPut(header) for i, txNode := range txNodes {
if err != nil { if err := shared.PublishIPLD(tx, txNode); err != nil {
return "", err return nil, err
} }
return cid, nil txModel := ipldPayload.TxMetaData[i]
} txModel.CID = txNode.Cid().String()
txModel.MhKey = shared.MultihashKeyFromCID(txNode.Cid())
func (pub *IPLDPublisher) publishTransactions(transactions []*ipld.BtcTx, txTrie []*ipld.BtcTxTrie, trxMeta []TxModelWithInsAndOuts) ([]TxModelWithInsAndOuts, error) { txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID)
txCids := make([]TxModelWithInsAndOuts, len(transactions))
for i, tx := range transactions {
cid, err := pub.TransactionPutter.DagPut(tx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
mhKey, _ := shared.MultihashKeyFromCIDString(cid) for _, input := range txModel.TxInputs {
txCids[i] = TxModelWithInsAndOuts{ if err := pub.indexer.indexTxInput(tx, input, txID); err != nil {
CID: cid, return nil, err
MhKey: mhKey, }
Index: trxMeta[i].Index, }
TxHash: trxMeta[i].TxHash, for _, output := range txModel.TxOutputs {
SegWit: trxMeta[i].SegWit, if err := pub.indexer.indexTxOutput(tx, output, txID); err != nil {
WitnessHash: trxMeta[i].WitnessHash, return nil, err
TxInputs: trxMeta[i].TxInputs, }
TxOutputs: trxMeta[i].TxOutputs,
} }
} }
for _, txNode := range txTrie {
// We don't do anything with the tx trie cids atm // This IPLDPublisher does both publishing and indexing, we do not need to pass anything forward to the indexer
if _, err := pub.TransactionTriePutter.DagPut(txNode); err != nil { return nil, err
return nil, err }
}
} // Index satisfies the shared.CIDIndexer interface
return txCids, nil func (pub *IPLDPublisherAndIndexer) Index(cids shared.CIDsForIndexing) error {
return nil
} }

View File

@ -19,63 +19,103 @@ package btc_test
import ( import (
"bytes" "bytes"
"github.com/ethereum/go-ethereum/common" "github.com/ipfs/go-cid"
"github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-ds-help"
"github.com/multiformats/go-multihash"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc/mocks" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc/mocks"
mocks2 "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/mocks" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
) )
var ( var _ = Describe("PublishAndIndexer", func() {
mockHeaderDagPutter *mocks2.MappedDagPutter var (
mockTrxDagPutter *mocks2.MappedDagPutter db *postgres.DB
mockTrxTrieDagPutter *mocks2.DagPutter err error
) repo *btc.IPLDPublisherAndIndexer
ipfsPgGet = `SELECT data FROM public.blocks
var _ = Describe("Publisher", func() { WHERE key = $1`
)
BeforeEach(func() { BeforeEach(func() {
mockHeaderDagPutter = new(mocks2.MappedDagPutter) db, err = shared.SetupDB()
mockTrxDagPutter = new(mocks2.MappedDagPutter) Expect(err).ToNot(HaveOccurred())
mockTrxTrieDagPutter = new(mocks2.DagPutter) repo = btc.NewIPLDPublisherAndIndexer(db)
})
AfterEach(func() {
btc.TearDownDB(db)
}) })
Describe("Publish", func() { Describe("Publish", func() {
It("Publishes the passed IPLDPayload objects to IPFS and returns a CIDPayload for indexing", func() { It("Published and indexes header and transaction IPLDs in a single tx", func() {
by := new(bytes.Buffer) emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
err := mocks.MockConvertedPayload.BlockPayload.Header.Serialize(by) Expect(emptyReturn).To(BeNil())
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
headerBytes := by.Bytes() pgStr := `SELECT * FROM btc.header_cids
err = mocks.MockTransactions[0].MsgTx().Serialize(by) WHERE block_number = $1`
// check header was properly indexed
buf := bytes.NewBuffer(make([]byte, 0, 80))
err = mocks.MockBlock.Header.Serialize(buf)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
tx1Bytes := by.Bytes() headerBytes := buf.Bytes()
err = mocks.MockTransactions[1].MsgTx().Serialize(by) c, _ := ipld.RawdataToCid(ipld.MBitcoinHeader, headerBytes, multihash.DBL_SHA2_256)
header := new(btc.HeaderModel)
err = db.Get(header, pgStr, mocks.MockHeaderMetaData.BlockNumber)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
tx2Bytes := by.Bytes() Expect(header.CID).To(Equal(c.String()))
err = mocks.MockTransactions[2].MsgTx().Serialize(by) Expect(header.BlockNumber).To(Equal(mocks.MockHeaderMetaData.BlockNumber))
Expect(header.Bits).To(Equal(mocks.MockHeaderMetaData.Bits))
Expect(header.Timestamp).To(Equal(mocks.MockHeaderMetaData.Timestamp))
Expect(header.BlockHash).To(Equal(mocks.MockHeaderMetaData.BlockHash))
Expect(header.ParentHash).To(Equal(mocks.MockHeaderMetaData.ParentHash))
dc, err := cid.Decode(header.CID)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
tx3Bytes := by.Bytes() mhKey := dshelp.MultihashToDsKey(dc.Hash())
mockHeaderDagPutter.CIDsToReturn = map[common.Hash]string{ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
common.BytesToHash(headerBytes): mocks.MockHeaderCID.String(), var data []byte
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(headerBytes))
// check that txs were properly indexed
trxs := make([]btc.TxModel, 0)
pgStr = `SELECT transaction_cids.id, transaction_cids.header_id, transaction_cids.index,
transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.segwit, transaction_cids.witness_hash
FROM btc.transaction_cids INNER JOIN btc.header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&trxs, pgStr, mocks.MockHeaderMetaData.BlockNumber)
Expect(err).ToNot(HaveOccurred())
Expect(len(trxs)).To(Equal(3))
txData := make([][]byte, len(mocks.MockTransactions))
txCIDs := make([]string, len(mocks.MockTransactions))
for i, m := range mocks.MockTransactions {
buf := bytes.NewBuffer(make([]byte, 0))
err = m.MsgTx().Serialize(buf)
Expect(err).ToNot(HaveOccurred())
tx := buf.Bytes()
txData[i] = tx
c, _ := ipld.RawdataToCid(ipld.MBitcoinTx, tx, multihash.DBL_SHA2_256)
txCIDs[i] = c.String()
} }
mockTrxDagPutter.CIDsToReturn = map[common.Hash]string{ for _, tx := range trxs {
common.BytesToHash(tx1Bytes): mocks.MockTrxCID1.String(), Expect(tx.SegWit).To(Equal(false))
common.BytesToHash(tx2Bytes): mocks.MockTrxCID2.String(), Expect(tx.HeaderID).To(Equal(header.ID))
common.BytesToHash(tx3Bytes): mocks.MockTrxCID3.String(), Expect(tx.WitnessHash).To(Equal(""))
Expect(tx.CID).To(Equal(txCIDs[tx.Index]))
Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[tx.Index].TxHash().String()))
dc, err := cid.Decode(tx.CID)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(txData[tx.Index]))
} }
publisher := btc.IPLDPublisher{
HeaderPutter: mockHeaderDagPutter,
TransactionPutter: mockTrxDagPutter,
TransactionTriePutter: mockTrxTrieDagPutter,
}
payload, err := publisher.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
cidPayload, ok := payload.(*btc.CIDPayload)
Expect(ok).To(BeTrue())
Expect(cidPayload).To(Equal(&mocks.MockCIDPayload))
Expect(cidPayload.HeaderCID).To(Equal(mocks.MockHeaderMetaData))
Expect(cidPayload.TransactionCIDs).To(Equal(mocks.MockTxsMetaDataPostPublish))
}) })
}) })
}) })

View File

@ -71,11 +71,13 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Convert
if err != nil { if err != nil {
return nil, err return nil, err
} }
txMeta := TxModel{ txMeta := TxModel{
Dst: shared.HandleZeroAddrPointer(trx.To()), Dst: shared.HandleZeroAddrPointer(trx.To()),
Src: shared.HandleZeroAddr(from), Src: shared.HandleZeroAddr(from),
TxHash: trx.Hash().String(), TxHash: trx.Hash().String(),
Index: int64(i), Index: int64(i),
Data: trx.Data(),
} }
// txMeta will have same index as its corresponding trx in the convertedPayload.BlockBody // txMeta will have same index as its corresponding trx in the convertedPayload.BlockBody
convertedPayload.TxMetaData = append(convertedPayload.TxMetaData, txMeta) convertedPayload.TxMetaData = append(convertedPayload.TxMetaData, txMeta)
@ -90,7 +92,7 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Convert
if err := receipts.DeriveFields(pc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil { if err := receipts.DeriveFields(pc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil {
return nil, err return nil, err
} }
for _, receipt := range receipts { for i, receipt := range receipts {
// Extract topic and contract data from the receipt for indexing // Extract topic and contract data from the receipt for indexing
topicSets := make([][]string, 4) topicSets := make([][]string, 4)
mappedContracts := make(map[string]bool) // use map to avoid duplicate addresses mappedContracts := make(map[string]bool) // use map to avoid duplicate addresses
@ -109,6 +111,7 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Convert
contract := shared.HandleZeroAddr(receipt.ContractAddress) contract := shared.HandleZeroAddr(receipt.ContractAddress)
var contractHash string var contractHash string
if contract != "" { if contract != "" {
convertedPayload.TxMetaData[i].Deployment = true
contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String() contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
} }
rctMeta := ReceiptModel{ rctMeta := ReceiptModel{

View File

@ -109,10 +109,10 @@ func (in *CIDIndexer) indexUncleCID(tx *sqlx.Tx, uncle UncleModel, headerID int6
func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error { func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
for _, trxCidMeta := range payload.TransactionCIDs { for _, trxCidMeta := range payload.TransactionCIDs {
var txID int64 var txID int64
err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, data, deployment) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key) = ($3, $4, $5, $6, $7) ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, data, deployment) = ($3, $4, $5, $6, $7, $8, $9)
RETURNING id`, RETURNING id`,
headerID, trxCidMeta.TxHash, trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src, trxCidMeta.Index, trxCidMeta.MhKey).Scan(&txID) headerID, trxCidMeta.TxHash, trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src, trxCidMeta.Index, trxCidMeta.MhKey, trxCidMeta.Data, trxCidMeta.Deployment).Scan(&txID)
if err != nil { if err != nil {
return err return err
} }
@ -128,10 +128,10 @@ func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPa
func (in *CIDIndexer) indexTransactionCID(tx *sqlx.Tx, transaction TxModel, headerID int64) (int64, error) { func (in *CIDIndexer) indexTransactionCID(tx *sqlx.Tx, transaction TxModel, headerID int64) (int64, error) {
var txID int64 var txID int64
err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, data, deployment) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key) = ($3, $4, $5, $6, $7) ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, data, deployment) = ($3, $4, $5, $6, $7, $8, $9)
RETURNING id`, RETURNING id`,
headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey).Scan(&txID) headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Deployment).Scan(&txID)
return txID, err return txID, err
} }

View File

@ -17,203 +17,168 @@
package eth package eth
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-block-format" "github.com/jmoiron/sqlx"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
) )
var ( // IPLDPGFetcher satisfies the IPLDFetcher interface for ethereum
errUnexpectedNumberOfIPLDs = errors.New("ipfs batch fetch returned unexpected number of IPLDs") // It interfaces directly with PG-IPFS
) type IPLDPGFetcher struct {
db *postgres.DB
// IPLDFetcher satisfies the IPLDFetcher interface for ethereum
type IPLDFetcher struct {
BlockService blockservice.BlockService
} }
// NewIPLDFetcher creates a pointer to a new IPLDFetcher // NewIPLDPGFetcher creates a pointer to a new IPLDPGFetcher
func NewIPLDFetcher(ipfsPath string) (*IPLDFetcher, error) { func NewIPLDPGFetcher(db *postgres.DB) *IPLDPGFetcher {
blockService, err := ipfs.InitIPFSBlockService(ipfsPath) return &IPLDPGFetcher{
if err != nil { db: db,
return nil, err
} }
return &IPLDFetcher{
BlockService: blockService,
}, nil
} }
// Fetch is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper // Fetch is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper
func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) { func (f *IPLDPGFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) {
cidWrapper, ok := cids.(*CIDWrapper) cidWrapper, ok := cids.(*CIDWrapper)
if !ok { if !ok {
return nil, fmt.Errorf("eth fetcher: expected cids type %T got %T", &CIDWrapper{}, cids) return nil, fmt.Errorf("eth fetcher: expected cids type %T got %T", &CIDWrapper{}, cids)
} }
log.Debug("fetching iplds") log.Debug("fetching iplds")
var err error
iplds := IPLDs{} iplds := IPLDs{}
iplds.TotalDifficulty, ok = new(big.Int).SetString(cidWrapper.Header.TotalDifficulty, 10) iplds.TotalDifficulty, ok = new(big.Int).SetString(cidWrapper.Header.TotalDifficulty, 10)
if !ok { if !ok {
return nil, errors.New("eth fetcher: unable to set total difficulty") return nil, errors.New("eth fetcher: unable to set total difficulty")
} }
iplds.BlockNumber = cidWrapper.BlockNumber iplds.BlockNumber = cidWrapper.BlockNumber
iplds.Header, err = f.FetchHeader(cidWrapper.Header)
tx, err := f.db.Beginx()
if err != nil { if err != nil {
return nil, err return nil, err
} }
iplds.Uncles, err = f.FetchUncles(cidWrapper.Uncles) defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
iplds.Header, err = f.FetchHeader(tx, cidWrapper.Header)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("eth pg fetcher: header fetching error: %s", err.Error())
} }
iplds.Transactions, err = f.FetchTrxs(cidWrapper.Transactions) iplds.Uncles, err = f.FetchUncles(tx, cidWrapper.Uncles)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("eth pg fetcher: uncle fetching error: %s", err.Error())
} }
iplds.Receipts, err = f.FetchRcts(cidWrapper.Receipts) iplds.Transactions, err = f.FetchTrxs(tx, cidWrapper.Transactions)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("eth pg fetcher: transaction fetching error: %s", err.Error())
} }
iplds.StateNodes, err = f.FetchState(cidWrapper.StateNodes) iplds.Receipts, err = f.FetchRcts(tx, cidWrapper.Receipts)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("eth pg fetcher: receipt fetching error: %s", err.Error())
} }
iplds.StorageNodes, err = f.FetchStorage(cidWrapper.StorageNodes) iplds.StateNodes, err = f.FetchState(tx, cidWrapper.StateNodes)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("eth pg fetcher: state fetching error: %s", err.Error())
} }
return iplds, nil iplds.StorageNodes, err = f.FetchStorage(tx, cidWrapper.StorageNodes)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: storage fetching error: %s", err.Error())
}
return iplds, err
} }
// FetchHeaders fetches headers // FetchHeaders fetches headers
// It uses the f.fetch method func (f *IPLDPGFetcher) FetchHeader(tx *sqlx.Tx, c HeaderModel) (ipfs.BlockModel, error) {
func (f *IPLDFetcher) FetchHeader(c HeaderModel) (ipfs.BlockModel, error) {
log.Debug("fetching header ipld") log.Debug("fetching header ipld")
dc, err := cid.Decode(c.CID) headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil {
return ipfs.BlockModel{}, err
}
header, err := f.fetch(dc)
if err != nil { if err != nil {
return ipfs.BlockModel{}, err return ipfs.BlockModel{}, err
} }
return ipfs.BlockModel{ return ipfs.BlockModel{
Data: header.RawData(), Data: headerBytes,
CID: header.Cid().String(), CID: c.CID,
}, nil }, nil
} }
// FetchUncles fetches uncles // FetchUncles fetches uncles
// It uses the f.fetchBatch method func (f *IPLDPGFetcher) FetchUncles(tx *sqlx.Tx, cids []UncleModel) ([]ipfs.BlockModel, error) {
func (f *IPLDFetcher) FetchUncles(cids []UncleModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching uncle iplds") log.Debug("fetching uncle iplds")
uncleCids := make([]cid.Cid, len(cids)) uncleIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
dc, err := cid.Decode(c.CID) uncleBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
uncleCids[i] = dc
}
uncles := f.fetchBatch(uncleCids)
uncleIPLDs := make([]ipfs.BlockModel, len(uncles))
for i, uncle := range uncles {
uncleIPLDs[i] = ipfs.BlockModel{ uncleIPLDs[i] = ipfs.BlockModel{
Data: uncle.RawData(), Data: uncleBytes,
CID: uncle.Cid().String(), CID: c.CID,
} }
} }
if len(uncleIPLDs) != len(uncleCids) {
log.Errorf("ipfs fetcher: number of uncle blocks returned (%d) does not match number expected (%d)", len(uncles), len(uncleCids))
return uncleIPLDs, errUnexpectedNumberOfIPLDs
}
return uncleIPLDs, nil return uncleIPLDs, nil
} }
// FetchTrxs fetches transactions // FetchTrxs fetches transactions
// It uses the f.fetchBatch method func (f *IPLDPGFetcher) FetchTrxs(tx *sqlx.Tx, cids []TxModel) ([]ipfs.BlockModel, error) {
func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching transaction iplds") log.Debug("fetching transaction iplds")
trxCids := make([]cid.Cid, len(cids)) trxIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
dc, err := cid.Decode(c.CID) txBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
trxCids[i] = dc
}
trxs := f.fetchBatch(trxCids)
trxIPLDs := make([]ipfs.BlockModel, len(trxs))
for i, trx := range trxs {
trxIPLDs[i] = ipfs.BlockModel{ trxIPLDs[i] = ipfs.BlockModel{
Data: trx.RawData(), Data: txBytes,
CID: trx.Cid().String(), CID: c.CID,
} }
} }
if len(trxIPLDs) != len(trxCids) {
log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids))
return trxIPLDs, errUnexpectedNumberOfIPLDs
}
return trxIPLDs, nil return trxIPLDs, nil
} }
// FetchRcts fetches receipts // FetchRcts fetches receipts
// It uses the f.fetchBatch method func (f *IPLDPGFetcher) FetchRcts(tx *sqlx.Tx, cids []ReceiptModel) ([]ipfs.BlockModel, error) {
func (f *IPLDFetcher) FetchRcts(cids []ReceiptModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching receipt iplds") log.Debug("fetching receipt iplds")
rctCids := make([]cid.Cid, len(cids)) rctIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
dc, err := cid.Decode(c.CID) rctBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rctCids[i] = dc
}
rcts := f.fetchBatch(rctCids)
rctIPLDs := make([]ipfs.BlockModel, len(rcts))
for i, rct := range rcts {
rctIPLDs[i] = ipfs.BlockModel{ rctIPLDs[i] = ipfs.BlockModel{
Data: rct.RawData(), Data: rctBytes,
CID: rct.Cid().String(), CID: c.CID,
} }
} }
if len(rctIPLDs) != len(rctCids) {
log.Errorf("ipfs fetcher: number of receipt blocks returned (%d) does not match number expected (%d)", len(rcts), len(rctCids))
return rctIPLDs, errUnexpectedNumberOfIPLDs
}
return rctIPLDs, nil return rctIPLDs, nil
} }
// FetchState fetches state nodes // FetchState fetches state nodes
// It uses the single f.fetch method instead of the batch fetch, because it func (f *IPLDPGFetcher) FetchState(tx *sqlx.Tx, cids []StateNodeModel) ([]StateNode, error) {
// needs to maintain the data's relation to state keys
func (f *IPLDFetcher) FetchState(cids []StateNodeModel) ([]StateNode, error) {
log.Debug("fetching state iplds") log.Debug("fetching state iplds")
stateNodes := make([]StateNode, 0, len(cids)) stateNodes := make([]StateNode, 0, len(cids))
for _, stateNode := range cids { for _, stateNode := range cids {
if stateNode.CID == "" { if stateNode.CID == "" {
continue continue
} }
dc, err := cid.Decode(stateNode.CID) stateBytes, err := shared.FetchIPLDByMhKey(tx, stateNode.MhKey)
if err != nil {
return nil, err
}
state, err := f.fetch(dc)
if err != nil { if err != nil {
return nil, err return nil, err
} }
stateNodes = append(stateNodes, StateNode{ stateNodes = append(stateNodes, StateNode{
IPLD: ipfs.BlockModel{ IPLD: ipfs.BlockModel{
Data: state.RawData(), Data: stateBytes,
CID: state.Cid().String(), CID: stateNode.CID,
}, },
StateLeafKey: common.HexToHash(stateNode.StateKey), StateLeafKey: common.HexToHash(stateNode.StateKey),
Type: ResolveToNodeType(stateNode.NodeType), Type: ResolveToNodeType(stateNode.NodeType),
@ -224,27 +189,21 @@ func (f *IPLDFetcher) FetchState(cids []StateNodeModel) ([]StateNode, error) {
} }
// FetchStorage fetches storage nodes // FetchStorage fetches storage nodes
// It uses the single f.fetch method instead of the batch fetch, because it func (f *IPLDPGFetcher) FetchStorage(tx *sqlx.Tx, cids []StorageNodeWithStateKeyModel) ([]StorageNode, error) {
// needs to maintain the data's relation to state and storage keys
func (f *IPLDFetcher) FetchStorage(cids []StorageNodeWithStateKeyModel) ([]StorageNode, error) {
log.Debug("fetching storage iplds") log.Debug("fetching storage iplds")
storageNodes := make([]StorageNode, 0, len(cids)) storageNodes := make([]StorageNode, 0, len(cids))
for _, storageNode := range cids { for _, storageNode := range cids {
if storageNode.CID == "" || storageNode.StateKey == "" { if storageNode.CID == "" || storageNode.StateKey == "" {
continue continue
} }
dc, err := cid.Decode(storageNode.CID) storageBytes, err := shared.FetchIPLDByMhKey(tx, storageNode.MhKey)
if err != nil {
return nil, err
}
storage, err := f.fetch(dc)
if err != nil { if err != nil {
return nil, err return nil, err
} }
storageNodes = append(storageNodes, StorageNode{ storageNodes = append(storageNodes, StorageNode{
IPLD: ipfs.BlockModel{ IPLD: ipfs.BlockModel{
Data: storage.RawData(), Data: storageBytes,
CID: storage.Cid().String(), CID: storageNode.CID,
}, },
StateLeafKey: common.HexToHash(storageNode.StateKey), StateLeafKey: common.HexToHash(storageNode.StateKey),
StorageLeafKey: common.HexToHash(storageNode.StorageKey), StorageLeafKey: common.HexToHash(storageNode.StorageKey),
@ -254,20 +213,3 @@ func (f *IPLDFetcher) FetchStorage(cids []StorageNodeWithStateKeyModel) ([]Stora
} }
return storageNodes, nil return storageNodes, nil
} }
// fetch is used to fetch a single cid
func (f *IPLDFetcher) fetch(cid cid.Cid) (blocks.Block, error) {
return f.BlockService.GetBlock(context.Background(), cid)
}
// fetchBatch is used to fetch a batch of IPFS data blocks by cid
// There is no guarantee all are fetched, and no error in such a case, so
// downstream we will need to confirm which CIDs were fetched in the result set
func (f *IPLDFetcher) fetchBatch(cids []cid.Cid) []blocks.Block {
fetchedBlocks := make([]blocks.Block, 0, len(cids))
blockChan := f.BlockService.GetBlocks(context.Background(), cids)
for block := range blockChan {
fetchedBlocks = append(fetchedBlocks, block)
}
return fetchedBlocks
}

View File

@ -17,139 +17,49 @@
package eth_test package eth_test
import ( import (
"bytes"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff"
"github.com/ipfs/go-block-format"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/mocks" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
) )
var ( var (
mockHeaderData = []byte{0, 1, 2, 3, 4} db *postgres.DB
mockUncleData = []byte{1, 2, 3, 4, 5} pubAndIndexer *eth.IPLDPublisherAndIndexer
mockTrxData = []byte{2, 3, 4, 5, 6} fetcher *eth.IPLDPGFetcher
mockReceiptData = []byte{3, 4, 5, 6, 7}
mockStateData = []byte{4, 5, 6, 7, 8}
mockStorageData = []byte{5, 6, 7, 8, 9}
mockStorageData2 = []byte{6, 7, 8, 9, 1}
mockHeaderBlock = blocks.NewBlock(mockHeaderData)
mockUncleBlock = blocks.NewBlock(mockUncleData)
mockTrxBlock = blocks.NewBlock(mockTrxData)
mockReceiptBlock = blocks.NewBlock(mockReceiptData)
mockStateBlock = blocks.NewBlock(mockStateData)
mockStorageBlock1 = blocks.NewBlock(mockStorageData)
mockStorageBlock2 = blocks.NewBlock(mockStorageData2)
mockBlocks = []blocks.Block{mockHeaderBlock, mockUncleBlock, mockTrxBlock, mockReceiptBlock, mockStateBlock, mockStorageBlock1, mockStorageBlock2}
mockBlockService *mocks.MockIPFSBlockService
mockCIDWrapper = &eth.CIDWrapper{
BlockNumber: big.NewInt(9000),
Header: eth.HeaderModel{
TotalDifficulty: "1337",
CID: mockHeaderBlock.Cid().String(),
},
Uncles: []eth.UncleModel{
{
CID: mockUncleBlock.Cid().String(),
},
},
Transactions: []eth.TxModel{
{
CID: mockTrxBlock.Cid().String(),
},
},
Receipts: []eth.ReceiptModel{
{
CID: mockReceiptBlock.Cid().String(),
},
},
StateNodes: []eth.StateNodeModel{{
CID: mockStateBlock.Cid().String(),
NodeType: 2,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
}},
StorageNodes: []eth.StorageNodeWithStateKeyModel{{
CID: mockStorageBlock1.Cid().String(),
NodeType: 2,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
StorageKey: "0000000000000000000000000000000000000000000000000000000000000001",
},
{
CID: mockStorageBlock2.Cid().String(),
NodeType: 2,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
StorageKey: "0000000000000000000000000000000000000000000000000000000000000002",
}},
}
) )
var _ = Describe("IPLDFetcher", func() { var _ = Describe("IPLDPGFetcher", func() {
Describe("Fetch", func() { Describe("Fetch", func() {
BeforeEach(func() { BeforeEach(func() {
mockBlockService = new(mocks.MockIPFSBlockService) var err error
err := mockBlockService.AddBlocks(mockBlocks) db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(mockBlockService.Blocks)).To(Equal(7)) pubAndIndexer = eth.NewIPLDPublisherAndIndexer(db)
_, err = pubAndIndexer.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
fetcher = eth.NewIPLDPGFetcher(db)
})
AfterEach(func() {
eth.TearDownDB(db)
}) })
It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() { It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() {
fetcher := new(eth.IPLDFetcher) i, err := fetcher.Fetch(mocks.MockCIDWrapper)
fetcher.BlockService = mockBlockService
i, err := fetcher.Fetch(mockCIDWrapper)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
iplds, ok := i.(eth.IPLDs) iplds, ok := i.(eth.IPLDs)
Expect(ok).To(BeTrue()) Expect(ok).To(BeTrue())
Expect(iplds.TotalDifficulty).To(Equal(big.NewInt(1337))) Expect(iplds.TotalDifficulty).To(Equal(mocks.MockConvertedPayload.TotalDifficulty))
Expect(iplds.BlockNumber).To(Equal(mockCIDWrapper.BlockNumber)) Expect(iplds.BlockNumber).To(Equal(mocks.MockConvertedPayload.Block.Number()))
Expect(iplds.Header).To(Equal(ipfs.BlockModel{ Expect(iplds.Header).To(Equal(mocks.MockIPLDs.Header))
Data: mockHeaderBlock.RawData(), Expect(len(iplds.Uncles)).To(Equal(0))
CID: mockHeaderBlock.Cid().String(), Expect(iplds.Transactions).To(Equal(mocks.MockIPLDs.Transactions))
})) Expect(iplds.Receipts).To(Equal(mocks.MockIPLDs.Receipts))
Expect(len(iplds.Uncles)).To(Equal(1)) Expect(iplds.StateNodes).To(Equal(mocks.MockIPLDs.StateNodes))
Expect(iplds.Uncles[0]).To(Equal(ipfs.BlockModel{ Expect(iplds.StorageNodes).To(Equal(mocks.MockIPLDs.StorageNodes))
Data: mockUncleBlock.RawData(),
CID: mockUncleBlock.Cid().String(),
}))
Expect(len(iplds.Transactions)).To(Equal(1))
Expect(iplds.Transactions[0]).To(Equal(ipfs.BlockModel{
Data: mockTrxBlock.RawData(),
CID: mockTrxBlock.Cid().String(),
}))
Expect(len(iplds.Receipts)).To(Equal(1))
Expect(iplds.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: mockReceiptBlock.RawData(),
CID: mockReceiptBlock.Cid().String(),
}))
Expect(len(iplds.StateNodes)).To(Equal(1))
Expect(iplds.StateNodes[0].StateLeafKey).To(Equal(common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")))
Expect(iplds.StateNodes[0].Type).To(Equal(statediff.Leaf))
Expect(iplds.StateNodes[0].IPLD).To(Equal(ipfs.BlockModel{
Data: mockStateBlock.RawData(),
CID: mockStateBlock.Cid().String(),
}))
Expect(len(iplds.StorageNodes)).To(Equal(2))
for _, storage := range iplds.StorageNodes {
Expect(storage.Type).To(Equal(statediff.Leaf))
Expect(storage.StateLeafKey).To(Equal(common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")))
if bytes.Equal(storage.StorageLeafKey.Bytes(), common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001").Bytes()) {
Expect(storage.IPLD).To(Equal(ipfs.BlockModel{
Data: mockStorageBlock1.RawData(),
CID: mockStorageBlock1.Cid().String(),
}))
}
if bytes.Equal(storage.StorageLeafKey.Bytes(), common.HexToHash("0000000000000000000000000000000000000000000000000000000000000002").Bytes()) {
Expect(storage.IPLD).To(Equal(ipfs.BlockModel{
Data: mockStorageBlock2.RawData(),
CID: mockStorageBlock2.Cid().String(),
}))
}
}
}) })
}) })
}) })

View File

@ -59,6 +59,8 @@ type TxModel struct {
MhKey string `db:"mh_key"` MhKey string `db:"mh_key"`
Dst string `db:"dst"` Dst string `db:"dst"`
Src string `db:"src"` Src string `db:"src"`
Data []byte `db:"data"`
Deployment bool `db:"deployment"`
} }
// ReceiptModel is the db model for eth.receipt_cids // ReceiptModel is the db model for eth.receipt_cids

View File

@ -21,64 +21,77 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff" "github.com/ethereum/go-ethereum/statediff"
"github.com/jmoiron/sqlx"
"github.com/multiformats/go-multihash"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/dag_putters"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
) )
// IPLDPublisher satisfies the IPLDPublisher for ethereum // IPLDPublisherAndIndexer satisfies the IPLDPublisher interface for ethereum
type IPLDPublisher struct { // It interfaces directly with the public.blocks table of PG-IPFS rather than going through an ipfs intermediary
HeaderPutter ipfs.DagPutter // It publishes and indexes IPLDs together in a single sqlx.Tx
TransactionPutter ipfs.DagPutter type IPLDPublisherAndIndexer struct {
TransactionTriePutter ipfs.DagPutter indexer *CIDIndexer
ReceiptPutter ipfs.DagPutter
ReceiptTriePutter ipfs.DagPutter
StatePutter ipfs.DagPutter
StoragePutter ipfs.DagPutter
} }
// NewIPLDPublisher creates a pointer to a new IPLDPublisher which satisfies the IPLDPublisher interface // NewIPLDPublisherAndIndexer creates a pointer to a new IPLDPublisherAndIndexer which satisfies the IPLDPublisher interface
func NewIPLDPublisher(ipfsPath string) (*IPLDPublisher, error) { func NewIPLDPublisherAndIndexer(db *postgres.DB) *IPLDPublisherAndIndexer {
node, err := ipfs.InitIPFSNode(ipfsPath) return &IPLDPublisherAndIndexer{
if err != nil { indexer: NewCIDIndexer(db),
return nil, err
} }
return &IPLDPublisher{
HeaderPutter: dag_putters.NewEthBlockHeaderDagPutter(node),
TransactionPutter: dag_putters.NewEthTxsDagPutter(node),
TransactionTriePutter: dag_putters.NewEthTxTrieDagPutter(node),
ReceiptPutter: dag_putters.NewEthReceiptDagPutter(node),
ReceiptTriePutter: dag_putters.NewEthRctTrieDagPutter(node),
StatePutter: dag_putters.NewEthStateDagPutter(node),
StoragePutter: dag_putters.NewEthStorageDagPutter(node),
}, nil
} }
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload // Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) { func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
ipldPayload, ok := payload.(ConvertedPayload) ipldPayload, ok := payload.(ConvertedPayload)
if !ok { if !ok {
return nil, fmt.Errorf("eth publisher expected payload type %T got %T", ConvertedPayload{}, payload) return nil, fmt.Errorf("eth IPLDPublisherAndIndexer expected payload type %T got %T", ConvertedPayload{}, payload)
} }
// Generate the nodes for publishing // Generate the iplds
headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, err := ipld.FromBlockAndReceipts(ipldPayload.Block, ipldPayload.Receipts) headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, err := ipld.FromBlockAndReceipts(ipldPayload.Block, ipldPayload.Receipts)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Process and publish headers // Begin new db tx
headerCid, err := pub.publishHeader(headerNode) tx, err := pub.indexer.db.Beginx()
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
// Publish trie nodes
for _, node := range txTrieNodes {
if err := shared.PublishIPLD(tx, node); err != nil {
return nil, err
}
}
for _, node := range rctTrieNodes {
if err := shared.PublishIPLD(tx, node); err != nil {
return nil, err
}
}
// Publish and index header
if err := shared.PublishIPLD(tx, headerNode); err != nil {
return nil, err
}
reward := CalcEthBlockReward(ipldPayload.Block.Header(), ipldPayload.Block.Uncles(), ipldPayload.Block.Transactions(), ipldPayload.Receipts) reward := CalcEthBlockReward(ipldPayload.Block.Header(), ipldPayload.Block.Uncles(), ipldPayload.Block.Transactions(), ipldPayload.Receipts)
header := HeaderModel{ header := HeaderModel{
CID: headerCid, CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()), MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: ipldPayload.Block.ParentHash().String(), ParentHash: ipldPayload.Block.ParentHash().String(),
BlockNumber: ipldPayload.Block.Number().String(), BlockNumber: ipldPayload.Block.Number().String(),
@ -92,189 +105,129 @@ func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForI
UncleRoot: ipldPayload.Block.UncleHash().String(), UncleRoot: ipldPayload.Block.UncleHash().String(),
Timestamp: ipldPayload.Block.Time(), Timestamp: ipldPayload.Block.Time(),
} }
headerID, err := pub.indexer.indexHeaderCID(tx, header)
if err != nil {
return nil, err
}
// Process and publish uncles // Publish and index uncles
uncleCids := make([]UncleModel, len(uncleNodes)) for _, uncleNode := range uncleNodes {
for i, uncle := range uncleNodes { if err := shared.PublishIPLD(tx, uncleNode); err != nil {
uncleCid, err := pub.publishHeader(uncle)
if err != nil {
return nil, err return nil, err
} }
uncleReward := CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncle.Number.Int64()) uncleReward := CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncleNode.Number.Int64())
uncleCids[i] = UncleModel{ uncle := UncleModel{
CID: uncleCid, CID: uncleNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(uncle.Cid()), MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
ParentHash: uncle.ParentHash.String(), ParentHash: uncleNode.ParentHash.String(),
BlockHash: uncle.Hash().String(), BlockHash: uncleNode.Hash().String(),
Reward: uncleReward.String(), Reward: uncleReward.String(),
} }
} if err := pub.indexer.indexUncleCID(tx, uncle, headerID); err != nil {
// Process and publish transactions
transactionCids, err := pub.publishTransactions(txNodes, txTrieNodes, ipldPayload.TxMetaData)
if err != nil {
return nil, err
}
// Process and publish receipts
receiptsCids, err := pub.publishReceipts(rctNodes, rctTrieNodes, ipldPayload.ReceiptMetaData)
if err != nil {
return nil, err
}
// Process and publish state leafs
stateNodeCids, stateAccounts, err := pub.publishStateNodes(ipldPayload.StateNodes)
if err != nil {
return nil, err
}
// Process and publish storage leafs
storageNodeCids, err := pub.publishStorageNodes(ipldPayload.StorageNodes)
if err != nil {
return nil, err
}
// Package CIDs and their metadata into a single struct
return &CIDPayload{
HeaderCID: header,
UncleCIDs: uncleCids,
TransactionCIDs: transactionCids,
ReceiptCIDs: receiptsCids,
StateNodeCIDs: stateNodeCids,
StorageNodeCIDs: storageNodeCids,
StateAccounts: stateAccounts,
}, nil
}
func (pub *IPLDPublisher) generateBlockNodes(body *types.Block, receipts types.Receipts) (*ipld.EthHeader,
[]*ipld.EthHeader, []*ipld.EthTx, []*ipld.EthTxTrie, []*ipld.EthReceipt, []*ipld.EthRctTrie, error) {
return ipld.FromBlockAndReceipts(body, receipts)
}
func (pub *IPLDPublisher) publishHeader(header *ipld.EthHeader) (string, error) {
return pub.HeaderPutter.DagPut(header)
}
func (pub *IPLDPublisher) publishTransactions(transactions []*ipld.EthTx, txTrie []*ipld.EthTxTrie, trxMeta []TxModel) ([]TxModel, error) {
trxCids := make([]TxModel, len(transactions))
for i, tx := range transactions {
cid, err := pub.TransactionPutter.DagPut(tx)
if err != nil {
return nil, err
}
trxCids[i] = TxModel{
CID: cid,
MhKey: shared.MultihashKeyFromCID(tx.Cid()),
Index: trxMeta[i].Index,
TxHash: trxMeta[i].TxHash,
Src: trxMeta[i].Src,
Dst: trxMeta[i].Dst,
}
}
for _, txNode := range txTrie {
// We don't do anything with the tx trie cids atm
if _, err := pub.TransactionTriePutter.DagPut(txNode); err != nil {
return nil, err return nil, err
} }
} }
return trxCids, nil
}
func (pub *IPLDPublisher) publishReceipts(receipts []*ipld.EthReceipt, receiptTrie []*ipld.EthRctTrie, receiptMeta []ReceiptModel) (map[common.Hash]ReceiptModel, error) { // Publish and index txs and receipts
rctCids := make(map[common.Hash]ReceiptModel) for i, txNode := range txNodes {
for i, rct := range receipts { if err := shared.PublishIPLD(tx, txNode); err != nil {
cid, err := pub.ReceiptPutter.DagPut(rct) return nil, err
}
rctNode := rctNodes[i]
if err := shared.PublishIPLD(tx, rctNode); err != nil {
return nil, err
}
txModel := ipldPayload.TxMetaData[i]
txModel.CID = txNode.Cid().String()
txModel.MhKey = shared.MultihashKeyFromCID(txNode.Cid())
txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rctCids[rct.TxHash] = ReceiptModel{ if txModel.Deployment {
CID: cid, if _, err = shared.PublishRaw(tx, ipld.MEthStorageTrie, multihash.KECCAK_256, txModel.Data); err != nil {
MhKey: shared.MultihashKeyFromCID(rct.Cid()), return nil, err
Contract: receiptMeta[i].Contract, }
ContractHash: receiptMeta[i].ContractHash,
Topic0s: receiptMeta[i].Topic0s,
Topic1s: receiptMeta[i].Topic1s,
Topic2s: receiptMeta[i].Topic2s,
Topic3s: receiptMeta[i].Topic3s,
LogContracts: receiptMeta[i].LogContracts,
} }
} rctModel := ipldPayload.ReceiptMetaData[i]
for _, rctNode := range receiptTrie { rctModel.CID = rctNode.Cid().String()
// We don't do anything with the rct trie cids atm rctModel.MhKey = shared.MultihashKeyFromCID(rctNode.Cid())
if _, err := pub.ReceiptTriePutter.DagPut(rctNode); err != nil { if err := pub.indexer.indexReceiptCID(tx, rctModel, txID); err != nil {
return nil, err return nil, err
} }
} }
return rctCids, nil
// Publish and index state and storage
err = pub.publishAndIndexStateAndStorage(tx, ipldPayload, headerID)
// This IPLDPublisher does both publishing and indexing, we do not need to pass anything forward to the indexer
return nil, err // return err variable explicitly so that we return the err = tx.Commit() assignment in the defer
} }
func (pub *IPLDPublisher) publishStateNodes(stateNodes []TrieNode) ([]StateNodeModel, map[string]StateAccountModel, error) { func (pub *IPLDPublisherAndIndexer) publishAndIndexStateAndStorage(tx *sqlx.Tx, ipldPayload ConvertedPayload, headerID int64) error {
stateNodeCids := make([]StateNodeModel, 0, len(stateNodes)) // Publish and index state and storage
stateAccounts := make(map[string]StateAccountModel) for _, stateNode := range ipldPayload.StateNodes {
for _, stateNode := range stateNodes { stateCIDStr, err := shared.PublishRaw(tx, ipld.MEthStateTrie, multihash.KECCAK_256, stateNode.Value)
node, err := ipld.FromStateTrieRLP(stateNode.Value)
if err != nil { if err != nil {
return nil, nil, err return err
} }
cid, err := pub.StatePutter.DagPut(node) mhKey, _ := shared.MultihashKeyFromCIDString(stateCIDStr)
if err != nil { stateModel := StateNodeModel{
return nil, nil, err
}
stateNodeCids = append(stateNodeCids, StateNodeModel{
Path: stateNode.Path, Path: stateNode.Path,
StateKey: stateNode.LeafKey.String(), StateKey: stateNode.LeafKey.String(),
CID: cid, CID: stateCIDStr,
MhKey: shared.MultihashKeyFromCID(node.Cid()), MhKey: mhKey,
NodeType: ResolveFromNodeType(stateNode.Type), NodeType: ResolveFromNodeType(stateNode.Type),
}) }
// If we have a leaf, decode the account to extract additional metadata for indexing stateID, err := pub.indexer.indexStateCID(tx, stateModel, headerID)
if err != nil {
return err
}
// If we have a leaf, decode and index the account data and any associated storage diffs
if stateNode.Type == statediff.Leaf { if stateNode.Type == statediff.Leaf {
var i []interface{} var i []interface{}
if err := rlp.DecodeBytes(stateNode.Value, &i); err != nil { if err := rlp.DecodeBytes(stateNode.Value, &i); err != nil {
return nil, nil, err return err
} }
if len(i) != 2 { if len(i) != 2 {
return nil, nil, fmt.Errorf("IPLDPublisher expected state leaf node rlp to decode into two elements") return fmt.Errorf("eth IPLDPublisherAndIndexer expected state leaf node rlp to decode into two elements")
} }
var account state.Account var account state.Account
if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil { if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
return nil, nil, err return err
} }
// Map state account to the state path hash accountModel := StateAccountModel{
statePath := common.Bytes2Hex(stateNode.Path)
stateAccounts[statePath] = StateAccountModel{
Balance: account.Balance.String(), Balance: account.Balance.String(),
Nonce: account.Nonce, Nonce: account.Nonce,
CodeHash: account.CodeHash, CodeHash: account.CodeHash,
StorageRoot: account.Root.String(), StorageRoot: account.Root.String(),
} }
if err := pub.indexer.indexStateAccount(tx, accountModel, stateID); err != nil {
return err
}
for _, storageNode := range ipldPayload.StorageNodes[common.Bytes2Hex(stateNode.Path)] {
storageCIDStr, err := shared.PublishRaw(tx, ipld.MEthStorageTrie, multihash.KECCAK_256, storageNode.Value)
if err != nil {
return err
}
mhKey, _ := shared.MultihashKeyFromCIDString(storageCIDStr)
storageModel := StorageNodeModel{
Path: storageNode.Path,
StorageKey: storageNode.LeafKey.Hex(),
CID: storageCIDStr,
MhKey: mhKey,
NodeType: ResolveFromNodeType(storageNode.Type),
}
if err := pub.indexer.indexStorageCID(tx, storageModel, stateID); err != nil {
return err
}
}
} }
} }
return stateNodeCids, stateAccounts, nil return nil
} }
func (pub *IPLDPublisher) publishStorageNodes(storageNodes map[string][]TrieNode) (map[string][]StorageNodeModel, error) { // Index satisfies the shared.CIDIndexer interface
storageLeafCids := make(map[string][]StorageNodeModel) func (pub *IPLDPublisherAndIndexer) Index(cids shared.CIDsForIndexing) error {
for path, storageTrie := range storageNodes { return nil
storageLeafCids[path] = make([]StorageNodeModel, 0, len(storageTrie))
for _, storageNode := range storageTrie {
node, err := ipld.FromStorageTrieRLP(storageNode.Value)
if err != nil {
return nil, err
}
cid, err := pub.StoragePutter.DagPut(node)
if err != nil {
return nil, err
}
// Map storage node cids to the state path hash
storageLeafCids[path] = append(storageLeafCids[path], StorageNodeModel{
Path: storageNode.Path,
StorageKey: storageNode.LeafKey.Hex(),
CID: cid,
MhKey: shared.MultihashKeyFromCID(node.Cid()),
NodeType: ResolveFromNodeType(storageNode.Type),
})
}
}
return storageLeafCids, nil
} }

View File

@ -18,88 +18,221 @@ package eth_test
import ( import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-ds-help"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks"
mocks2 "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/mocks" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
) )
var ( var _ = Describe("PublishAndIndexer", func() {
mockHeaderDagPutter *mocks2.MappedDagPutter var (
mockTrxDagPutter *mocks2.MappedDagPutter db *postgres.DB
mockTrxTrieDagPutter *mocks2.DagPutter err error
mockRctDagPutter *mocks2.MappedDagPutter repo *eth.IPLDPublisherAndIndexer
mockRctTrieDagPutter *mocks2.DagPutter ipfsPgGet = `SELECT data FROM public.blocks
mockStateDagPutter *mocks2.MappedDagPutter WHERE key = $1`
mockStorageDagPutter *mocks2.MappedDagPutter )
)
var _ = Describe("Publisher", func() {
BeforeEach(func() { BeforeEach(func() {
mockHeaderDagPutter = new(mocks2.MappedDagPutter) db, err = shared.SetupDB()
mockTrxDagPutter = new(mocks2.MappedDagPutter) Expect(err).ToNot(HaveOccurred())
mockTrxTrieDagPutter = new(mocks2.DagPutter) repo = eth.NewIPLDPublisherAndIndexer(db)
mockRctDagPutter = new(mocks2.MappedDagPutter) })
mockRctTrieDagPutter = new(mocks2.DagPutter) AfterEach(func() {
mockStateDagPutter = new(mocks2.MappedDagPutter) eth.TearDownDB(db)
mockStorageDagPutter = new(mocks2.MappedDagPutter)
}) })
Describe("Publish", func() { Describe("Publish", func() {
It("Publishes the passed IPLDPayload objects to IPFS and returns a CIDPayload for indexing", func() { It("Published and indexes header IPLDs in a single tx", func() {
mockHeaderDagPutter.CIDsToReturn = map[common.Hash]string{ emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
common.BytesToHash(mocks.HeaderIPLD.RawData()): mocks.HeaderCID.String(), Expect(emptyReturn).To(BeNil())
}
mockTrxDagPutter.CIDsToReturn = map[common.Hash]string{
common.BytesToHash(mocks.Trx1IPLD.RawData()): mocks.Trx1CID.String(),
common.BytesToHash(mocks.Trx2IPLD.RawData()): mocks.Trx2CID.String(),
common.BytesToHash(mocks.Trx3IPLD.RawData()): mocks.Trx3CID.String(),
}
mockRctDagPutter.CIDsToReturn = map[common.Hash]string{
common.BytesToHash(mocks.Rct1IPLD.RawData()): mocks.Rct1CID.String(),
common.BytesToHash(mocks.Rct2IPLD.RawData()): mocks.Rct2CID.String(),
common.BytesToHash(mocks.Rct3IPLD.RawData()): mocks.Rct3CID.String(),
}
mockStateDagPutter.CIDsToReturn = map[common.Hash]string{
common.BytesToHash(mocks.State1IPLD.RawData()): mocks.State1CID.String(),
common.BytesToHash(mocks.State2IPLD.RawData()): mocks.State2CID.String(),
}
mockStorageDagPutter.CIDsToReturn = map[common.Hash]string{
common.BytesToHash(mocks.StorageIPLD.RawData()): mocks.StorageCID.String(),
}
publisher := eth.IPLDPublisher{
HeaderPutter: mockHeaderDagPutter,
TransactionPutter: mockTrxDagPutter,
TransactionTriePutter: mockTrxTrieDagPutter,
ReceiptPutter: mockRctDagPutter,
ReceiptTriePutter: mockRctTrieDagPutter,
StatePutter: mockStateDagPutter,
StoragePutter: mockStorageDagPutter,
}
payload, err := publisher.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
cidPayload, ok := payload.(*eth.CIDPayload) pgStr := `SELECT cid, td, reward, id
Expect(ok).To(BeTrue()) FROM eth.header_cids
Expect(cidPayload.HeaderCID.TotalDifficulty).To(Equal(mocks.MockConvertedPayload.TotalDifficulty.String())) WHERE block_number = $1`
Expect(cidPayload.HeaderCID.BlockNumber).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockNumber)) // check header was properly indexed
Expect(cidPayload.HeaderCID.BlockHash).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockHash)) type res struct {
Expect(cidPayload.HeaderCID.Reward).To(Equal(mocks.MockCIDPayload.HeaderCID.Reward)) CID string
Expect(cidPayload.UncleCIDs).To(Equal(mocks.MockCIDPayload.UncleCIDs)) TD string
Expect(cidPayload.HeaderCID).To(Equal(mocks.MockCIDPayload.HeaderCID)) Reward string
Expect(len(cidPayload.TransactionCIDs)).To(Equal(3)) ID int
Expect(cidPayload.TransactionCIDs[0]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[0])) }
Expect(cidPayload.TransactionCIDs[1]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[1])) header := new(res)
Expect(cidPayload.TransactionCIDs[2]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[2])) err = db.QueryRowx(pgStr, 1).StructScan(header)
Expect(len(cidPayload.ReceiptCIDs)).To(Equal(3)) Expect(err).ToNot(HaveOccurred())
Expect(cidPayload.ReceiptCIDs[mocks.MockTransactions[0].Hash()]).To(Equal(mocks.MockCIDPayload.ReceiptCIDs[mocks.MockTransactions[0].Hash()])) Expect(header.CID).To(Equal(mocks.HeaderCID.String()))
Expect(cidPayload.ReceiptCIDs[mocks.MockTransactions[1].Hash()]).To(Equal(mocks.MockCIDPayload.ReceiptCIDs[mocks.MockTransactions[1].Hash()])) Expect(header.TD).To(Equal(mocks.MockBlock.Difficulty().String()))
Expect(cidPayload.ReceiptCIDs[mocks.MockTransactions[2].Hash()]).To(Equal(mocks.MockCIDPayload.ReceiptCIDs[mocks.MockTransactions[2].Hash()])) Expect(header.Reward).To(Equal("5000000000000000000"))
Expect(len(cidPayload.StateNodeCIDs)).To(Equal(2)) dc, err := cid.Decode(header.CID)
Expect(cidPayload.StateNodeCIDs[0]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[0])) Expect(err).ToNot(HaveOccurred())
Expect(cidPayload.StateNodeCIDs[1]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[1])) mhKey := dshelp.MultihashToDsKey(dc.Hash())
Expect(cidPayload.StorageNodeCIDs).To(Equal(mocks.MockCIDPayload.StorageNodeCIDs)) prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(mocks.MockHeaderRlp))
})
It("Publishes and indexes transaction IPLDs in a single tx", func() {
	// Publish the mock payload; a nil return means nothing was deferred.
	emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
	Expect(emptyReturn).To(BeNil())
	Expect(err).ToNot(HaveOccurred())
	// The three mock transactions should be indexed against the mock header.
	pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
				WHERE header_cids.block_number = $1`
	trxs := make([]string, 0)
	err = db.Select(&trxs, pgStr, 1)
	Expect(err).ToNot(HaveOccurred())
	Expect(trxs).To(HaveLen(3))
	Expect(shared.ListContainsString(trxs, mocks.Trx1CID.String())).To(BeTrue())
	Expect(shared.ListContainsString(trxs, mocks.Trx2CID.String())).To(BeTrue())
	Expect(shared.ListContainsString(trxs, mocks.Trx3CID.String())).To(BeTrue())
	// Each tx's RLP must also have been published to public.blocks under its
	// blockstore-prefixed multihash key.
	for _, txCID := range trxs {
		decoded, err := cid.Decode(txCID)
		Expect(err).ToNot(HaveOccurred())
		prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(decoded.Hash()).String()
		var data []byte
		err = db.Get(&data, ipfsPgGet, prefixedKey)
		Expect(err).ToNot(HaveOccurred())
		switch txCID {
		case mocks.Trx1CID.String():
			Expect(data).To(Equal(mocks.MockTransactions.GetRlp(0)))
		case mocks.Trx2CID.String():
			Expect(data).To(Equal(mocks.MockTransactions.GetRlp(1)))
		case mocks.Trx3CID.String():
			Expect(data).To(Equal(mocks.MockTransactions.GetRlp(2)))
		}
	}
})
It("Publishes and indexes receipt IPLDs in a single tx", func() {
	// Publish the mock payload; a nil return means nothing was deferred.
	emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
	Expect(emptyReturn).To(BeNil())
	Expect(err).ToNot(HaveOccurred())
	// The three mock receipts should be indexed, joined to their txs and header.
	pgStr := `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
				WHERE receipt_cids.tx_id = transaction_cids.id
				AND transaction_cids.header_id = header_cids.id
				AND header_cids.block_number = $1`
	rcts := make([]string, 0)
	err = db.Select(&rcts, pgStr, 1)
	Expect(err).ToNot(HaveOccurred())
	Expect(rcts).To(HaveLen(3))
	Expect(shared.ListContainsString(rcts, mocks.Rct1CID.String())).To(BeTrue())
	Expect(shared.ListContainsString(rcts, mocks.Rct2CID.String())).To(BeTrue())
	Expect(shared.ListContainsString(rcts, mocks.Rct3CID.String())).To(BeTrue())
	// Each receipt's RLP must also have been published to public.blocks under
	// its blockstore-prefixed multihash key.
	for _, rctCID := range rcts {
		decoded, err := cid.Decode(rctCID)
		Expect(err).ToNot(HaveOccurred())
		prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(decoded.Hash()).String()
		var data []byte
		err = db.Get(&data, ipfsPgGet, prefixedKey)
		Expect(err).ToNot(HaveOccurred())
		switch rctCID {
		case mocks.Rct1CID.String():
			Expect(data).To(Equal(mocks.MockReceipts.GetRlp(0)))
		case mocks.Rct2CID.String():
			Expect(data).To(Equal(mocks.MockReceipts.GetRlp(1)))
		case mocks.Rct3CID.String():
			Expect(data).To(Equal(mocks.MockReceipts.GetRlp(2)))
		}
	}
})
It("Publishes and indexes state IPLDs in a single tx", func() {
	// Publish the mock payload; a nil return means nothing was deferred.
	emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
	Expect(emptyReturn).To(BeNil())
	Expect(err).ToNot(HaveOccurred())
	// Both mock state leaf nodes should be indexed against the mock header.
	stateNodes := make([]eth.StateNodeModel, 0)
	pgStr := `SELECT state_cids.id, state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
				FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
				WHERE header_cids.block_number = $1`
	err = db.Select(&stateNodes, pgStr, 1)
	Expect(err).ToNot(HaveOccurred())
	Expect(len(stateNodes)).To(Equal(2))
	for _, stateNode := range stateNodes {
		// The node's IPLD block must be retrievable from public.blocks by its
		// blockstore-prefixed multihash key.
		var data []byte
		dc, err := cid.Decode(stateNode.CID)
		Expect(err).ToNot(HaveOccurred())
		mhKey := dshelp.MultihashToDsKey(dc.Hash())
		prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
		err = db.Get(&data, ipfsPgGet, prefixedKey)
		Expect(err).ToNot(HaveOccurred())
		// Each state leaf should also have an associated account row keyed by
		// its state_cids id.
		pgStr = `SELECT * from eth.state_accounts WHERE state_id = $1`
		var account eth.StateAccountModel
		err = db.Get(&account, pgStr, stateNode.ID)
		Expect(err).ToNot(HaveOccurred())
		if stateNode.CID == mocks.State1CID.String() {
			// Contract leaf: type-2 (leaf) node holding the contract account.
			Expect(stateNode.NodeType).To(Equal(2))
			Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.ContractLeafKey).Hex()))
			Expect(stateNode.Path).To(Equal([]byte{'\x06'}))
			Expect(data).To(Equal(mocks.ContractLeafNode))
			Expect(account).To(Equal(eth.StateAccountModel{
				ID:          account.ID,
				StateID:     stateNode.ID,
				Balance:     "0",
				CodeHash:    mocks.ContractCodeHash.Bytes(),
				StorageRoot: mocks.ContractRoot,
				Nonce:       1,
			}))
		}
		if stateNode.CID == mocks.State2CID.String() {
			// Externally-owned account leaf: type-2 (leaf) node.
			Expect(stateNode.NodeType).To(Equal(2))
			Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.AccountLeafKey).Hex()))
			Expect(stateNode.Path).To(Equal([]byte{'\x0c'}))
			Expect(data).To(Equal(mocks.AccountLeafNode))
			Expect(account).To(Equal(eth.StateAccountModel{
				ID:          account.ID,
				StateID:     stateNode.ID,
				Balance:     "1000",
				CodeHash:    mocks.AccountCodeHash.Bytes(),
				StorageRoot: mocks.AccountRoot,
				Nonce:       0,
			}))
		}
	}
	// NOTE(review): the original block reassigned pgStr to the state_accounts
	// query here after the loop; the value was never read (dead store,
	// flagged by staticcheck SA4006), so the line has been removed.
})
It("Publishes and indexes storage IPLDs in a single tx", func() {
	// Publish the mock payload; a nil return means nothing was deferred.
	emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
	Expect(emptyReturn).To(BeNil())
	Expect(err).ToNot(HaveOccurred())
	// Exactly one storage leaf node should be indexed, joined through its
	// owning state node to the mock header.
	pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
				FROM eth.storage_cids, eth.state_cids, eth.header_cids
				WHERE storage_cids.state_id = state_cids.id
				AND state_cids.header_id = header_cids.id
				AND header_cids.block_number = $1`
	storageNodes := make([]eth.StorageNodeWithStateKeyModel, 0)
	err = db.Select(&storageNodes, pgStr, 1)
	Expect(err).ToNot(HaveOccurred())
	Expect(storageNodes).To(HaveLen(1))
	expectedNode := eth.StorageNodeWithStateKeyModel{
		CID:        mocks.StorageCID.String(),
		NodeType:   2,
		StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
		StateKey:   common.BytesToHash(mocks.ContractLeafKey).Hex(),
		Path:       []byte{},
	}
	Expect(storageNodes[0]).To(Equal(expectedNode))
	// The raw storage trie node must also be retrievable from public.blocks
	// by its blockstore-prefixed multihash key.
	decoded, err := cid.Decode(storageNodes[0].CID)
	Expect(err).ToNot(HaveOccurred())
	prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(decoded.Hash()).String()
	var data []byte
	err = db.Get(&data, ipfsPgGet, prefixedKey)
	Expect(err).ToNot(HaveOccurred())
	Expect(data).To(Equal(mocks.StorageLeafNode))
})
}) })
}) })