api test; return full ipld models to subscribers

This commit is contained in:
Ian Norden 2020-02-20 16:13:19 -06:00
parent fb360d8562
commit e3e8700d34
32 changed files with 1105 additions and 774 deletions

View File

@@ -41,12 +41,9 @@ var (
 	databaseConfig config.Database
 	genConfig config.Plugin
 	ipc string
-	levelDbPath string
 	queueRecheckInterval time.Duration
 	startingBlockNumber int64
 	storageDiffsPath string
-	syncAll bool
-	endingBlockNumber int64
 	recheckHeadersArg bool
 	subCommand string
 	logWithCommand log.Entry
@@ -81,7 +78,6 @@ func initFuncs(cmd *cobra.Command, args []string) {
 func setViperConfigs() {
 	ipc = viper.GetString("client.ipcpath")
-	levelDbPath = viper.GetString("client.leveldbpath")
 	storageDiffsPath = viper.GetString("filesystem.storageDiffsPath")
 	storageDiffsSource = viper.GetString("storageDiffs.source")
 	databaseConfig = config.Database{

View File

@@ -80,14 +80,13 @@ func streamEthSubscription() {
 				logWithCommand.Error(payload.Err)
 				continue
 			}
-			data, ok := payload.Data.(eth.StreamResponse)
-			if !ok {
-				logWithCommand.Warnf("payload data expected type %T got %T", eth.StreamResponse{}, payload.Data)
-				continue
+			var ethData eth.IPLDs
+			if err := rlp.DecodeBytes(payload.Data, &ethData); err != nil {
+				logWithCommand.Error(err)
 			}
-			for _, headerRlp := range data.HeadersRlp {
+			for _, headerRlp := range ethData.Headers {
 				var header types.Header
-				err = rlp.Decode(bytes.NewBuffer(headerRlp), &header)
+				err = rlp.Decode(bytes.NewBuffer(headerRlp.Data), &header)
 				if err != nil {
 					logWithCommand.Error(err)
 					continue
@@ -95,9 +94,9 @@ func streamEthSubscription() {
 				fmt.Printf("Header number %d, hash %s\n", header.Number.Int64(), header.Hash().Hex())
 				fmt.Printf("header: %v\n", header)
 			}
-			for _, trxRlp := range data.TransactionsRlp {
+			for _, trxRlp := range ethData.Transactions {
 				var trx types.Transaction
-				buff := bytes.NewBuffer(trxRlp)
+				buff := bytes.NewBuffer(trxRlp.Data)
 				stream := rlp.NewStream(buff, 0)
 				err := trx.DecodeRLP(stream)
 				if err != nil {
@@ -107,9 +106,9 @@ func streamEthSubscription() {
 				fmt.Printf("Transaction with hash %s\n", trx.Hash().Hex())
 				fmt.Printf("trx: %v\n", trx)
 			}
-			for _, rctRlp := range data.ReceiptsRlp {
+			for _, rctRlp := range ethData.Receipts {
 				var rct types.ReceiptForStorage
-				buff := bytes.NewBuffer(rctRlp)
+				buff := bytes.NewBuffer(rctRlp.Data)
 				stream := rlp.NewStream(buff, 0)
 				err = rct.DecodeRLP(stream)
 				if err != nil {
@@ -129,40 +128,34 @@ func streamEthSubscription() {
 				}
 			}
 			// This assumes leafs only
-			for key, stateRlp := range data.StateNodesRlp {
+			for _, stateNode := range ethData.StateNodes {
 				var acct state.Account
-				err = rlp.Decode(bytes.NewBuffer(stateRlp), &acct)
+				err = rlp.Decode(bytes.NewBuffer(stateNode.IPLD.Data), &acct)
 				if err != nil {
 					logWithCommand.Error(err)
 					continue
 				}
 				fmt.Printf("Account for key %s, and root %s, with balance %d\n",
-					key.Hex(), acct.Root.Hex(), acct.Balance.Int64())
+					stateNode.StateTrieKey.Hex(), acct.Root.Hex(), acct.Balance.Int64())
 				fmt.Printf("state account: %v\n", acct)
 			}
-			for stateKey, mappedRlp := range data.StorageNodesRlp {
-				fmt.Printf("Storage for state key %s ", stateKey.Hex())
-				for storageKey, storageRlp := range mappedRlp {
-					fmt.Printf("with storage key %s\n", storageKey.Hex())
-					var i []interface{}
-					err := rlp.DecodeBytes(storageRlp, &i)
-					if err != nil {
-						logWithCommand.Error(err)
-						continue
-					}
-					// if a leaf node
-					if len(i) == 2 {
-						keyBytes, ok := i[0].([]byte)
-						if !ok {
-							continue
-						}
-						valueBytes, ok := i[1].([]byte)
-						if !ok {
-							continue
-						}
-						fmt.Printf("Storage leaf key: %s, and value hash: %s\n",
-							common.BytesToHash(keyBytes).Hex(), common.BytesToHash(valueBytes).Hex())
-					}
-				}
-			}
+			for _, storageNode := range ethData.StorageNodes {
+				fmt.Printf("Storage for state key %s ", storageNode.StateTrieKey.Hex())
+				fmt.Printf("with storage key %s\n", storageNode.StorageTrieKey.Hex())
+				var i []interface{}
+				err := rlp.DecodeBytes(storageNode.IPLD.Data, &i)
+				if err != nil {
+					logWithCommand.Error(err)
+					continue
+				}
+				// if a value node
+				if len(i) == 1 {
+					valueBytes, ok := i[0].([]byte)
+					if !ok {
+						continue
+					}
+					fmt.Printf("Storage leaf key: %s, and value hash: %s\n",
+						storageNode.StorageTrieKey.Hex(), common.BytesToHash(valueBytes).Hex())
+				}
+			}
 		case err = <-sub.Err():

View File

@@ -68,9 +68,9 @@ func superNode() {
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
-	var forwardPayloadChan chan shared.StreamedIPLDs
+	var forwardPayloadChan chan shared.ConvertedData
 	if superNodeConfig.Serve {
-		forwardPayloadChan = make(chan shared.StreamedIPLDs, super_node.PayloadChanBufferSize)
+		forwardPayloadChan = make(chan shared.ConvertedData, super_node.PayloadChanBufferSize)
 		superNode.FilterAndServe(wg, forwardPayloadChan)
 		if err := startServers(superNode, superNodeConfig); err != nil {
 			logWithCommand.Fatal(err)

View File

@@ -7,6 +7,7 @@ CREATE TABLE eth.header_cids (
   cid TEXT NOT NULL,
   td NUMERIC NOT NULL,
   node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
+  reward NUMERIC NOT NULL,
   UNIQUE (block_number, block_hash)
 );

View File

@@ -5,6 +5,7 @@ CREATE TABLE eth.uncle_cids (
   block_hash VARCHAR(66) NOT NULL,
   parent_hash VARCHAR(66) NOT NULL,
   cid TEXT NOT NULL,
+  reward NUMERIC NOT NULL,
   UNIQUE (header_id, block_hash)
 );

View File

@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
-package getter_test
+package integration_test
 import (
 	"github.com/ethereum/go-ethereum/ethclient"

View File

@@ -1,35 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package getter_test
import (
"io/ioutil"
"log"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestRepository(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Getter Suite Test")
}
var _ = BeforeSuite(func() {
log.SetOutput(ioutil.Discard)
})

View File

@@ -75,3 +75,43 @@ func staticRewardByBlockNumber(blockNumber int64) *big.Int {
 	}
 	return staticBlockReward
 }
func CalcEthBlockReward(block *types.Block, receipts types.Receipts) *big.Int {
staticBlockReward := staticRewardByBlockNumber(block.Number().Int64())
transactionFees := calcEthTransactionFees(block, receipts)
uncleInclusionRewards := calcEthUncleInclusionRewards(block, block.Uncles())
tmp := transactionFees.Add(transactionFees, uncleInclusionRewards)
return tmp.Add(tmp, staticBlockReward)
}
func CalcUncleMinerReward(blockNumber, uncleBlockNumber int64) *big.Int {
staticBlockReward := staticRewardByBlockNumber(blockNumber)
rewardDiv8 := staticBlockReward.Div(staticBlockReward, big.NewInt(8))
mainBlock := big.NewInt(blockNumber)
uncleBlock := big.NewInt(uncleBlockNumber)
uncleBlockPlus8 := uncleBlock.Add(uncleBlock, big.NewInt(8))
uncleBlockPlus8MinusMainBlock := uncleBlockPlus8.Sub(uncleBlockPlus8, mainBlock)
return rewardDiv8.Mul(rewardDiv8, uncleBlockPlus8MinusMainBlock)
}
func calcEthTransactionFees(block *types.Block, receipts types.Receipts) *big.Int {
transactionFees := new(big.Int)
for i, transaction := range block.Transactions() {
receipt := receipts[i]
gasPrice := big.NewInt(transaction.GasPrice().Int64())
gasUsed := big.NewInt(int64(receipt.GasUsed))
transactionFee := gasPrice.Mul(gasPrice, gasUsed)
transactionFees = transactionFees.Add(transactionFees, transactionFee)
}
return transactionFees
}
func calcEthUncleInclusionRewards(block *types.Block, uncles []*types.Header) *big.Int {
uncleInclusionRewards := new(big.Int)
for range uncles {
staticBlockReward := staticRewardByBlockNumber(block.Number().Int64())
staticBlockReward.Div(staticBlockReward, big.NewInt(32))
uncleInclusionRewards.Add(uncleInclusionRewards, staticBlockReward)
}
return uncleInclusionRewards
}
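As a quick sanity check on the uncle-reward arithmetic added above, here is a small, self-contained sketch. It is not part of the commit; the 5 ETH static reward and the block heights are assumptions chosen only to make the numbers easy to follow.

```go
package main

import (
	"fmt"
	"math/big"
)

// uncleMinerReward mirrors the CalcUncleMinerReward formula above:
// reward = staticReward/8 * (uncleHeight + 8 - includingBlockHeight).
// The 5 ETH static reward is a pre-Byzantium assumption for this example.
func uncleMinerReward(blockNumber, uncleBlockNumber int64) *big.Int {
	staticBlockReward := big.NewInt(5e18)
	rewardDiv8 := staticBlockReward.Div(staticBlockReward, big.NewInt(8))
	factor := big.NewInt(uncleBlockNumber + 8 - blockNumber)
	return rewardDiv8.Mul(rewardDiv8, factor)
}

func main() {
	// An uncle mined one block behind its including block earns 7/8 of the
	// static reward: 5 ETH * 7/8 = 4.375 ETH.
	fmt.Println(uncleMinerReward(1_000_000, 999_999)) // 4375000000000000000
}
```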

View File

@@ -40,7 +40,7 @@ func (erdp *EthReceiptDagPutter) DagPut(raw interface{}) ([]string, error) {
 	}
 	cids := make([]string, len(receipts))
 	for i, receipt := range receipts {
-		node, err := ipld.NewReceipt((*types.ReceiptForStorage)(receipt))
+		node, err := ipld.NewReceipt(receipt)
 		if err != nil {
 			return nil, err
 		}

View File

@@ -29,7 +29,7 @@ import (
 )
 type EthReceipt struct {
-	*types.ReceiptForStorage
+	*types.Receipt
 	rawdata []byte
 	cid cid.Cid
@@ -43,7 +43,7 @@ var _ node.Node = (*EthReceipt)(nil)
 */
 // NewReceipt converts a types.ReceiptForStorage to an EthReceipt IPLD node
-func NewReceipt(receipt *types.ReceiptForStorage) (*EthReceipt, error) {
+func NewReceipt(receipt *types.Receipt) (*EthReceipt, error) {
 	receiptRLP, err := rlp.EncodeToBytes(receipt)
 	if err != nil {
 		return nil, err
@@ -53,9 +53,9 @@ func NewReceipt(receipt *types.ReceiptForStorage) (*EthReceipt, error) {
 		return nil, err
 	}
 	return &EthReceipt{
-		ReceiptForStorage: receipt,
+		Receipt: receipt,
 		cid:     c,
 		rawdata: receiptRLP,
 	}, nil
 }
@@ -158,7 +158,7 @@ func (r *EthReceipt) Stat() (*node.NodeStat, error) {
 // Size will go away. It is here to comply with the interface.
 func (r *EthReceipt) Size() (uint64, error) {
-	return strconv.ParseUint((*types.Receipt)(r.ReceiptForStorage).Size().String(), 10, 64)
+	return strconv.ParseUint(r.Receipt.Size().String(), 10, 64)
 }
 /*

View File

@@ -1,440 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
)
// TrieNode is the general abstraction for
//ethereum IPLD trie nodes.
type TrieNode struct {
// leaf, extension or branch
nodeKind string
// If leaf or extension: [0] is key, [1] is val.
// If branch: [0] - [16] are children.
elements []interface{}
// IPLD block information
cid cid.Cid
rawdata []byte
}
/*
OUTPUT
*/
type trieNodeLeafDecoder func([]interface{}) ([]interface{}, error)
// decodeTrieNode returns a TrieNode object from an IPLD block's
// cid and rawdata.
func decodeTrieNode(c cid.Cid, b []byte,
leafDecoder trieNodeLeafDecoder) (*TrieNode, error) {
var (
i, decoded, elements []interface{}
nodeKind string
err error
)
err = rlp.DecodeBytes(b, &i)
if err != nil {
return nil, err
}
codec := c.Type()
switch len(i) {
case 2:
nodeKind, decoded, err = decodeCompactKey(i)
if err != nil {
return nil, err
}
if nodeKind == "extension" {
elements, err = parseTrieNodeExtension(decoded, codec)
}
if nodeKind == "leaf" {
elements, err = leafDecoder(decoded)
}
if nodeKind != "extension" && nodeKind != "leaf" {
return nil, fmt.Errorf("unexpected nodeKind returned from decoder")
}
case 17:
nodeKind = "branch"
elements, err = parseTrieNodeBranch(i, codec)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unknown trie node type")
}
return &TrieNode{
nodeKind: nodeKind,
elements: elements,
rawdata: b,
cid: c,
}, nil
}
// decodeCompactKey takes a compact key, and returns its nodeKind and value.
func decodeCompactKey(i []interface{}) (string, []interface{}, error) {
first := i[0].([]byte)
last := i[1].([]byte)
switch first[0] / 16 {
case '\x00':
return "extension", []interface{}{
nibbleToByte(first)[2:],
last,
}, nil
case '\x01':
return "extension", []interface{}{
nibbleToByte(first)[1:],
last,
}, nil
case '\x02':
return "leaf", []interface{}{
nibbleToByte(first)[2:],
last,
}, nil
case '\x03':
return "leaf", []interface{}{
nibbleToByte(first)[1:],
last,
}, nil
default:
return "", nil, fmt.Errorf("unknown hex prefix")
}
}
// parseTrieNodeExtension helper improves readability
func parseTrieNodeExtension(i []interface{}, codec uint64) ([]interface{}, error) {
return []interface{}{
i[0].([]byte),
keccak256ToCid(codec, i[1].([]byte)),
}, nil
}
// parseTrieNodeBranch helper improves readability
func parseTrieNodeBranch(i []interface{}, codec uint64) ([]interface{}, error) {
var out []interface{}
for _, vi := range i {
v := vi.([]byte)
switch len(v) {
case 0:
out = append(out, nil)
case 32:
out = append(out, keccak256ToCid(codec, v))
default:
return nil, fmt.Errorf("unrecognized object: %v", v)
}
}
return out, nil
}
/*
Node INTERFACE
*/
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (t *TrieNode) Resolve(p []string) (interface{}, []string, error) {
switch t.nodeKind {
case "extension":
return t.resolveTrieNodeExtension(p)
case "leaf":
return t.resolveTrieNodeLeaf(p)
case "branch":
return t.resolveTrieNodeBranch(p)
default:
return nil, nil, fmt.Errorf("nodeKind case not implemented")
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (t *TrieNode) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
var out []string
switch t.nodeKind {
case "extension":
var val string
for _, e := range t.elements[0].([]byte) {
val += fmt.Sprintf("%x", e)
}
return []string{val}
case "branch":
for i, elem := range t.elements {
if _, ok := elem.(*cid.Cid); ok {
out = append(out, fmt.Sprintf("%x", i))
}
}
return out
default:
return nil
}
}
// ResolveLink is a helper function that calls resolve and asserts the
// output is a link
func (t *TrieNode) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := t.Resolve(p)
if err != nil {
return nil, nil, err
}
lnk, ok := obj.(*node.Link)
if !ok {
return nil, nil, fmt.Errorf("was not a link")
}
return lnk, rest, nil
}
// Copy will go away. It is here to comply with the interface.
func (t *TrieNode) Copy() node.Node {
panic("dont use this yet")
}
// Links is a helper function that returns all links within this object
func (t *TrieNode) Links() []*node.Link {
var out []*node.Link
for _, i := range t.elements {
c, ok := i.(cid.Cid)
if ok {
out = append(out, &node.Link{Cid: c})
}
}
return out
}
// Stat will go away. It is here to comply with the interface.
func (t *TrieNode) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the interface.
func (t *TrieNode) Size() (uint64, error) {
return 0, nil
}
/*
TrieNode functions
*/
// MarshalJSON processes the transaction trie into readable JSON format.
func (t *TrieNode) MarshalJSON() ([]byte, error) {
var out map[string]interface{}
switch t.nodeKind {
case "extension":
fallthrough
case "leaf":
var hexPrefix string
for _, e := range t.elements[0].([]byte) {
hexPrefix += fmt.Sprintf("%x", e)
}
// if we got a byte we need to do this casting otherwise
// it will be marshaled to a base64 encoded value
if _, ok := t.elements[1].([]byte); ok {
var hexVal string
for _, e := range t.elements[1].([]byte) {
hexVal += fmt.Sprintf("%x", e)
}
t.elements[1] = hexVal
}
out = map[string]interface{}{
"type": t.nodeKind,
hexPrefix: t.elements[1],
}
case "branch":
out = map[string]interface{}{
"type": "branch",
"0": t.elements[0],
"1": t.elements[1],
"2": t.elements[2],
"3": t.elements[3],
"4": t.elements[4],
"5": t.elements[5],
"6": t.elements[6],
"7": t.elements[7],
"8": t.elements[8],
"9": t.elements[9],
"a": t.elements[10],
"b": t.elements[11],
"c": t.elements[12],
"d": t.elements[13],
"e": t.elements[14],
"f": t.elements[15],
}
default:
return nil, fmt.Errorf("nodeKind %s not supported", t.nodeKind)
}
return json.Marshal(out)
}
// nibbleToByte expands the nibbles of a byte slice into their own bytes.
func nibbleToByte(k []byte) []byte {
var out []byte
for _, b := range k {
out = append(out, b/16)
out = append(out, b%16)
}
return out
}
// Resolve reading conveniences
func (t *TrieNode) resolveTrieNodeExtension(p []string) (interface{}, []string, error) {
nibbles := t.elements[0].([]byte)
idx, rest := shiftFromPath(p, len(nibbles))
if len(idx) < len(nibbles) {
return nil, nil, fmt.Errorf("not enough nibbles to traverse this extension")
}
for _, i := range idx {
if getHexIndex(string(i)) == -1 {
return nil, nil, fmt.Errorf("invalid path element")
}
}
for i, n := range nibbles {
if string(idx[i]) != fmt.Sprintf("%x", n) {
return nil, nil, fmt.Errorf("no such link in this extension")
}
}
return &node.Link{Cid: t.elements[1].(cid.Cid)}, rest, nil
}
func (t *TrieNode) resolveTrieNodeLeaf(p []string) (interface{}, []string, error) {
nibbles := t.elements[0].([]byte)
if len(nibbles) != 0 {
idx, rest := shiftFromPath(p, len(nibbles))
if len(idx) < len(nibbles) {
return nil, nil, fmt.Errorf("not enough nibbles to traverse this leaf")
}
for _, i := range idx {
if getHexIndex(string(i)) == -1 {
return nil, nil, fmt.Errorf("invalid path element")
}
}
for i, n := range nibbles {
if string(idx[i]) != fmt.Sprintf("%x", n) {
return nil, nil, fmt.Errorf("no such link in this extension")
}
}
p = rest
}
link, ok := t.elements[1].(node.Node)
if !ok {
return nil, nil, fmt.Errorf("leaf children is not an IPLD node")
}
return link.Resolve(p)
}
func (t *TrieNode) resolveTrieNodeBranch(p []string) (interface{}, []string, error) {
idx, rest := shiftFromPath(p, 1)
hidx := getHexIndex(idx)
if hidx == -1 {
return nil, nil, fmt.Errorf("incorrect path")
}
child := t.elements[hidx]
if child != nil {
return &node.Link{Cid: child.(cid.Cid)}, rest, nil
}
return nil, nil, fmt.Errorf("no such link in this branch")
}
// shiftFromPath extracts from a given path (as a slice of strings)
// the given number of elements as a single string, returning whatever
// it has not taken.
//
// Examples:
// ["0", "a", "something"] and 1 -> "0" and ["a", "something"]
// ["ab", "c", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
// ["abc", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
func shiftFromPath(p []string, i int) (string, []string) {
var (
out string
rest []string
)
for _, pe := range p {
re := ""
for _, c := range pe {
if len(out) < i {
out += string(c)
} else {
re += string(c)
}
}
if len(out) == i && re != "" {
rest = append(rest, re)
}
}
return out, rest
}
// getHexIndex returns to you the integer 0 - 15 equivalent to your
// string character if applicable, or -1 otherwise.
func getHexIndex(s string) int {
if len(s) != 1 {
return -1
}
c := byte(s[0])
switch {
case '0' <= c && c <= '9':
return int(c - '0')
case 'a' <= c && c <= 'f':
return int(c - 'a' + 10)
}
return -1
}

View File

@@ -21,6 +21,10 @@ import (
 	"fmt"
 	"math/big"
+	"github.com/multiformats/go-multihash"
+	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
+	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
 	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
 )
@@ -63,7 +67,15 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
 		if err := payload.Header.Serialize(headerBuffer); err != nil {
 			return err
 		}
-		response.Headers = append(response.Headers, headerBuffer.Bytes())
+		data := headerBuffer.Bytes()
+		cid, err := ipld.RawdataToCid(ipld.MBitcoinHeader, data, multihash.DBL_SHA2_256)
+		if err != nil {
+			return err
+		}
+		response.Headers = append(response.Headers, ipfs.BlockModel{
+			Data: data,
+			CID:  cid.String(),
+		})
 	}
 	return nil
 }
@@ -77,14 +89,22 @@ func checkRange(start, end, actual int64) bool {
 func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload ConvertedPayload) error {
 	if !trxFilter.Off {
-		response.Transactions = make([][]byte, 0, len(payload.TxMetaData))
+		response.Transactions = make([]ipfs.BlockModel, 0, len(payload.TxMetaData))
 		for i, txMeta := range payload.TxMetaData {
 			if checkTransaction(txMeta, trxFilter) {
 				trxBuffer := new(bytes.Buffer)
 				if err := payload.Txs[i].MsgTx().Serialize(trxBuffer); err != nil {
 					return err
 				}
-				response.Transactions = append(response.Transactions, trxBuffer.Bytes())
+				data := trxBuffer.Bytes()
+				cid, err := ipld.RawdataToCid(ipld.MBitcoinTx, data, multihash.DBL_SHA2_256)
+				if err != nil {
+					return err
+				}
+				response.Transactions = append(response.Transactions, ipfs.BlockModel{
+					Data: data,
+					CID:  cid.String(),
+				})
 			}
 		}
 	}

View File

@@ -21,14 +21,13 @@ import (
 	"errors"
 	"fmt"
-	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
 	"github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-blockservice"
 	"github.com/ipfs/go-cid"
 	log "github.com/sirupsen/logrus"
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
+	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
 )
 var (
@@ -74,7 +73,7 @@ func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) {
 // FetchHeaders fetches headers
 // It uses the f.fetchBatch method
-func (f *IPLDFetcher) FetchHeaders(cids []HeaderModel) ([][]byte, error) {
+func (f *IPLDFetcher) FetchHeaders(cids []HeaderModel) ([]ipfs.BlockModel, error) {
 	log.Debug("fetching header iplds")
 	headerCids := make([]cid.Cid, len(cids))
 	for i, c := range cids {
@@ -85,20 +84,23 @@ func (f *IPLDFetcher) FetchHeaders(cids []HeaderModel) ([][]byte, error) {
 		headerCids[i] = dc
 	}
 	headers := f.fetchBatch(headerCids)
-	headersBytes := make([][]byte, len(headers))
+	headerIPLDs := make([]ipfs.BlockModel, len(headers))
 	for i, header := range headers {
-		headersBytes[i] = header.RawData()
+		headerIPLDs[i] = ipfs.BlockModel{
+			Data: header.RawData(),
+			CID:  header.Cid().String(),
+		}
 	}
-	if len(headersBytes) != len(headerCids) {
+	if len(headerIPLDs) != len(headerCids) {
 		log.Errorf("ipfs fetcher: number of header blocks returned (%d) does not match number expected (%d)", len(headers), len(headerCids))
-		return headersBytes, errUnexpectedNumberOfIPLDs
+		return headerIPLDs, errUnexpectedNumberOfIPLDs
 	}
-	return headersBytes, nil
+	return headerIPLDs, nil
 }
 // FetchTrxs fetches transactions
 // It uses the f.fetchBatch method
-func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([][]byte, error) {
+func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]ipfs.BlockModel, error) {
 	log.Debug("fetching transaction iplds")
 	trxCids := make([]cid.Cid, len(cids))
 	for i, c := range cids {
@@ -109,15 +111,18 @@ func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([][]byte, error) {
 		trxCids[i] = dc
 	}
 	trxs := f.fetchBatch(trxCids)
-	trxBytes := make([][]byte, len(trxs))
+	trxIPLDs := make([]ipfs.BlockModel, len(trxs))
 	for i, trx := range trxs {
-		trxBytes[i] = trx.RawData()
+		trxIPLDs[i] = ipfs.BlockModel{
+			Data: trx.RawData(),
+			CID:  trx.Cid().String(),
+		}
 	}
-	if len(trxBytes) != len(trxCids) {
+	if len(trxIPLDs) != len(trxCids) {
 		log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids))
-		return trxBytes, errUnexpectedNumberOfIPLDs
+		return trxIPLDs, errUnexpectedNumberOfIPLDs
 	}
-	return trxBytes, nil
+	return trxIPLDs, nil
 }
 // fetch is used to fetch a single cid

View File

@@ -19,6 +19,8 @@ package btc
 import (
 	"math/big"
+	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 )
@@ -64,8 +66,8 @@ type CIDWrapper struct {
 // Returned by IPLDFetcher and ResponseFilterer
 type IPLDs struct {
 	BlockNumber *big.Int
-	Headers [][]byte
-	Transactions [][]byte
+	Headers      []ipfs.BlockModel
+	Transactions []ipfs.BlockModel
 }
 // Height satisfies the StreamedIPLDs interface
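The switch from [][]byte to []ipfs.BlockModel above pairs each raw payload with its CID. Here is a hedged sketch of how one of these entries gets built, mirroring the RawdataToCid call shown in the bitcoin filterer earlier in this commit; the placeholder bytes are made up for illustration and this is not the package's own code.

```go
package main

import (
	"fmt"

	"github.com/multiformats/go-multihash"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
)

// headerToBlockModel pairs serialized header bytes with the CID derived from
// them, the pattern the filterers in this commit use instead of returning
// naked []byte slices to subscribers.
func headerToBlockModel(data []byte) (ipfs.BlockModel, error) {
	c, err := ipld.RawdataToCid(ipld.MBitcoinHeader, data, multihash.DBL_SHA2_256)
	if err != nil {
		return ipfs.BlockModel{}, err
	}
	return ipfs.BlockModel{Data: data, CID: c.String()}, nil
}

func main() {
	blk, err := headerToBlockModel([]byte{0xde, 0xad, 0xbe, 0xef}) // placeholder bytes
	if err != nil {
		panic(err)
	}
	fmt.Println(blk.CID, len(blk.Data))
}
```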

View File

@@ -20,6 +20,8 @@ import (
 	"context"
 	"math/big"
+	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
 	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
@@ -63,6 +65,7 @@ func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery)
 	topicStrSets := make([][]string, 4)
 	for i, topicSet := range crit.Topics {
 		if i > 3 {
+			// don't allow more than 4 topics
 			break
 		}
 		for _, topic := range topicSet {
@@ -173,19 +176,19 @@ func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.H
 		return nil, err
 	}
 	if tx != nil {
-		return newRPCTransaction(tx, blockHash, blockNumber, index), nil
+		return NewRPCTransaction(tx, blockHash, blockNumber, index), nil
 	}
 	// Transaction unknown, return as such
 	return nil, nil
 }
 // extractLogsOfInterest returns logs from the receipt IPLD
-func extractLogsOfInterest(rctIPLDs [][]byte, wantedTopics [][]string) ([]*types.Log, error) {
+func extractLogsOfInterest(rctIPLDs []ipfs.BlockModel, wantedTopics [][]string) ([]*types.Log, error) {
 	var logs []*types.Log
 	for _, rctIPLD := range rctIPLDs {
 		rctRLP := rctIPLD
 		var rct types.Receipt
-		if err := rlp.DecodeBytes(rctRLP, &rct); err != nil {
+		if err := rlp.DecodeBytes(rctRLP.Data, &rct); err != nil {
 			return nil, err
 		}
 		for _, log := range rct.Logs {
@@ -200,7 +203,7 @@ func extractLogsOfInterest(rctIPLDs [][]byte, wantedTopics [][]string) ([]*types
 // returns true if the log matches on the filter
 func wantedLog(wantedTopics [][]string, actualTopics []common.Hash) bool {
 	// actualTopics will always have length <= 4
-	// wantedTopics will always have length == 4
+	// wantedTopics will always have length 4
 	matches := 0
 	for i, actualTopic := range actualTopics {
 		// If we have topics in this filter slot, count as a match if the actualTopic matches one of the ones in this filter slot
@@ -291,7 +294,7 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]i
 	}
 	if fullTx {
 		formatTx = func(tx *types.Transaction) (interface{}, error) {
-			return newRPCTransactionFromBlockHash(block, tx.Hash()), nil
+			return NewRPCTransactionFromBlockHash(block, tx.Hash()), nil
 		}
 	}
 	txs := block.Transactions()
@@ -314,8 +317,8 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]i
 	return fields, nil
 }
-// newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation.
-func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransaction {
+// NewRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation.
+func NewRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransaction {
 	for idx, tx := range b.Transactions() {
 		if tx.Hash() == hash {
 			return newRPCTransactionFromBlockIndex(b, uint64(idx))
@@ -330,7 +333,7 @@ func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransacti
 	if index >= uint64(len(txs)) {
 		return nil
 	}
-	return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index)
+	return NewRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index)
 }
 // RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
@@ -351,9 +354,9 @@ type RPCTransaction struct {
 	S *hexutil.Big `json:"s"`
 }
-// newRPCTransaction returns a transaction that will serialize to the RPC
+// NewRPCTransaction returns a transaction that will serialize to the RPC
 // representation, with the given location metadata set (if available).
-func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction {
+func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction {
 	var signer types.Signer = types.FrontierSigner{}
 	if tx.Protected() {
 		signer = types.NewEIP155Signer(tx.ChainId())
@@ -366,7 +369,7 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
 		Gas: hexutil.Uint64(tx.Gas()),
 		GasPrice: (*hexutil.Big)(tx.GasPrice()),
 		Hash: tx.Hash(),
-		Input: hexutil.Bytes(tx.Data()),
+		Input: hexutil.Bytes(tx.Data()), // somehow this is ending up `nil`
 		Nonce: hexutil.Uint64(tx.Nonce()),
 		To: tx.To(),
 		Value: (*hexutil.Big)(tx.Value()),
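The GetLogs tests added below lean on the topic-filter semantics referenced in the comments above: filter slot i constrains topic i of a log, and an empty slot is a wildcard. The following is a standalone sketch of that rule, written only to make the test expectations easier to read; it is not the package's wantedLog implementation.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// topicsMatch reports whether a log's topics satisfy a filter where slot i
// constrains topic i and an empty slot matches anything.
func topicsMatch(filter [][]common.Hash, logTopics []common.Hash) bool {
	for i, slot := range filter {
		if len(slot) == 0 {
			continue // wildcard slot
		}
		if i >= len(logTopics) {
			return false
		}
		matched := false
		for _, want := range slot {
			if logTopics[i] == want {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}

func main() {
	logTopics := []common.Hash{common.HexToHash("0x04"), common.HexToHash("0x06")}
	filter := [][]common.Hash{{}, {common.HexToHash("0x06")}}
	fmt.Println(topicsMatch(filter, logTopics)) // true
}
```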

View File

@@ -0,0 +1,588 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"context"
"strconv"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
mocks3 "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
var (
expectedBlock = map[string]interface{}{
"number": (*hexutil.Big)(mocks.MockBlock.Number()),
"hash": mocks.MockBlock.Hash(),
"parentHash": mocks.MockBlock.ParentHash(),
"nonce": mocks.MockBlock.Header().Nonce,
"mixHash": mocks.MockBlock.MixDigest(),
"sha3Uncles": mocks.MockBlock.UncleHash(),
"logsBloom": mocks.MockBlock.Bloom(),
"stateRoot": mocks.MockBlock.Root(),
"miner": mocks.MockBlock.Coinbase(),
"difficulty": (*hexutil.Big)(mocks.MockBlock.Difficulty()),
"extraData": hexutil.Bytes(mocks.MockBlock.Header().Extra),
"gasLimit": hexutil.Uint64(mocks.MockBlock.GasLimit()),
"gasUsed": hexutil.Uint64(mocks.MockBlock.GasUsed()),
"timestamp": hexutil.Uint64(mocks.MockBlock.Time()),
"transactionsRoot": mocks.MockBlock.TxHash(),
"receiptsRoot": mocks.MockBlock.ReceiptHash(),
"totalDifficulty": (*hexutil.Big)(mocks.MockBlock.Difficulty()),
"size": hexutil.Uint64(mocks.MockBlock.Size()),
}
expectedHeader = map[string]interface{}{
"number": (*hexutil.Big)(mocks.MockBlock.Header().Number),
"hash": mocks.MockBlock.Header().Hash(),
"parentHash": mocks.MockBlock.Header().ParentHash,
"nonce": mocks.MockBlock.Header().Nonce,
"mixHash": mocks.MockBlock.Header().MixDigest,
"sha3Uncles": mocks.MockBlock.Header().UncleHash,
"logsBloom": mocks.MockBlock.Header().Bloom,
"stateRoot": mocks.MockBlock.Header().Root,
"miner": mocks.MockBlock.Header().Coinbase,
"difficulty": (*hexutil.Big)(mocks.MockBlock.Header().Difficulty),
"extraData": hexutil.Bytes(mocks.MockBlock.Header().Extra),
"size": hexutil.Uint64(mocks.MockBlock.Header().Size()),
"gasLimit": hexutil.Uint64(mocks.MockBlock.Header().GasLimit),
"gasUsed": hexutil.Uint64(mocks.MockBlock.Header().GasUsed),
"timestamp": hexutil.Uint64(mocks.MockBlock.Header().Time),
"transactionsRoot": mocks.MockBlock.Header().TxHash,
"receiptsRoot": mocks.MockBlock.Header().ReceiptHash,
"totalDifficulty": (*hexutil.Big)(mocks.MockBlock.Header().Difficulty),
}
expectedTransaction = eth.NewRPCTransaction(mocks.MockTransactions[0], mocks.MockBlock.Hash(), mocks.MockBlock.NumberU64(), 0)
)
var _ = Describe("API", func() {
var (
db *postgres.DB
retriever *eth.CIDRetriever
fetcher *eth.IPLDFetcher
indexer *eth.CIDIndexer
backend *eth.Backend
api *eth.PublicEthAPI
)
BeforeEach(func() {
var err error
db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred())
retriever = eth.NewCIDRetriever(db)
blocksToReturn := map[cid.Cid]blocks.Block{
mocks.HeaderCID: mocks.HeaderIPLD,
mocks.Trx1CID: mocks.Trx1IPLD,
mocks.Trx2CID: mocks.Trx2IPLD,
mocks.Rct1CID: mocks.Rct1IPLD,
mocks.Rct2CID: mocks.Rct2IPLD,
mocks.State1CID: mocks.State1IPLD,
mocks.State2CID: mocks.State2IPLD,
mocks.StorageCID: mocks.StorageIPLD,
}
mockBlockService := &mocks3.MockIPFSBlockService{
Blocks: blocksToReturn,
}
fetcher = &eth.IPLDFetcher{
BlockService: mockBlockService,
}
indexer = eth.NewCIDIndexer(db)
backend = &eth.Backend{
Retriever: retriever,
Fetcher: fetcher,
DB: db,
}
api = eth.NewPublicEthAPI(backend)
err = indexer.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
uncles := mocks.MockBlock.Uncles()
uncleHashes := make([]common.Hash, len(uncles))
for i, uncle := range uncles {
uncleHashes[i] = uncle.Hash()
}
expectedBlock["uncles"] = uncleHashes
})
AfterEach(func() {
eth.TearDownDB(db)
})
Describe("BlockNumber", func() {
It("Retrieves the head block number", func() {
bn := api.BlockNumber()
ubn := (uint64)(bn)
subn := strconv.FormatUint(ubn, 10)
Expect(subn).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockNumber))
})
})
Describe("GetTransactionByHash", func() {
It("Retrieves the head block number", func() {
hash := mocks.MockTransactions[0].Hash()
tx, err := api.GetTransactionByHash(context.Background(), hash)
Expect(err).ToNot(HaveOccurred())
Expect(tx).To(Equal(expectedTransaction))
})
})
Describe("GetBlockByNumber", func() {
It("Retrieves a block by number", func() {
// without full txs
number, err := strconv.ParseInt(mocks.MockCIDPayload.HeaderCID.BlockNumber, 10, 64)
Expect(err).ToNot(HaveOccurred())
block, err := api.GetBlockByNumber(context.Background(), rpc.BlockNumber(number), false)
Expect(err).ToNot(HaveOccurred())
transactionHashes := make([]interface{}, len(mocks.MockBlock.Transactions()))
for i, trx := range mocks.MockBlock.Transactions() {
transactionHashes[i] = trx.Hash()
}
expectedBlock["transactions"] = transactionHashes
for key, val := range expectedBlock {
Expect(val).To(Equal(block[key]))
}
// with full txs
block, err = api.GetBlockByNumber(context.Background(), rpc.BlockNumber(number), true)
Expect(err).ToNot(HaveOccurred())
transactions := make([]interface{}, len(mocks.MockBlock.Transactions()))
for i, trx := range mocks.MockBlock.Transactions() {
transactions[i] = eth.NewRPCTransactionFromBlockHash(mocks.MockBlock, trx.Hash())
}
expectedBlock["transactions"] = transactions
for key, val := range expectedBlock {
Expect(val).To(Equal(block[key]))
}
})
})
Describe("GetHeaderByNumber", func() {
It("Retrieves a header by number", func() {
number, err := strconv.ParseInt(mocks.MockCIDPayload.HeaderCID.BlockNumber, 10, 64)
Expect(err).ToNot(HaveOccurred())
header, err := api.GetHeaderByNumber(context.Background(), rpc.BlockNumber(number))
Expect(header).To(Equal(expectedHeader))
})
})
Describe("GetBlockByHash", func() {
It("Retrieves a block by hash", func() {
// without full txs
block, err := api.GetBlockByHash(context.Background(), mocks.MockBlock.Hash(), false)
Expect(err).ToNot(HaveOccurred())
transactionHashes := make([]interface{}, len(mocks.MockBlock.Transactions()))
for i, trx := range mocks.MockBlock.Transactions() {
transactionHashes[i] = trx.Hash()
}
expectedBlock["transactions"] = transactionHashes
for key, val := range expectedBlock {
Expect(val).To(Equal(block[key]))
}
// with full txs
block, err = api.GetBlockByHash(context.Background(), mocks.MockBlock.Hash(), true)
Expect(err).ToNot(HaveOccurred())
transactions := make([]interface{}, len(mocks.MockBlock.Transactions()))
for i, trx := range mocks.MockBlock.Transactions() {
transactions[i] = eth.NewRPCTransactionFromBlockHash(mocks.MockBlock, trx.Hash())
}
expectedBlock["transactions"] = transactions
for key, val := range expectedBlock {
Expect(val).To(Equal(block[key]))
}
})
})
Describe("GetLogs", func() {
It("Retrieves receipt logs that match the provided topcis within the provided range", func() {
crit := ethereum.FilterQuery{
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
},
},
FromBlock: mocks.MockBlock.Number(),
ToBlock: mocks.MockBlock.Number(),
}
logs, err := api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
crit = ethereum.FilterQuery{
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
common.HexToHash("0x05"),
},
},
FromBlock: mocks.MockBlock.Number(),
ToBlock: mocks.MockBlock.Number(),
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
crit = ethereum.FilterQuery{
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
common.HexToHash("0x06"),
},
},
FromBlock: mocks.MockBlock.Number(),
ToBlock: mocks.MockBlock.Number(),
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
crit = ethereum.FilterQuery{
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
},
{
common.HexToHash("0x07"),
},
},
FromBlock: mocks.MockBlock.Number(),
ToBlock: mocks.MockBlock.Number(),
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0))
crit = ethereum.FilterQuery{
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
},
{
common.HexToHash("0x06"),
},
},
FromBlock: mocks.MockBlock.Number(),
ToBlock: mocks.MockBlock.Number(),
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
crit = ethereum.FilterQuery{
Topics: [][]common.Hash{
{
common.HexToHash("0x05"),
},
{
common.HexToHash("0x07"),
},
},
FromBlock: mocks.MockBlock.Number(),
ToBlock: mocks.MockBlock.Number(),
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
crit = ethereum.FilterQuery{
Topics: [][]common.Hash{
{
common.HexToHash("0x05"),
},
{
common.HexToHash("0x06"),
common.HexToHash("0x07"),
},
},
FromBlock: mocks.MockBlock.Number(),
ToBlock: mocks.MockBlock.Number(),
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
crit = ethereum.FilterQuery{
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
common.HexToHash("0x05"),
},
{
common.HexToHash("0x06"),
common.HexToHash("0x07"),
},
},
FromBlock: mocks.MockBlock.Number(),
ToBlock: mocks.MockBlock.Number(),
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
crit = ethereum.FilterQuery{
Topics: [][]common.Hash{
{},
{
common.HexToHash("0x07"),
},
},
FromBlock: mocks.MockBlock.Number(),
ToBlock: mocks.MockBlock.Number(),
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
crit = ethereum.FilterQuery{
Topics: [][]common.Hash{
{},
{
common.HexToHash("0x06"),
},
},
FromBlock: mocks.MockBlock.Number(),
ToBlock: mocks.MockBlock.Number(),
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
crit = ethereum.FilterQuery{
Topics: [][]common.Hash{},
FromBlock: mocks.MockBlock.Number(),
ToBlock: mocks.MockBlock.Number(),
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
})
It("Uses the provided blockhash if one is provided", func() {
hash := mocks.MockBlock.Hash()
crit := ethereum.FilterQuery{
BlockHash: &hash,
Topics: [][]common.Hash{
{},
{
common.HexToHash("0x06"),
},
},
}
logs, err := api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
crit = ethereum.FilterQuery{
BlockHash: &hash,
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
},
{
common.HexToHash("0x06"),
},
},
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
crit = ethereum.FilterQuery{
BlockHash: &hash,
Topics: [][]common.Hash{
{},
{
common.HexToHash("0x07"),
},
},
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
crit = ethereum.FilterQuery{
BlockHash: &hash,
Topics: [][]common.Hash{
{
common.HexToHash("0x05"),
},
{
common.HexToHash("0x07"),
},
},
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
crit = ethereum.FilterQuery{
BlockHash: &hash,
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
},
{
common.HexToHash("0x07"),
},
},
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0))
crit = ethereum.FilterQuery{
BlockHash: &hash,
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
common.HexToHash("0x05"),
},
{
common.HexToHash("0x07"),
},
},
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
crit = ethereum.FilterQuery{
BlockHash: &hash,
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
common.HexToHash("0x05"),
},
},
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
crit = ethereum.FilterQuery{
BlockHash: &hash,
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
common.HexToHash("0x05"),
},
{
common.HexToHash("0x06"),
common.HexToHash("0x07"),
},
},
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
crit = ethereum.FilterQuery{
BlockHash: &hash,
Topics: [][]common.Hash{},
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
})
It("Filters on contract address if any are provided", func() {
hash := mocks.MockBlock.Hash()
crit := ethereum.FilterQuery{
BlockHash: &hash,
Addresses: []common.Address{
mocks.Address,
},
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
common.HexToHash("0x05"),
},
{
common.HexToHash("0x06"),
common.HexToHash("0x07"),
},
},
}
logs, err := api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
hash = mocks.MockBlock.Hash()
crit = ethereum.FilterQuery{
BlockHash: &hash,
Addresses: []common.Address{
mocks.Address,
mocks.AnotherAddress,
},
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
common.HexToHash("0x05"),
},
{
common.HexToHash("0x06"),
common.HexToHash("0x07"),
},
},
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
hash = mocks.MockBlock.Hash()
crit = ethereum.FilterQuery{
BlockHash: &hash,
Addresses: []common.Address{
mocks.Address,
mocks.AnotherAddress,
},
}
logs, err = api.GetLogs(context.Background(), crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
})
})
})

View File

@ -93,19 +93,19 @@ func (b *Backend) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumbe
// Decode the first header at this block height and return it // Decode the first header at this block height and return it
// We throw an error in FetchHeaders() if the number of headers does not match the number of CIDs and we already // We throw an error in FetchHeaders() if the number of headers does not match the number of CIDs and we already
// confirmed the number of CIDs is greater than 0 so there is no need to bound check the slice before accessing // confirmed the number of CIDs is greater than 0 so there is no need to bound check the slice before accessing
header := new(types.Header) var header types.Header
if err := rlp.DecodeBytes(headerIPLDs[0], header); err != nil { if err := rlp.DecodeBytes(headerIPLDs[0].Data, &header); err != nil {
return nil, err return nil, err
} }
return header, nil return &header, nil
} }
// GetTd retrieves and returns the total difficulty at the given block hash // GetTd retrieves and returns the total difficulty at the given block hash
func (b *Backend) GetTd(blockHash common.Hash) (*big.Int, error) { func (b *Backend) GetTd(blockHash common.Hash) (*big.Int, error) {
pgStr := `SELECT header_cids.td FROM header_cids pgStr := `SELECT td FROM eth.header_cids
WHERE header_cids.block_hash = $1` WHERE header_cids.block_hash = $1`
var tdStr string var tdStr string
err := b.DB.Select(&tdStr, pgStr, blockHash.String()) err := b.DB.Get(&tdStr, pgStr, blockHash.String())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -142,7 +142,7 @@ func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log
logs := make([][]*types.Log, len(receiptIPLDs)) logs := make([][]*types.Log, len(receiptIPLDs))
for i, rctIPLD := range receiptIPLDs { for i, rctIPLD := range receiptIPLDs {
var rct types.Receipt var rct types.Receipt
if err := rlp.DecodeBytes(rctIPLD, &rct); err != nil { if err := rlp.DecodeBytes(rctIPLD.Data, &rct); err != nil {
return nil, err return nil, err
} }
logs[i] = rct.Logs logs[i] = rct.Logs
@ -170,13 +170,14 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Fetch and decode the header IPLD // Fetch and decode the header IPLD
headerIPLDs, err := b.Fetcher.FetchHeaders([]HeaderModel{headerCID}) headerIPLDs, err := b.Fetcher.FetchHeaders([]HeaderModel{headerCID})
if err != nil { if err != nil {
return nil, err return nil, err
} }
var header *types.Header var header types.Header
if err := rlp.DecodeBytes(headerIPLDs[0], header); err != nil { if err := rlp.DecodeBytes(headerIPLDs[0].Data, &header); err != nil {
return nil, err return nil, err
} }
// Fetch and decode the uncle IPLDs // Fetch and decode the uncle IPLDs
@ -186,11 +187,11 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
} }
var uncles []*types.Header var uncles []*types.Header
for _, uncleIPLD := range uncleIPLDs { for _, uncleIPLD := range uncleIPLDs {
var uncle *types.Header var uncle types.Header
if err := rlp.DecodeBytes(uncleIPLD, uncle); err != nil { if err := rlp.DecodeBytes(uncleIPLD.Data, &uncle); err != nil {
return nil, err return nil, err
} }
uncles = append(uncles, uncle) uncles = append(uncles, &uncle)
} }
// Fetch and decode the transaction IPLDs // Fetch and decode the transaction IPLDs
txIPLDs, err := b.Fetcher.FetchTrxs(txCIDs) txIPLDs, err := b.Fetcher.FetchTrxs(txCIDs)
@ -199,11 +200,11 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
} }
var transactions []*types.Transaction var transactions []*types.Transaction
for _, txIPLD := range txIPLDs { for _, txIPLD := range txIPLDs {
var tx *types.Transaction var tx types.Transaction
if err := rlp.DecodeBytes(txIPLD, tx); err != nil { if err := rlp.DecodeBytes(txIPLD.Data, &tx); err != nil {
return nil, err return nil, err
} }
transactions = append(transactions, tx) transactions = append(transactions, &tx)
} }
// Fetch and decode the receipt IPLDs // Fetch and decode the receipt IPLDs
rctIPLDs, err := b.Fetcher.FetchRcts(rctCIDs) rctIPLDs, err := b.Fetcher.FetchRcts(rctCIDs)
@ -212,14 +213,14 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
} }
var receipts []*types.Receipt var receipts []*types.Receipt
for _, rctIPLD := range rctIPLDs { for _, rctIPLD := range rctIPLDs {
var receipt *types.Receipt var receipt types.Receipt
if err := rlp.DecodeBytes(rctIPLD, receipt); err != nil { if err := rlp.DecodeBytes(rctIPLD.Data, &receipt); err != nil {
return nil, err return nil, err
} }
receipts = append(receipts, receipt) receipts = append(receipts, &receipt)
} }
// Compose everything together into a complete block // Compose everything together into a complete block
return types.NewBlock(header, transactions, uncles, receipts), nil return types.NewBlock(&header, transactions, uncles, receipts), nil
} }
// BlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full // BlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full
@ -235,8 +236,8 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
if err != nil { if err != nil {
return nil, err return nil, err
} }
var header *types.Header var header types.Header
if err := rlp.DecodeBytes(headerIPLDs[0], header); err != nil { if err := rlp.DecodeBytes(headerIPLDs[0].Data, &header); err != nil {
return nil, err return nil, err
} }
// Fetch and decode the uncle IPLDs // Fetch and decode the uncle IPLDs
@ -246,11 +247,11 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
} }
var uncles []*types.Header var uncles []*types.Header
for _, uncleIPLD := range uncleIPLDs { for _, uncleIPLD := range uncleIPLDs {
var uncle *types.Header var uncle types.Header
if err := rlp.DecodeBytes(uncleIPLD, uncle); err != nil { if err := rlp.DecodeBytes(uncleIPLD.Data, &uncle); err != nil {
return nil, err return nil, err
} }
uncles = append(uncles, uncle) uncles = append(uncles, &uncle)
} }
// Fetch and decode the transaction IPLDs // Fetch and decode the transaction IPLDs
txIPLDs, err := b.Fetcher.FetchTrxs(txCIDs) txIPLDs, err := b.Fetcher.FetchTrxs(txCIDs)
@ -259,11 +260,11 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
} }
var transactions []*types.Transaction var transactions []*types.Transaction
for _, txIPLD := range txIPLDs { for _, txIPLD := range txIPLDs {
var tx *types.Transaction var tx types.Transaction
if err := rlp.DecodeBytes(txIPLD, tx); err != nil { if err := rlp.DecodeBytes(txIPLD.Data, &tx); err != nil {
return nil, err return nil, err
} }
transactions = append(transactions, tx) transactions = append(transactions, &tx)
} }
// Fetch and decode the receipt IPLDs // Fetch and decode the receipt IPLDs
rctIPLDs, err := b.Fetcher.FetchRcts(rctCIDs) rctIPLDs, err := b.Fetcher.FetchRcts(rctCIDs)
@ -272,21 +273,21 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
} }
var receipts []*types.Receipt var receipts []*types.Receipt
for _, rctIPLD := range rctIPLDs { for _, rctIPLD := range rctIPLDs {
var receipt *types.Receipt var receipt types.Receipt
if err := rlp.DecodeBytes(rctIPLD, receipt); err != nil { if err := rlp.DecodeBytes(rctIPLD.Data, &receipt); err != nil {
return nil, err return nil, err
} }
receipts = append(receipts, receipt) receipts = append(receipts, &receipt)
} }
// Compose everything together into a complete block // Compose everything together into a complete block
return types.NewBlock(header, transactions, uncles, receipts), nil return types.NewBlock(&header, transactions, uncles, receipts), nil
} }
// GetTransaction retrieves a tx by hash // GetTransaction retrieves a tx by hash
// It also returns the blockhash, blocknumber, and tx index associated with the transaction // It also returns the blockhash, blocknumber, and tx index associated with the transaction
func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
pgStr := `SELECT transaction_cids.cid, transaction_cids.index, header_cids.block_hash, header_cids.block_number pgStr := `SELECT transaction_cids.cid, transaction_cids.index, header_cids.block_hash, header_cids.block_number
FROM transaction_cids, header_cids FROM eth.transaction_cids, eth.header_cids
WHERE transaction_cids.header_id = header_cids.id WHERE transaction_cids.header_id = header_cids.id
AND transaction_cids.tx_hash = $1` AND transaction_cids.tx_hash = $1`
var txCIDWithHeaderInfo struct { var txCIDWithHeaderInfo struct {
@ -302,9 +303,9 @@ func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*type
if err != nil { if err != nil {
return nil, common.Hash{}, 0, 0, err return nil, common.Hash{}, 0, 0, err
} }
var transaction *types.Transaction var transaction types.Transaction
if err := rlp.DecodeBytes(txIPLD[0], transaction); err != nil { if err := rlp.DecodeBytes(txIPLD[0].Data, &transaction); err != nil {
return nil, common.Hash{}, 0, 0, err return nil, common.Hash{}, 0, 0, err
} }
return transaction, common.HexToHash(txCIDWithHeaderInfo.BlockHash), uint64(txCIDWithHeaderInfo.BlockNumber), uint64(txCIDWithHeaderInfo.Index), nil return &transaction, common.HexToHash(txCIDWithHeaderInfo.BlockHash), uint64(txCIDWithHeaderInfo.BlockNumber), uint64(txCIDWithHeaderInfo.Index), nil
} }
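GetTransaction now joins the schema-qualified eth.transaction_cids and eth.header_cids tables, so a single call yields the transaction together with its block context. A usage sketch with a hypothetical wrapper (context, common, and fmt imports assumed):

// printTxContext is an illustrative caller of Backend.GetTransaction.
func printTxContext(ctx context.Context, backend *Backend, txHash common.Hash) error {
	tx, blockHash, blockNumber, txIndex, err := backend.GetTransaction(ctx, txHash)
	if err != nil {
		return err
	}
	// Block hash and number come from the joined header row; index is the
	// transaction's position within that block.
	fmt.Printf("tx %s included in block %s (#%d) at index %d\n",
		tx.Hash().Hex(), blockHash.Hex(), blockNumber, txIndex)
	return nil
}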

View File

@ -24,7 +24,10 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/multiformats/go-multihash"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared" "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
) )
@ -80,15 +83,29 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
if err != nil { if err != nil {
return err return err
} }
response.Headers = append(response.Headers, headerRLP) cid, err := ipld.RawdataToCid(ipld.MEthHeader, headerRLP, multihash.KECCAK_256)
if err != nil {
return err
}
response.Headers = append(response.Headers, ipfs.BlockModel{
Data: headerRLP,
CID: cid.String(),
})
if headerFilter.Uncles { if headerFilter.Uncles {
response.Uncles = make([][]byte, len(payload.Block.Body().Uncles)) response.Uncles = make([]ipfs.BlockModel, len(payload.Block.Body().Uncles))
for i, uncle := range payload.Block.Body().Uncles { for i, uncle := range payload.Block.Body().Uncles {
uncleRlp, err := rlp.EncodeToBytes(uncle) uncleRlp, err := rlp.EncodeToBytes(uncle)
if err != nil { if err != nil {
return err return err
} }
response.Uncles[i] = uncleRlp cid, err := ipld.RawdataToCid(ipld.MEthHeader, uncleRlp, multihash.KECCAK_256)
if err != nil {
return err
}
response.Uncles[i] = ipfs.BlockModel{
Data: uncleRlp,
CID: cid.String(),
}
} }
} }
} }
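Every branch of the filterer now derives a CID from the raw RLP with ipld.RawdataToCid and returns an ipfs.BlockModel, so subscribers receive content-addressed blocks rather than bare byte slices. A small helper capturing the repeated pattern (hypothetical, not part of this change; codec is one of the ipld.MEth* constants used above):

// toBlockModel wraps raw RLP in an ipfs.BlockModel, deriving the CID from the data itself.
func toBlockModel(codec uint64, data []byte) (ipfs.BlockModel, error) {
	c, err := ipld.RawdataToCid(codec, data, multihash.KECCAK_256)
	if err != nil {
		return ipfs.BlockModel{}, err
	}
	return ipfs.BlockModel{Data: data, CID: c.String()}, nil
}

With such a helper, the header branch would reduce to a single append of toBlockModel(ipld.MEthHeader, headerRLP).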
@ -107,14 +124,22 @@ func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLD
if !trxFilter.Off { if !trxFilter.Off {
trxLen := len(payload.Block.Body().Transactions) trxLen := len(payload.Block.Body().Transactions)
trxHashes = make([]common.Hash, 0, trxLen) trxHashes = make([]common.Hash, 0, trxLen)
response.Transactions = make([][]byte, 0, trxLen) response.Transactions = make([]ipfs.BlockModel, 0, trxLen)
for i, trx := range payload.Block.Body().Transactions { for i, trx := range payload.Block.Body().Transactions {
if checkTransactionAddrs(trxFilter.Src, trxFilter.Dst, payload.TxMetaData[i].Src, payload.TxMetaData[i].Dst) { if checkTransactionAddrs(trxFilter.Src, trxFilter.Dst, payload.TxMetaData[i].Src, payload.TxMetaData[i].Dst) {
trxBuffer := new(bytes.Buffer) trxBuffer := new(bytes.Buffer)
if err := trx.EncodeRLP(trxBuffer); err != nil { if err := trx.EncodeRLP(trxBuffer); err != nil {
return nil, err return nil, err
} }
response.Transactions = append(response.Transactions, trxBuffer.Bytes()) data := trxBuffer.Bytes()
cid, err := ipld.RawdataToCid(ipld.MEthTx, data, multihash.KECCAK_256)
if err != nil {
return nil, err
}
response.Transactions = append(response.Transactions, ipfs.BlockModel{
Data: data,
CID: cid.String(),
})
trxHashes = append(trxHashes, trx.Hash()) trxHashes = append(trxHashes, trx.Hash())
} }
} }
@ -143,17 +168,24 @@ func checkTransactionAddrs(wantedSrc, wantedDst []string, actualSrc, actualDst s
func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload ConvertedPayload, trxHashes []common.Hash) error { func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload ConvertedPayload, trxHashes []common.Hash) error {
if !receiptFilter.Off { if !receiptFilter.Off {
response.Receipts = make([][]byte, 0, len(payload.Receipts)) response.Receipts = make([]ipfs.BlockModel, 0, len(payload.Receipts))
for i, receipt := range payload.Receipts { for i, receipt := range payload.Receipts {
// topics is always length 4 // topics is always length 4
topics := [][]string{payload.ReceiptMetaData[i].Topic0s, payload.ReceiptMetaData[i].Topic1s, payload.ReceiptMetaData[i].Topic2s, payload.ReceiptMetaData[i].Topic3s} topics := [][]string{payload.ReceiptMetaData[i].Topic0s, payload.ReceiptMetaData[i].Topic1s, payload.ReceiptMetaData[i].Topic2s, payload.ReceiptMetaData[i].Topic3s}
if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.Contracts, payload.ReceiptMetaData[i].Contract, trxHashes) { if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.Contracts, payload.ReceiptMetaData[i].Contract, trxHashes) {
receiptForStorage := (*types.ReceiptForStorage)(receipt)
receiptBuffer := new(bytes.Buffer) receiptBuffer := new(bytes.Buffer)
if err := receiptForStorage.EncodeRLP(receiptBuffer); err != nil { if err := receipt.EncodeRLP(receiptBuffer); err != nil {
return err return err
} }
response.Receipts = append(response.Receipts, receiptBuffer.Bytes()) data := receiptBuffer.Bytes()
cid, err := ipld.RawdataToCid(ipld.MEthTxReceipt, data, multihash.KECCAK_256)
if err != nil {
return err
}
response.Receipts = append(response.Receipts, ipfs.BlockModel{
Data: data,
CID: cid.String(),
})
} }
} }
} }
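Receipts are now RLP-encoded directly from types.Receipt rather than being cast to types.ReceiptForStorage first, so the bytes carried in the BlockModel are the consensus receipt encoding. A decoding sketch for the consuming side (hypothetical helper, assuming the payload was produced by the filterer above):

// decodeReceipt decodes a receipt BlockModel emitted by the filterer.
func decodeReceipt(blk ipfs.BlockModel) (*types.Receipt, error) {
	// The data is the consensus encoding, so it decodes straight into types.Receipt.
	var rct types.Receipt
	if err := rlp.DecodeBytes(blk.Data, &rct); err != nil {
		return nil, err
	}
	return &rct, nil
}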
@ -231,10 +263,17 @@ func (s *ResponseFilterer) filterState(stateFilter StateFilter, response *IPLDs,
for _, stateNode := range payload.StateNodes { for _, stateNode := range payload.StateNodes {
if checkNodeKeys(keyFilters, stateNode.Key) { if checkNodeKeys(keyFilters, stateNode.Key) {
if stateNode.Leaf || stateFilter.IntermediateNodes { if stateNode.Leaf || stateFilter.IntermediateNodes {
cid, err := ipld.RawdataToCid(ipld.MEthStateTrie, stateNode.Value, multihash.KECCAK_256)
if err != nil {
return err
}
response.StateNodes = append(response.StateNodes, StateNode{ response.StateNodes = append(response.StateNodes, StateNode{
StateTrieKey: stateNode.Key, StateTrieKey: stateNode.Key,
IPLD: stateNode.Value, IPLD: ipfs.BlockModel{
Leaf: stateNode.Leaf, Data: stateNode.Value,
CID: cid.String(),
},
Leaf: stateNode.Leaf,
}) })
} }
} }
@ -271,11 +310,18 @@ func (s *ResponseFilterer) filterStorage(storageFilter StorageFilter, response *
if checkNodeKeys(stateKeyFilters, stateKey) { if checkNodeKeys(stateKeyFilters, stateKey) {
for _, storageNode := range storageNodes { for _, storageNode := range storageNodes {
if checkNodeKeys(storageKeyFilters, storageNode.Key) { if checkNodeKeys(storageKeyFilters, storageNode.Key) {
cid, err := ipld.RawdataToCid(ipld.MEthStorageTrie, storageNode.Value, multihash.KECCAK_256)
if err != nil {
return err
}
response.StorageNodes = append(response.StorageNodes, StorageNode{ response.StorageNodes = append(response.StorageNodes, StorageNode{
StateTrieKey: stateKey, StateTrieKey: stateKey,
StorageTrieKey: storageNode.Key, StorageTrieKey: storageNode.Key,
IPLD: storageNode.Value, IPLD: ipfs.BlockModel{
Leaf: storageNode.Leaf, Data: storageNode.Value,
CID: cid.String(),
},
Leaf: storageNode.Leaf,
}) })
} }
} }
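State and storage nodes likewise carry an ipfs.BlockModel, which makes each node self-describing: the CID can be recomputed from the data and compared. A verification sketch (hypothetical, using the same codec and multihash as the filter code above):

// verifyStateNodes recomputes each node's CID from its raw data and checks it
// against the CID carried in the BlockModel.
func verifyStateNodes(nodes []StateNode) error {
	for _, n := range nodes {
		c, err := ipld.RawdataToCid(ipld.MEthStateTrie, n.IPLD.Data, multihash.KECCAK_256)
		if err != nil {
			return err
		}
		if c.String() != n.IPLD.CID {
			return fmt.Errorf("state node %s: cid mismatch", n.StateTrieKey.Hex())
		}
	}
	return nil
}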

View File

@ -17,9 +17,8 @@
package eth_test package eth_test
import ( import (
"bytes" "github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -29,17 +28,13 @@ import (
) )
var ( var (
filterer *eth.ResponseFilterer filterer *eth.ResponseFilterer
expectedRctForStorageRLP1 []byte
expectedRctForStorageRLP2 []byte
) )
var _ = Describe("Filterer", func() { var _ = Describe("Filterer", func() {
Describe("FilterResponse", func() { Describe("FilterResponse", func() {
BeforeEach(func() { BeforeEach(func() {
filterer = eth.NewResponseFilterer() filterer = eth.NewResponseFilterer()
expectedRctForStorageRLP1 = getReceiptForStorageRLP(mocks.MockReceipts, 0)
expectedRctForStorageRLP2 = getReceiptForStorageRLP(mocks.MockReceipts, 1)
}) })
It("Transcribes all the data from the IPLDPayload into the StreamPayload if given an open filter", func() { It("Transcribes all the data from the IPLDPayload into the StreamPayload if given an open filter", func() {
@ -49,22 +44,28 @@ var _ = Describe("Filterer", func() {
Expect(ok).To(BeTrue()) Expect(ok).To(BeTrue())
Expect(iplds.BlockNumber.Int64()).To(Equal(mocks.MockIPLDs.BlockNumber.Int64())) Expect(iplds.BlockNumber.Int64()).To(Equal(mocks.MockIPLDs.BlockNumber.Int64()))
Expect(iplds.Headers).To(Equal(mocks.MockIPLDs.Headers)) Expect(iplds.Headers).To(Equal(mocks.MockIPLDs.Headers))
var unclesRlp [][]byte var expectedEmptyUncles []ipfs.BlockModel
Expect(iplds.Uncles).To(Equal(unclesRlp)) Expect(iplds.Uncles).To(Equal(expectedEmptyUncles))
Expect(len(iplds.Transactions)).To(Equal(2)) Expect(len(iplds.Transactions)).To(Equal(2))
Expect(shared.ListContainsBytes(iplds.Transactions, mocks.MockTransactions.GetRlp(0))).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Transactions, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
Expect(shared.ListContainsBytes(iplds.Transactions, mocks.MockTransactions.GetRlp(1))).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Transactions, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(iplds.Receipts)).To(Equal(2)) Expect(len(iplds.Receipts)).To(Equal(2))
Expect(shared.ListContainsBytes(iplds.Receipts, expectedRctForStorageRLP1)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Receipts, mocks.MockReceipts.GetRlp(0))).To(BeTrue())
Expect(shared.ListContainsBytes(iplds.Receipts, expectedRctForStorageRLP2)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Receipts, mocks.MockReceipts.GetRlp(1))).To(BeTrue())
Expect(len(iplds.StateNodes)).To(Equal(2)) Expect(len(iplds.StateNodes)).To(Equal(2))
for _, stateNode := range iplds.StateNodes { for _, stateNode := range iplds.StateNodes {
Expect(stateNode.Leaf).To(BeTrue()) Expect(stateNode.Leaf).To(BeTrue())
if stateNode.StateTrieKey == mocks.ContractLeafKey { if stateNode.StateTrieKey == mocks.ContractLeafKey {
Expect(stateNode.IPLD).To(Equal(mocks.ValueBytes)) Expect(stateNode.IPLD).To(Equal(ipfs.BlockModel{
Data: mocks.State1IPLD.RawData(),
CID: mocks.State1IPLD.Cid().String(),
}))
} }
if stateNode.StateTrieKey == mocks.AnotherContractLeafKey { if stateNode.StateTrieKey == mocks.AnotherContractLeafKey {
Expect(stateNode.IPLD).To(Equal(mocks.AnotherValueBytes)) Expect(stateNode.IPLD).To(Equal(ipfs.BlockModel{
Data: mocks.State2IPLD.RawData(),
CID: mocks.State2IPLD.Cid().String(),
}))
} }
} }
Expect(iplds.StorageNodes).To(Equal(mocks.MockIPLDs.StorageNodes)) Expect(iplds.StorageNodes).To(Equal(mocks.MockIPLDs.StorageNodes))
@ -82,7 +83,10 @@ var _ = Describe("Filterer", func() {
Expect(len(iplds1.StorageNodes)).To(Equal(0)) Expect(len(iplds1.StorageNodes)).To(Equal(0))
Expect(len(iplds1.StateNodes)).To(Equal(0)) Expect(len(iplds1.StateNodes)).To(Equal(0))
Expect(len(iplds1.Receipts)).To(Equal(1)) Expect(len(iplds1.Receipts)).To(Equal(1))
Expect(iplds1.Receipts[0]).To(Equal(expectedRctForStorageRLP2)) Expect(iplds1.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: mocks.Rct2IPLD.RawData(),
CID: mocks.Rct2IPLD.Cid().String(),
}))
payload2, err := filterer.Filter(rctTopicsFilter, mocks.MockConvertedPayload) payload2, err := filterer.Filter(rctTopicsFilter, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -95,7 +99,10 @@ var _ = Describe("Filterer", func() {
Expect(len(iplds2.StorageNodes)).To(Equal(0)) Expect(len(iplds2.StorageNodes)).To(Equal(0))
Expect(len(iplds2.StateNodes)).To(Equal(0)) Expect(len(iplds2.StateNodes)).To(Equal(0))
Expect(len(iplds2.Receipts)).To(Equal(1)) Expect(len(iplds2.Receipts)).To(Equal(1))
Expect(iplds2.Receipts[0]).To(Equal(expectedRctForStorageRLP1)) Expect(iplds2.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: mocks.Rct1IPLD.RawData(),
CID: mocks.Rct1IPLD.Cid().String(),
}))
payload3, err := filterer.Filter(rctTopicsAndContractFilter, mocks.MockConvertedPayload) payload3, err := filterer.Filter(rctTopicsAndContractFilter, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -108,7 +115,10 @@ var _ = Describe("Filterer", func() {
Expect(len(iplds3.StorageNodes)).To(Equal(0)) Expect(len(iplds3.StorageNodes)).To(Equal(0))
Expect(len(iplds3.StateNodes)).To(Equal(0)) Expect(len(iplds3.StateNodes)).To(Equal(0))
Expect(len(iplds3.Receipts)).To(Equal(1)) Expect(len(iplds3.Receipts)).To(Equal(1))
Expect(iplds3.Receipts[0]).To(Equal(expectedRctForStorageRLP1)) Expect(iplds3.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: mocks.Rct1IPLD.RawData(),
CID: mocks.Rct1IPLD.Cid().String(),
}))
payload4, err := filterer.Filter(rctContractsAndTopicFilter, mocks.MockConvertedPayload) payload4, err := filterer.Filter(rctContractsAndTopicFilter, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -121,7 +131,10 @@ var _ = Describe("Filterer", func() {
Expect(len(iplds4.StorageNodes)).To(Equal(0)) Expect(len(iplds4.StorageNodes)).To(Equal(0))
Expect(len(iplds4.StateNodes)).To(Equal(0)) Expect(len(iplds4.StateNodes)).To(Equal(0))
Expect(len(iplds4.Receipts)).To(Equal(1)) Expect(len(iplds4.Receipts)).To(Equal(1))
Expect(iplds4.Receipts[0]).To(Equal(expectedRctForStorageRLP2)) Expect(iplds4.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: mocks.Rct2IPLD.RawData(),
CID: mocks.Rct2IPLD.Cid().String(),
}))
payload5, err := filterer.Filter(rctsForAllCollectedTrxs, mocks.MockConvertedPayload) payload5, err := filterer.Filter(rctsForAllCollectedTrxs, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -131,13 +144,13 @@ var _ = Describe("Filterer", func() {
Expect(len(iplds5.Headers)).To(Equal(0)) Expect(len(iplds5.Headers)).To(Equal(0))
Expect(len(iplds5.Uncles)).To(Equal(0)) Expect(len(iplds5.Uncles)).To(Equal(0))
Expect(len(iplds5.Transactions)).To(Equal(2)) Expect(len(iplds5.Transactions)).To(Equal(2))
Expect(shared.ListContainsBytes(iplds5.Transactions, mocks.MockTransactions.GetRlp(0))).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Transactions, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
Expect(shared.ListContainsBytes(iplds5.Transactions, mocks.MockTransactions.GetRlp(1))).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Transactions, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(iplds5.StorageNodes)).To(Equal(0)) Expect(len(iplds5.StorageNodes)).To(Equal(0))
Expect(len(iplds5.StateNodes)).To(Equal(0)) Expect(len(iplds5.StateNodes)).To(Equal(0))
Expect(len(iplds5.Receipts)).To(Equal(2)) Expect(len(iplds5.Receipts)).To(Equal(2))
Expect(shared.ListContainsBytes(iplds5.Receipts, expectedRctForStorageRLP1)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Receipts, mocks.MockReceipts.GetRlp(0))).To(BeTrue())
Expect(shared.ListContainsBytes(iplds5.Receipts, expectedRctForStorageRLP2)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Receipts, mocks.MockReceipts.GetRlp(1))).To(BeTrue())
payload6, err := filterer.Filter(rctsForSelectCollectedTrxs, mocks.MockConvertedPayload) payload6, err := filterer.Filter(rctsForSelectCollectedTrxs, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -147,11 +160,14 @@ var _ = Describe("Filterer", func() {
Expect(len(iplds6.Headers)).To(Equal(0)) Expect(len(iplds6.Headers)).To(Equal(0))
Expect(len(iplds6.Uncles)).To(Equal(0)) Expect(len(iplds6.Uncles)).To(Equal(0))
Expect(len(iplds6.Transactions)).To(Equal(1)) Expect(len(iplds6.Transactions)).To(Equal(1))
Expect(shared.ListContainsBytes(iplds5.Transactions, mocks.MockTransactions.GetRlp(1))).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds6.Transactions, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(iplds6.StorageNodes)).To(Equal(0)) Expect(len(iplds6.StorageNodes)).To(Equal(0))
Expect(len(iplds6.StateNodes)).To(Equal(0)) Expect(len(iplds6.StateNodes)).To(Equal(0))
Expect(len(iplds6.Receipts)).To(Equal(1)) Expect(len(iplds6.Receipts)).To(Equal(1))
Expect(iplds4.Receipts[0]).To(Equal(expectedRctForStorageRLP2)) Expect(iplds6.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: mocks.Rct2IPLD.RawData(),
CID: mocks.Rct2IPLD.Cid().String(),
}))
payload7, err := filterer.Filter(stateFilter, mocks.MockConvertedPayload) payload7, err := filterer.Filter(stateFilter, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -165,7 +181,10 @@ var _ = Describe("Filterer", func() {
Expect(len(iplds7.Receipts)).To(Equal(0)) Expect(len(iplds7.Receipts)).To(Equal(0))
Expect(len(iplds7.StateNodes)).To(Equal(1)) Expect(len(iplds7.StateNodes)).To(Equal(1))
Expect(iplds7.StateNodes[0].StateTrieKey).To(Equal(mocks.ContractLeafKey)) Expect(iplds7.StateNodes[0].StateTrieKey).To(Equal(mocks.ContractLeafKey))
Expect(iplds7.StateNodes[0].IPLD).To(Equal(mocks.ValueBytes)) Expect(iplds7.StateNodes[0].IPLD).To(Equal(ipfs.BlockModel{
Data: mocks.State1IPLD.RawData(),
CID: mocks.State1IPLD.Cid().String(),
}))
payload8, err := filterer.Filter(rctTopicsAndContractFilterFail, mocks.MockConvertedPayload) payload8, err := filterer.Filter(rctTopicsAndContractFilterFail, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -181,11 +200,3 @@ var _ = Describe("Filterer", func() {
}) })
}) })
}) })
func getReceiptForStorageRLP(receipts types.Receipts, i int) []byte {
receiptForStorage := (*types.ReceiptForStorage)(receipts[i])
receiptBuffer := new(bytes.Buffer)
err := receiptForStorage.EncodeRLP(receiptBuffer)
Expect(err).ToNot(HaveOccurred())
return receiptBuffer.Bytes()
}
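The assertions above switch from shared.ListContainsBytes to shared.IPLDsContainBytes because the response slices now hold ipfs.BlockModel values rather than raw byte slices. A minimal version of that helper would presumably look like this (sketch only; bytes and ipfs imports assumed):

// IPLDsContainBytes reports whether any BlockModel in the slice carries the given raw data.
func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool {
	for _, blk := range iplds {
		if bytes.Equal(blk.Data, b) {
			return true
		}
	}
	return false
}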

View File

@ -82,16 +82,16 @@ func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel, nodeID int64) (int64, error) { func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel, nodeID int64) (int64, error) {
var headerID int64 var headerID int64
err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id) VALUES ($1, $2, $3, $4, $5, $6) err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id) = ($3, $4, $5, $6) ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward) = ($3, $4, $5, $6, $7)
RETURNING id`, RETURNING id`,
header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, nodeID).Scan(&headerID) header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, nodeID, header.Reward).Scan(&headerID)
return headerID, err return headerID, err
} }
func (in *CIDIndexer) indexUncleCID(tx *sqlx.Tx, uncle UncleModel, headerID int64) error { func (in *CIDIndexer) indexUncleCID(tx *sqlx.Tx, uncle UncleModel, headerID int64) error {
_, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid) VALUES ($1, $2, $3, $4) _, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid) = ($3, $4)`, ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward) = ($3, $4, $5)`,
uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID) uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward)
return err return err
} }

View File

@ -45,18 +45,20 @@ var _ = Describe("Indexer", func() {
It("Indexes CIDs and related metadata into vulcanizedb", func() { It("Indexes CIDs and related metadata into vulcanizedb", func() {
err = repo.Index(mocks.MockCIDPayload) err = repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
pgStr := `SELECT cid, td FROM eth.header_cids pgStr := `SELECT cid, td, reward FROM eth.header_cids
WHERE block_number = $1` WHERE block_number = $1`
// check header was properly indexed // check header was properly indexed
type res struct { type res struct {
CID string CID string
TD string TD string
Reward string
} }
headers := new(res) headers := new(res)
err = db.QueryRowx(pgStr, 1).StructScan(headers) err = db.QueryRowx(pgStr, 1).StructScan(headers)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(headers.CID).To(Equal("mockHeaderCID")) Expect(headers.CID).To(Equal(mocks.HeaderCID.String()))
Expect(headers.TD).To(Equal("1337")) Expect(headers.TD).To(Equal(mocks.MockBlock.Difficulty().String()))
Expect(headers.Reward).To(Equal("5000000000000000000"))
// check trxs were properly indexed // check trxs were properly indexed
trxs := make([]string, 0) trxs := make([]string, 0)
pgStr = `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id) pgStr = `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
@ -64,8 +66,8 @@ var _ = Describe("Indexer", func() {
err = db.Select(&trxs, pgStr, 1) err = db.Select(&trxs, pgStr, 1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(trxs)).To(Equal(2)) Expect(len(trxs)).To(Equal(2))
Expect(shared.ListContainsString(trxs, "mockTrxCID1")).To(BeTrue()) Expect(shared.ListContainsString(trxs, mocks.Trx1CID.String())).To(BeTrue())
Expect(shared.ListContainsString(trxs, "mockTrxCID2")).To(BeTrue()) Expect(shared.ListContainsString(trxs, mocks.Trx2CID.String())).To(BeTrue())
// check receipts were properly indexed // check receipts were properly indexed
rcts := make([]string, 0) rcts := make([]string, 0)
pgStr = `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids pgStr = `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
@ -75,8 +77,8 @@ var _ = Describe("Indexer", func() {
err = db.Select(&rcts, pgStr, 1) err = db.Select(&rcts, pgStr, 1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(rcts)).To(Equal(2)) Expect(len(rcts)).To(Equal(2))
Expect(shared.ListContainsString(rcts, "mockRctCID1")).To(BeTrue()) Expect(shared.ListContainsString(rcts, mocks.Rct1CID.String())).To(BeTrue())
Expect(shared.ListContainsString(rcts, "mockRctCID2")).To(BeTrue()) Expect(shared.ListContainsString(rcts, mocks.Rct2CID.String())).To(BeTrue())
// check that state nodes were properly indexed // check that state nodes were properly indexed
stateNodes := make([]eth.StateNodeModel, 0) stateNodes := make([]eth.StateNodeModel, 0)
pgStr = `SELECT state_cids.cid, state_cids.state_key, state_cids.leaf FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id) pgStr = `SELECT state_cids.cid, state_cids.state_key, state_cids.leaf FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
@ -85,11 +87,11 @@ var _ = Describe("Indexer", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(stateNodes)).To(Equal(2)) Expect(len(stateNodes)).To(Equal(2))
for _, stateNode := range stateNodes { for _, stateNode := range stateNodes {
if stateNode.CID == "mockStateCID1" { if stateNode.CID == mocks.State1CID.String() {
Expect(stateNode.Leaf).To(Equal(true)) Expect(stateNode.Leaf).To(Equal(true))
Expect(stateNode.StateKey).To(Equal(mocks.ContractLeafKey.Hex())) Expect(stateNode.StateKey).To(Equal(mocks.ContractLeafKey.Hex()))
} }
if stateNode.CID == "mockStateCID2" { if stateNode.CID == mocks.State2CID.String() {
Expect(stateNode.Leaf).To(Equal(true)) Expect(stateNode.Leaf).To(Equal(true))
Expect(stateNode.StateKey).To(Equal(mocks.AnotherContractLeafKey.Hex())) Expect(stateNode.StateKey).To(Equal(mocks.AnotherContractLeafKey.Hex()))
} }
@ -104,7 +106,7 @@ var _ = Describe("Indexer", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(storageNodes)).To(Equal(1)) Expect(len(storageNodes)).To(Equal(1))
Expect(storageNodes[0]).To(Equal(eth.StorageNodeWithStateKeyModel{ Expect(storageNodes[0]).To(Equal(eth.StorageNodeWithStateKeyModel{
CID: "mockStorageCID", CID: mocks.StorageCID.String(),
Leaf: true, Leaf: true,
StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001", StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
StateKey: mocks.ContractLeafKey.Hex(), StateKey: mocks.ContractLeafKey.Hex(),

View File

@ -90,7 +90,7 @@ func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) {
// FetchHeaders fetches headers // FetchHeaders fetches headers
// It uses the f.fetchBatch method // It uses the f.fetchBatch method
func (f *IPLDFetcher) FetchHeaders(cids []HeaderModel) ([][]byte, error) { func (f *IPLDFetcher) FetchHeaders(cids []HeaderModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching header iplds") log.Debug("fetching header iplds")
headerCids := make([]cid.Cid, len(cids)) headerCids := make([]cid.Cid, len(cids))
for i, c := range cids { for i, c := range cids {
@ -101,20 +101,23 @@ func (f *IPLDFetcher) FetchHeaders(cids []HeaderModel) ([][]byte, error) {
headerCids[i] = dc headerCids[i] = dc
} }
headers := f.fetchBatch(headerCids) headers := f.fetchBatch(headerCids)
headersRLP := make([][]byte, len(headers)) headerIPLDs := make([]ipfs.BlockModel, len(headers))
for i, header := range headers { for i, header := range headers {
headersRLP[i] = header.RawData() headerIPLDs[i] = ipfs.BlockModel{
Data: header.RawData(),
CID: header.Cid().String(),
}
} }
if len(headersRLP) != len(headerCids) { if len(headerIPLDs) != len(headerCids) {
log.Errorf("ipfs fetcher: number of header blocks returned (%d) does not match number expected (%d)", len(headers), len(headerCids)) log.Errorf("ipfs fetcher: number of header blocks returned (%d) does not match number expected (%d)", len(headers), len(headerCids))
return headersRLP, errUnexpectedNumberOfIPLDs return headerIPLDs, errUnexpectedNumberOfIPLDs
} }
return headersRLP, nil return headerIPLDs, nil
} }
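FetchHeaders, FetchUncles, FetchTrxs, and FetchRcts all repeat the same wrap-into-BlockModel loop. A possible refactor (a sketch, not part of this change) would factor it into one helper over go-block-format blocks:

// blocksToModels converts fetched go-block-format blocks into ipfs.BlockModels,
// mirroring the loops in the Fetch* methods.
func blocksToModels(blks []blocks.Block) []ipfs.BlockModel {
	models := make([]ipfs.BlockModel, len(blks))
	for i, blk := range blks {
		// Carry both the raw data and its content address back to subscribers.
		models[i] = ipfs.BlockModel{
			Data: blk.RawData(),
			CID:  blk.Cid().String(),
		}
	}
	return models
}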
// FetchUncles fetches uncles // FetchUncles fetches uncles
// It uses the f.fetchBatch method // It uses the f.fetchBatch method
func (f *IPLDFetcher) FetchUncles(cids []UncleModel) ([][]byte, error) { func (f *IPLDFetcher) FetchUncles(cids []UncleModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching uncle iplds") log.Debug("fetching uncle iplds")
uncleCids := make([]cid.Cid, len(cids)) uncleCids := make([]cid.Cid, len(cids))
for i, c := range cids { for i, c := range cids {
@ -125,20 +128,23 @@ func (f *IPLDFetcher) FetchUncles(cids []UncleModel) ([][]byte, error) {
uncleCids[i] = dc uncleCids[i] = dc
} }
uncles := f.fetchBatch(uncleCids) uncles := f.fetchBatch(uncleCids)
unclesRLP := make([][]byte, len(uncles)) uncleIPLDs := make([]ipfs.BlockModel, len(uncles))
for i, uncle := range uncles { for i, uncle := range uncles {
unclesRLP[i] = uncle.RawData() uncleIPLDs[i] = ipfs.BlockModel{
Data: uncle.RawData(),
CID: uncle.Cid().String(),
}
} }
if len(unclesRLP) != len(uncleCids) { if len(uncleIPLDs) != len(uncleCids) {
log.Errorf("ipfs fetcher: number of uncle blocks returned (%d) does not match number expected (%d)", len(uncles), len(uncleCids)) log.Errorf("ipfs fetcher: number of uncle blocks returned (%d) does not match number expected (%d)", len(uncles), len(uncleCids))
return unclesRLP, errUnexpectedNumberOfIPLDs return uncleIPLDs, errUnexpectedNumberOfIPLDs
} }
return unclesRLP, nil return uncleIPLDs, nil
} }
// FetchTrxs fetches transactions // FetchTrxs fetches transactions
// It uses the f.fetchBatch method // It uses the f.fetchBatch method
func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([][]byte, error) { func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching transaction iplds") log.Debug("fetching transaction iplds")
trxCids := make([]cid.Cid, len(cids)) trxCids := make([]cid.Cid, len(cids))
for i, c := range cids { for i, c := range cids {
@ -149,20 +155,24 @@ func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([][]byte, error) {
trxCids[i] = dc trxCids[i] = dc
} }
trxs := f.fetchBatch(trxCids) trxs := f.fetchBatch(trxCids)
trxsRLP := make([][]byte, len(trxs)) trxIPLDs := make([]ipfs.BlockModel, len(trxs))
for i, trx := range trxs { for i, trx := range trxs {
trxsRLP[i] = trx.RawData() trxIPLDs[i] = ipfs.BlockModel{
Data: trx.RawData(),
CID: trx.Cid().String(),
}
} }
if len(trxsRLP) != len(trxCids) { if len(trxIPLDs) != len(trxCids) {
log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids)) log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids))
return trxsRLP, errUnexpectedNumberOfIPLDs return trxIPLDs, errUnexpectedNumberOfIPLDs
} }
return trxsRLP, nil return trxIPLDs, nil
} }
// FetchRcts fetches receipts // FetchRcts fetches receipts
// It uses the f.fetchBatch method // It uses the f.fetchBatch method
func (f *IPLDFetcher) FetchRcts(cids []ReceiptModel) ([][]byte, error) { // batch fetch preserves order?
func (f *IPLDFetcher) FetchRcts(cids []ReceiptModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching receipt iplds") log.Debug("fetching receipt iplds")
rctCids := make([]cid.Cid, len(cids)) rctCids := make([]cid.Cid, len(cids))
for i, c := range cids { for i, c := range cids {
@ -173,15 +183,18 @@ func (f *IPLDFetcher) FetchRcts(cids []ReceiptModel) ([][]byte, error) {
rctCids[i] = dc rctCids[i] = dc
} }
rcts := f.fetchBatch(rctCids) rcts := f.fetchBatch(rctCids)
rctsRLP := make([][]byte, len(rcts)) rctIPLDs := make([]ipfs.BlockModel, len(rcts))
for i, rct := range rcts { for i, rct := range rcts {
rctsRLP[i] = rct.RawData() rctIPLDs[i] = ipfs.BlockModel{
Data: rct.RawData(),
CID: rct.Cid().String(),
}
} }
if len(rctsRLP) != len(rctCids) { if len(rctIPLDs) != len(rctCids) {
log.Errorf("ipfs fetcher: number of receipt blocks returned (%d) does not match number expected (%d)", len(rcts), len(rctCids)) log.Errorf("ipfs fetcher: number of receipt blocks returned (%d) does not match number expected (%d)", len(rcts), len(rctCids))
return rctsRLP, errUnexpectedNumberOfIPLDs return rctIPLDs, errUnexpectedNumberOfIPLDs
} }
return rctsRLP, nil return rctIPLDs, nil
} }
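The "batch fetch preserves order?" comment flags a real concern: if fetchBatch ever returns blocks in a different order than the requested CIDs, positional indexing no longer lines up. One defensive option (a sketch, not what this change does) is to re-align the results by CID:

// reorderByCid re-aligns fetched blocks with the requested CID order.
// Assumes every requested CID was returned exactly once.
func reorderByCid(want []cid.Cid, got []blocks.Block) ([]blocks.Block, error) {
	// Index the fetched blocks by their CID string.
	byCid := make(map[string]blocks.Block, len(got))
	for _, blk := range got {
		byCid[blk.Cid().String()] = blk
	}
	// Walk the requested order and pick each block back out.
	ordered := make([]blocks.Block, len(want))
	for i, c := range want {
		blk, ok := byCid[c.String()]
		if !ok {
			return nil, fmt.Errorf("missing block for cid %s", c.String())
		}
		ordered[i] = blk
	}
	return ordered, nil
}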
// FetchState fetches state nodes // FetchState fetches state nodes
@ -203,7 +216,10 @@ func (f *IPLDFetcher) FetchState(cids []StateNodeModel) ([]StateNode, error) {
return nil, err return nil, err
} }
stateNodes[i] = StateNode{ stateNodes[i] = StateNode{
IPLD: state.RawData(), IPLD: ipfs.BlockModel{
Data: state.RawData(),
CID: state.Cid().String(),
},
StateTrieKey: common.HexToHash(stateNode.StateKey), StateTrieKey: common.HexToHash(stateNode.StateKey),
Leaf: stateNode.Leaf, Leaf: stateNode.Leaf,
} }
@ -230,7 +246,10 @@ func (f *IPLDFetcher) FetchStorage(cids []StorageNodeWithStateKeyModel) ([]Stora
return nil, err return nil, err
} }
storageNodes[i] = StorageNode{ storageNodes[i] = StorageNode{
IPLD: storage.RawData(), IPLD: ipfs.BlockModel{
Data: storage.RawData(),
CID: storage.Cid().String(),
},
StateTrieKey: common.HexToHash(storageNode.StateKey), StateTrieKey: common.HexToHash(storageNode.StateKey),
StorageTrieKey: common.HexToHash(storageNode.StorageKey), StorageTrieKey: common.HexToHash(storageNode.StorageKey),
Leaf: storageNode.Leaf, Leaf: storageNode.Leaf,

View File

@ -20,6 +20,8 @@ import (
"bytes" "bytes"
"math/big" "math/big"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-block-format" "github.com/ipfs/go-block-format"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -106,26 +108,47 @@ var _ = Describe("Fetcher", func() {
Expect(ok).To(BeTrue()) Expect(ok).To(BeTrue())
Expect(iplds.BlockNumber).To(Equal(mockCIDWrapper.BlockNumber)) Expect(iplds.BlockNumber).To(Equal(mockCIDWrapper.BlockNumber))
Expect(len(iplds.Headers)).To(Equal(1)) Expect(len(iplds.Headers)).To(Equal(1))
Expect(iplds.Headers[0]).To(Equal(mockHeaderBlock.RawData())) Expect(iplds.Headers[0]).To(Equal(ipfs.BlockModel{
Data: mockHeaderBlock.RawData(),
CID: mockHeaderBlock.Cid().String(),
}))
Expect(len(iplds.Uncles)).To(Equal(1)) Expect(len(iplds.Uncles)).To(Equal(1))
Expect(iplds.Uncles[0]).To(Equal(mockUncleBlock.RawData())) Expect(iplds.Uncles[0]).To(Equal(ipfs.BlockModel{
Data: mockUncleBlock.RawData(),
CID: mockUncleBlock.Cid().String(),
}))
Expect(len(iplds.Transactions)).To(Equal(1)) Expect(len(iplds.Transactions)).To(Equal(1))
Expect(iplds.Transactions[0]).To(Equal(mockTrxBlock.RawData())) Expect(iplds.Transactions[0]).To(Equal(ipfs.BlockModel{
Data: mockTrxBlock.RawData(),
CID: mockTrxBlock.Cid().String(),
}))
Expect(len(iplds.Receipts)).To(Equal(1)) Expect(len(iplds.Receipts)).To(Equal(1))
Expect(iplds.Receipts[0]).To(Equal(mockReceiptBlock.RawData())) Expect(iplds.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: mockReceiptBlock.RawData(),
CID: mockReceiptBlock.Cid().String(),
}))
Expect(len(iplds.StateNodes)).To(Equal(1)) Expect(len(iplds.StateNodes)).To(Equal(1))
Expect(iplds.StateNodes[0].StateTrieKey).To(Equal(common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"))) Expect(iplds.StateNodes[0].StateTrieKey).To(Equal(common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")))
Expect(iplds.StateNodes[0].Leaf).To(BeTrue()) Expect(iplds.StateNodes[0].Leaf).To(BeTrue())
Expect(iplds.StateNodes[0].IPLD).To(Equal(mockStateBlock.RawData())) Expect(iplds.StateNodes[0].IPLD).To(Equal(ipfs.BlockModel{
Data: mockStateBlock.RawData(),
CID: mockStateBlock.Cid().String(),
}))
Expect(len(iplds.StorageNodes)).To(Equal(2)) Expect(len(iplds.StorageNodes)).To(Equal(2))
for _, storage := range iplds.StorageNodes { for _, storage := range iplds.StorageNodes {
Expect(storage.Leaf).To(BeTrue()) Expect(storage.Leaf).To(BeTrue())
Expect(storage.StateTrieKey).To(Equal(common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"))) Expect(storage.StateTrieKey).To(Equal(common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")))
if bytes.Equal(storage.StorageTrieKey.Bytes(), common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001").Bytes()) { if bytes.Equal(storage.StorageTrieKey.Bytes(), common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001").Bytes()) {
Expect(storage.IPLD).To(Equal(mockStorageBlock1.RawData())) Expect(storage.IPLD).To(Equal(ipfs.BlockModel{
Data: mockStorageBlock1.RawData(),
CID: mockStorageBlock1.Cid().String(),
}))
} }
if bytes.Equal(storage.StorageTrieKey.Bytes(), common.HexToHash("0000000000000000000000000000000000000000000000000000000000000002").Bytes()) { if bytes.Equal(storage.StorageTrieKey.Bytes(), common.HexToHash("0000000000000000000000000000000000000000000000000000000000000002").Bytes()) {
Expect(storage.IPLD).To(Equal(mockStorageBlock2.RawData())) Expect(storage.IPLD).To(Equal(ipfs.BlockModel{
Data: mockStorageBlock2.RawData(),
CID: mockStorageBlock2.Cid().String(),
}))
} }
} }
}) })

View File

@ -23,6 +23,8 @@ import (
"math/big" "math/big"
rand2 "math/rand" rand2 "math/rand"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/multiformats/go-multihash" "github.com/multiformats/go-multihash"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld" "github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@ -50,6 +52,8 @@ var (
Root: common.HexToHash("0x0"), Root: common.HexToHash("0x0"),
TxHash: common.HexToHash("0x0"), TxHash: common.HexToHash("0x0"),
ReceiptHash: common.HexToHash("0x0"), ReceiptHash: common.HexToHash("0x0"),
Difficulty: big.NewInt(5000000),
Extra: []byte{},
} }
MockTransactions, MockReceipts, senderAddr = createTransactionsAndReceipts() MockTransactions, MockReceipts, senderAddr = createTransactionsAndReceipts()
ReceiptsRlp, _ = rlp.EncodeToBytes(MockReceipts) ReceiptsRlp, _ = rlp.EncodeToBytes(MockReceipts)
@ -62,7 +66,24 @@ var (
mockTopic12 = common.HexToHash("0x06") mockTopic12 = common.HexToHash("0x06")
mockTopic21 = common.HexToHash("0x05") mockTopic21 = common.HexToHash("0x05")
mockTopic22 = common.HexToHash("0x07") mockTopic22 = common.HexToHash("0x07")
MockTrxMeta = []eth.TxModel{ MockLog1 = &types.Log{
Topics: []common.Hash{mockTopic11, mockTopic12},
Data: []byte{},
}
MockLog2 = &types.Log{
Topics: []common.Hash{mockTopic21, mockTopic22},
Data: []byte{},
}
HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
Trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(0), multihash.KECCAK_256)
Trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(1), multihash.KECCAK_256)
Rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(0), multihash.KECCAK_256)
Rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(1), multihash.KECCAK_256)
State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ValueBytes, multihash.KECCAK_256)
State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AnotherValueBytes, multihash.KECCAK_256)
StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageValue, multihash.KECCAK_256)
MockTrxMeta = []eth.TxModel{
{ {
CID: "", // This is empty until we go to publish to ipfs CID: "", // This is empty until we go to publish to ipfs
Src: senderAddr.Hex(), Src: senderAddr.Hex(),
@ -80,14 +101,14 @@ var (
} }
MockTrxMetaPostPublsh = []eth.TxModel{ MockTrxMetaPostPublsh = []eth.TxModel{
{ {
CID: "mockTrxCID1", // This is empty until we go to publish to ipfs CID: Trx1CID.String(), // This is empty until we go to publish to ipfs
Src: senderAddr.Hex(), Src: senderAddr.Hex(),
Dst: Address.String(), Dst: Address.String(),
Index: 0, Index: 0,
TxHash: MockTransactions[0].Hash().String(), TxHash: MockTransactions[0].Hash().String(),
}, },
{ {
CID: "mockTrxCID2", CID: Trx2CID.String(),
Src: senderAddr.Hex(), Src: senderAddr.Hex(),
Dst: AnotherAddress.String(), Dst: AnotherAddress.String(),
Index: 1, Index: 1,
@ -118,7 +139,7 @@ var (
} }
MockRctMetaPostPublish = []eth.ReceiptModel{ MockRctMetaPostPublish = []eth.ReceiptModel{
{ {
CID: "mockRctCID1", CID: Rct1CID.String(),
Topic0s: []string{ Topic0s: []string{
mockTopic11.String(), mockTopic11.String(),
}, },
@ -128,7 +149,7 @@ var (
Contract: Address.String(), Contract: Address.String(),
}, },
{ {
CID: "mockRctCID2", CID: Rct2CID.String(),
Topic0s: []string{ Topic0s: []string{
mockTopic21.String(), mockTopic21.String(),
}, },
@ -208,12 +229,12 @@ var (
} }
MockStateMetaPostPublish = []eth.StateNodeModel{ MockStateMetaPostPublish = []eth.StateNodeModel{
{ {
CID: "mockStateCID1", CID: State1CID.String(),
Leaf: true, Leaf: true,
StateKey: ContractLeafKey.String(), StateKey: ContractLeafKey.String(),
}, },
{ {
CID: "mockStateCID2", CID: State2CID.String(),
Leaf: true, Leaf: true,
StateKey: AnotherContractLeafKey.String(), StateKey: AnotherContractLeafKey.String(),
}, },
@ -233,11 +254,11 @@ var (
BlockRlp: MockBlockRlp, BlockRlp: MockBlockRlp,
StateDiffRlp: MockStateDiffBytes, StateDiffRlp: MockStateDiffBytes,
ReceiptsRlp: ReceiptsRlp, ReceiptsRlp: ReceiptsRlp,
TotalDifficulty: big.NewInt(1337), TotalDifficulty: MockBlock.Difficulty(),
} }
MockConvertedPayload = eth.ConvertedPayload{ MockConvertedPayload = eth.ConvertedPayload{
TotalDifficulty: big.NewInt(1337), TotalDifficulty: MockBlock.Difficulty(),
Block: MockBlock, Block: MockBlock,
Receipts: MockReceipts, Receipts: MockReceipts,
TxMetaData: MockTrxMeta, TxMetaData: MockTrxMeta,
@ -250,9 +271,10 @@ var (
HeaderCID: eth2.HeaderModel{ HeaderCID: eth2.HeaderModel{
BlockHash: MockBlock.Hash().String(), BlockHash: MockBlock.Hash().String(),
BlockNumber: MockBlock.Number().String(), BlockNumber: MockBlock.Number().String(),
CID: "mockHeaderCID", CID: HeaderCID.String(),
ParentHash: MockBlock.ParentHash().String(), ParentHash: MockBlock.ParentHash().String(),
TotalDifficulty: "1337", TotalDifficulty: MockBlock.Difficulty().String(),
Reward: "5000000000000000000",
}, },
UncleCIDs: []eth2.UncleModel{}, UncleCIDs: []eth2.UncleModel{},
TransactionCIDs: MockTrxMetaPostPublsh, TransactionCIDs: MockTrxMetaPostPublsh,
@ -264,7 +286,7 @@ var (
StorageNodeCIDs: map[common.Hash][]eth.StorageNodeModel{ StorageNodeCIDs: map[common.Hash][]eth.StorageNodeModel{
ContractLeafKey: { ContractLeafKey: {
{ {
CID: "mockStorageCID", CID: StorageCID.String(),
StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001", StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
Leaf: true, Leaf: true,
}, },
@ -279,8 +301,9 @@ var (
BlockNumber: "1", BlockNumber: "1",
BlockHash: MockBlock.Hash().String(), BlockHash: MockBlock.Hash().String(),
ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000", ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
CID: "mockHeaderCID", CID: HeaderCID.String(),
TotalDifficulty: "1337", TotalDifficulty: MockBlock.Difficulty().String(),
Reward: "5000000000000000000",
}, },
}, },
Transactions: MockTrxMetaPostPublsh, Transactions: MockTrxMetaPostPublsh,
@ -289,54 +312,67 @@ var (
StateNodes: MockStateMetaPostPublish, StateNodes: MockStateMetaPostPublish,
StorageNodes: []eth.StorageNodeWithStateKeyModel{ StorageNodes: []eth.StorageNodeWithStateKeyModel{
{ {
CID: "mockStorageCID", CID: StorageCID.String(),
Leaf: true, Leaf: true,
StateKey: ContractLeafKey.Hex(), StateKey: ContractLeafKey.Hex(),
StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001", StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
}, },
}, },
} }
headerCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(0), multihash.KECCAK_256)
trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(1), multihash.KECCAK_256)
rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(0), multihash.KECCAK_256)
rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(1), multihash.KECCAK_256)
state1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ValueBytes, multihash.KECCAK_256)
state2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AnotherValueBytes, multihash.KECCAK_256)
storageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageValue, multihash.KECCAK_256)
HeaderIPLD, _ = blocks.NewBlockWithCid(MockHeaderRlp, headerCID) HeaderIPLD, _ = blocks.NewBlockWithCid(MockHeaderRlp, HeaderCID)
Trx1IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(0), trx1CID) Trx1IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(0), Trx1CID)
Trx2IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(1), trx2CID) Trx2IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(1), Trx2CID)
Rct1IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(0), rct1CID) Rct1IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(0), Rct1CID)
Rct2IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(1), rct2CID) Rct2IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(1), Rct2CID)
State1IPLD, _ = blocks.NewBlockWithCid(ValueBytes, state1CID) State1IPLD, _ = blocks.NewBlockWithCid(ValueBytes, State1CID)
State2IPLD, _ = blocks.NewBlockWithCid(AnotherValueBytes, state2CID) State2IPLD, _ = blocks.NewBlockWithCid(AnotherValueBytes, State2CID)
StorageIPLD, _ = blocks.NewBlockWithCid(StorageValue, storageCID) StorageIPLD, _ = blocks.NewBlockWithCid(StorageValue, StorageCID)
MockIPLDs = eth.IPLDs{ MockIPLDs = eth.IPLDs{
BlockNumber: big.NewInt(1), BlockNumber: big.NewInt(1),
Headers: [][]byte{ Headers: []ipfs.BlockModel{
HeaderIPLD.RawData(), {
Data: HeaderIPLD.RawData(),
CID: HeaderIPLD.Cid().String(),
},
}, },
Transactions: [][]byte{ Transactions: []ipfs.BlockModel{
Trx1IPLD.RawData(), {
Trx2IPLD.RawData(), Data: Trx1IPLD.RawData(),
CID: Trx1IPLD.Cid().String(),
},
{
Data: Trx2IPLD.RawData(),
CID: Trx2IPLD.Cid().String(),
},
}, },
Receipts: [][]byte{ Receipts: []ipfs.BlockModel{
Rct1IPLD.RawData(), {
Rct2IPLD.RawData(), Data: Rct1IPLD.RawData(),
CID: Rct1IPLD.Cid().String(),
},
{
Data: Rct2IPLD.RawData(),
CID: Rct2IPLD.Cid().String(),
},
}, },
StateNodes: []eth2.StateNode{ StateNodes: []eth2.StateNode{
{ {
StateTrieKey: ContractLeafKey, StateTrieKey: ContractLeafKey,
Leaf: true, Leaf: true,
IPLD: State1IPLD.RawData(), IPLD: ipfs.BlockModel{
Data: State1IPLD.RawData(),
CID: State1IPLD.Cid().String(),
},
}, },
{ {
StateTrieKey: AnotherContractLeafKey, StateTrieKey: AnotherContractLeafKey,
Leaf: true, Leaf: true,
IPLD: State2IPLD.RawData(), IPLD: ipfs.BlockModel{
Data: State2IPLD.RawData(),
CID: State2IPLD.Cid().String(),
},
}, },
}, },
StorageNodes: []eth2.StorageNode{ StorageNodes: []eth2.StorageNode{
@ -344,7 +380,10 @@ var (
StateTrieKey: ContractLeafKey, StateTrieKey: ContractLeafKey,
StorageTrieKey: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), StorageTrieKey: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
Leaf: true, Leaf: true,
IPLD: StorageIPLD.RawData(), IPLD: ipfs.BlockModel{
Data: StorageIPLD.RawData(),
CID: StorageIPLD.Cid().String(),
},
}, },
}, },
} }
@ -353,8 +392,8 @@ var (
// createTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs // createTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common.Address) { func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common.Address) {
// make transactions // make transactions
trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), nil) trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), nil) trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
transactionSigner := types.MakeSigner(params.MainnetChainConfig, BlockNumber) transactionSigner := types.MakeSigner(params.MainnetChainConfig, BlockNumber)
mockCurve := elliptic.P256() mockCurve := elliptic.P256()
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader) mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
@ -375,16 +414,10 @@ func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common
} }
// make receipts // make receipts
mockReceipt1 := types.NewReceipt(common.HexToHash("0x0").Bytes(), false, 50) mockReceipt1 := types.NewReceipt(common.HexToHash("0x0").Bytes(), false, 50)
mockLog1 := &types.Log{ mockReceipt1.Logs = []*types.Log{MockLog1}
Topics: []common.Hash{mockTopic11, mockTopic12},
}
mockReceipt1.Logs = []*types.Log{mockLog1}
mockReceipt1.TxHash = signedTrx1.Hash() mockReceipt1.TxHash = signedTrx1.Hash()
mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100) mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
mockLog2 := &types.Log{ mockReceipt2.Logs = []*types.Log{MockLog2}
Topics: []common.Hash{mockTopic21, mockTopic22},
}
mockReceipt2.Logs = []*types.Log{mockLog2}
mockReceipt2.TxHash = signedTrx2.Hash() mockReceipt2.TxHash = signedTrx2.Hash()
return types.Transactions{signedTrx1, signedTrx2}, types.Receipts{mockReceipt1, mockReceipt2}, senderAddr return types.Transactions{signedTrx1, signedTrx2}, types.Receipts{mockReceipt1, mockReceipt2}, senderAddr
} }

View File

@ -27,6 +27,7 @@ type HeaderModel struct {
CID string `db:"cid"` CID string `db:"cid"`
TotalDifficulty string `db:"td"` TotalDifficulty string `db:"td"`
NodeID int64 `db:"node_id"` NodeID int64 `db:"node_id"`
Reward string `db:"reward"`
} }
// UncleModel is the db model for eth.uncle_cids // UncleModel is the db model for eth.uncle_cids
@ -36,6 +37,7 @@ type UncleModel struct {
BlockHash string `db:"block_hash"` BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"` ParentHash string `db:"parent_hash"`
CID string `db:"cid"` CID string `db:"cid"`
Reward string `db:"reward"`
} }
// TxModel is the db model for eth.transaction_cids // TxModel is the db model for eth.transaction_cids

View File

@ -20,12 +20,13 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
common2 "github.com/vulcanize/vulcanizedb/pkg/eth/converters/common"
"github.com/vulcanize/vulcanizedb/pkg/ipfs" "github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/dag_putters" "github.com/vulcanize/vulcanizedb/pkg/ipfs/dag_putters"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
) )
// IPLDPublisher satisfies the IPLDPublisher for ethereum // IPLDPublisher satisfies the IPLDPublisher for ethereum
@ -63,12 +64,14 @@ func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForI
if err != nil { if err != nil {
return nil, err return nil, err
} }
reward := common2.CalcEthBlockReward(ipldPayload.Block, ipldPayload.Receipts)
header := HeaderModel{ header := HeaderModel{
CID: headerCid, CID: headerCid,
ParentHash: ipldPayload.Block.ParentHash().String(), ParentHash: ipldPayload.Block.ParentHash().String(),
BlockNumber: ipldPayload.Block.Number().String(), BlockNumber: ipldPayload.Block.Number().String(),
BlockHash: ipldPayload.Block.Hash().String(), BlockHash: ipldPayload.Block.Hash().String(),
TotalDifficulty: ipldPayload.TotalDifficulty.String(), TotalDifficulty: ipldPayload.TotalDifficulty.String(),
Reward: reward.String(),
} }
// Process and publish uncles // Process and publish uncles
@ -78,10 +81,12 @@ func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForI
if err != nil { if err != nil {
return nil, err return nil, err
} }
uncleReward := common2.CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncle.Number.Int64())
uncleCids = append(uncleCids, UncleModel{ uncleCids = append(uncleCids, UncleModel{
CID: uncleCid, CID: uncleCid,
ParentHash: uncle.ParentHash.String(), ParentHash: uncle.ParentHash.String(),
BlockHash: uncle.Hash().String(), BlockHash: uncle.Hash().String(),
Reward: uncleReward.String(),
}) })
} }
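The reward figures come from the converters/common helpers: CalcEthBlockReward takes the block and its receipts, so it presumably adds transaction fees and uncle inclusion rewards to the static block reward (5 ETH in the Frontier/Homestead era, hence the 5000000000000000000 wei fixtures), while the uncle miner reward scales the static reward by (uncleNumber + 8 - blockNumber) / 8. A sketch of that last formula (illustrative only; the real implementation also handles later reward schedules):

// calcUncleReward computes the classic Ethereum uncle miner reward for a given
// static block reward: staticReward * (uncleNumber + 8 - blockNumber) / 8.
func calcUncleReward(blockNumber, uncleNumber int64, staticBlockReward *big.Int) *big.Int {
	reward := new(big.Int).Set(staticBlockReward)
	reward.Mul(reward, big.NewInt(uncleNumber+8-blockNumber))
	return reward.Div(reward, big.NewInt(8))
}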

View File

@ -45,16 +45,16 @@ var _ = Describe("Publisher", func() {
Describe("Publish", func() { Describe("Publish", func() {
It("Publishes the passed IPLDPayload objects to IPFS and returns a CIDPayload for indexing", func() { It("Publishes the passed IPLDPayload objects to IPFS and returns a CIDPayload for indexing", func() {
mockHeaderDagPutter.CIDsToReturn = []string{"mockHeaderCID"} mockHeaderDagPutter.CIDsToReturn = []string{mocks.HeaderCID.String()}
mockTrxDagPutter.CIDsToReturn = []string{"mockTrxCID1", "mockTrxCID2"} mockTrxDagPutter.CIDsToReturn = []string{mocks.Trx1CID.String(), mocks.Trx2CID.String()}
mockRctDagPutter.CIDsToReturn = []string{"mockRctCID1", "mockRctCID2"} mockRctDagPutter.CIDsToReturn = []string{mocks.Rct1CID.String(), mocks.Rct2CID.String()}
val1 := common.BytesToHash(mocks.MockConvertedPayload.StateNodes[0].Value) val1 := common.BytesToHash(mocks.MockConvertedPayload.StateNodes[0].Value)
val2 := common.BytesToHash(mocks.MockConvertedPayload.StateNodes[1].Value) val2 := common.BytesToHash(mocks.MockConvertedPayload.StateNodes[1].Value)
mockStateDagPutter.CIDsToReturn = map[common.Hash][]string{ mockStateDagPutter.CIDsToReturn = map[common.Hash][]string{
val1: {"mockStateCID1"}, val1: {mocks.State1CID.String()},
val2: {"mockStateCID2"}, val2: {mocks.State2CID.String()},
} }
mockStorageDagPutter.CIDsToReturn = []string{"mockStorageCID"} mockStorageDagPutter.CIDsToReturn = []string{mocks.StorageCID.String()}
publisher := eth.IPLDPublisher{ publisher := eth.IPLDPublisher{
HeaderPutter: mockHeaderDagPutter, HeaderPutter: mockHeaderDagPutter,
TransactionPutter: mockTrxDagPutter, TransactionPutter: mockTrxDagPutter,
@ -69,6 +69,7 @@ var _ = Describe("Publisher", func() {
Expect(cidPayload.HeaderCID.TotalDifficulty).To(Equal(mocks.MockConvertedPayload.TotalDifficulty.String())) Expect(cidPayload.HeaderCID.TotalDifficulty).To(Equal(mocks.MockConvertedPayload.TotalDifficulty.String()))
Expect(cidPayload.HeaderCID.BlockNumber).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockNumber)) Expect(cidPayload.HeaderCID.BlockNumber).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockNumber))
Expect(cidPayload.HeaderCID.BlockHash).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockHash)) Expect(cidPayload.HeaderCID.BlockHash).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockHash))
Expect(cidPayload.HeaderCID.Reward).To(Equal(mocks.MockCIDPayload.HeaderCID.Reward))
Expect(cidPayload.UncleCIDs).To(Equal(mocks.MockCIDPayload.UncleCIDs)) Expect(cidPayload.UncleCIDs).To(Equal(mocks.MockCIDPayload.UncleCIDs))
Expect(cidPayload.HeaderCID).To(Equal(mocks.MockCIDPayload.HeaderCID)) Expect(cidPayload.HeaderCID).To(Equal(mocks.MockCIDPayload.HeaderCID))
Expect(len(cidPayload.TransactionCIDs)).To(Equal(2)) Expect(len(cidPayload.TransactionCIDs)).To(Equal(2))

View File

@ -193,6 +193,7 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, blockNum
pgStr += fmt.Sprintf(` AND transaction_cids.src = ANY($%d::VARCHAR(66)[])`, id) pgStr += fmt.Sprintf(` AND transaction_cids.src = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(txFilter.Src)) args = append(args, pq.Array(txFilter.Src))
} }
pgStr += ` ORDER BY transaction_cids.index`
return results, tx.Select(&results, pgStr, args...) return results, tx.Select(&results, pgStr, args...)
} }
@ -224,7 +225,7 @@ func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, b
args = append(args, pq.Array(rctFilter.Contracts)) args = append(args, pq.Array(rctFilter.Contracts))
id++ id++
// Filter on topics if there are any // Filter on topics if there are any
if len(rctFilter.Topics) > 0 { if hasTopics(rctFilter.Topics) {
pgStr += " AND (" pgStr += " AND ("
first := true first := true
for i, topicSet := range rctFilter.Topics { for i, topicSet := range rctFilter.Topics {
@ -250,7 +251,7 @@ func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, b
pgStr += ")" pgStr += ")"
} else { // If there are no contract addresses to filter on } else { // If there are no contract addresses to filter on
// Filter on topics if there are any // Filter on topics if there are any
if len(rctFilter.Topics) > 0 { if hasTopics(rctFilter.Topics) {
pgStr += " AND ((" pgStr += " AND (("
first := true first := true
for i, topicSet := range rctFilter.Topics { for i, topicSet := range rctFilter.Topics {
@ -279,10 +280,20 @@ func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, b
args = append(args, pq.Array(trxIds)) args = append(args, pq.Array(trxIds))
} }
} }
pgStr += ` ORDER BY transaction_cids.index`
receiptCids := make([]ReceiptModel, 0) receiptCids := make([]ReceiptModel, 0)
return receiptCids, tx.Select(&receiptCids, pgStr, args...) return receiptCids, tx.Select(&receiptCids, pgStr, args...)
} }
func hasTopics(topics [][]string) bool {
for _, topicSet := range topics {
if len(topicSet) > 0 {
return true
}
}
return false
}
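The new hasTopics helper replaces the bare len(rctFilter.Topics) > 0 checks so that a filter whose topic slots are all empty no longer produces an empty AND (...) clause in the receipt query. A small self-contained sketch of the difference (the topic value below is a placeholder, not a real hash):

package main

import "fmt"

// hasTopics mirrors the helper above: a filter only counts as having topics
// when at least one topic slot is non-empty.
func hasTopics(topics [][]string) bool {
	for _, topicSet := range topics {
		if len(topicSet) > 0 {
			return true
		}
	}
	return false
}

func main() {
	allEmpty := [][]string{{}, {}, {}, {}}                   // four topic slots declared, none constrained
	topic0Only := [][]string{{"0xplaceholderTopicHash"}, {}} // placeholder value, illustrative only

	// The old len(...) > 0 check is satisfied by both filters; the new helper
	// only fires when a clause would actually constrain the query.
	fmt.Println(len(allEmpty) > 0, hasTopics(allEmpty))     // true false
	fmt.Println(len(topic0Only) > 0, hasTopics(topic0Only)) // true true
}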
// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided blockheight that conform to the provided filter parameters // RetrieveStateCIDs retrieves and returns all of the state node cids at the provided blockheight that conform to the provided filter parameters
func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, blockNumber int64) ([]StateNodeModel, error) { func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, blockNumber int64) ([]StateNodeModel, error) {
log.Debug("retrieving state cids for block ", blockNumber) log.Debug("retrieving state cids for block ", blockNumber)
@ -474,7 +485,8 @@ func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.H
func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]TxModel, error) { func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]TxModel, error) {
log.Debug("retrieving tx cids for block id ", headerID) log.Debug("retrieving tx cids for block id ", headerID)
pgStr := `SELECT * FROM eth.transaction_cids pgStr := `SELECT * FROM eth.transaction_cids
WHERE header_id = $1` WHERE header_id = $1
ORDER BY index`
var txCIDs []TxModel var txCIDs []TxModel
return txCIDs, tx.Select(&txCIDs, pgStr, headerID) return txCIDs, tx.Select(&txCIDs, pgStr, headerID)
} }
@ -482,8 +494,13 @@ func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) (
// RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs // RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]ReceiptModel, error) { func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]ReceiptModel, error) {
log.Debugf("retrieving receipt cids for tx ids %v", txIDs) log.Debugf("retrieving receipt cids for tx ids %v", txIDs)
pgStr := `SELECT * FROM eth.receipt_cids pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid,
WHERE tx_id = ANY($1::INTEGER[])` receipt_cids.contract, receipt_cids.topic0s, receipt_cids.topic1s,
receipt_cids.topic2s, receipt_cids.topic3s
FROM eth.receipt_cids, eth.transaction_cids
WHERE tx_id = ANY($1::INTEGER[])
AND receipt_cids.tx_id = transaction_cids.id
ORDER BY transaction_cids.index`
var rctCIDs []ReceiptModel var rctCIDs []ReceiptModel
return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txIDs)) return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txIDs))
} }
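With both RetrieveTxCIDsByHeaderID and RetrieveReceiptCIDsByTxIDs now ordering by transaction_cids.index, a caller can line receipts up positionally with the block's transactions. A hedged sketch of such a caller follows; the import path, the CIDRetriever constructor-free usage, and the ID field on TxModel are assumptions, while the two method signatures are taken from this diff, and the i-to-i pairing assumes every transaction has exactly one receipt.

package example

import (
	"fmt"

	"github.com/jmoiron/sqlx"

	// import path assumed for the package defining CIDRetriever, TxModel and ReceiptModel
	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

// printBlockReceipts is a hypothetical caller: because both retriever methods
// order by transaction_cids.index, txs[i] and rcts[i] describe the same transaction.
func printBlockReceipts(tx *sqlx.Tx, retriever *eth.CIDRetriever, headerID int64) error {
	txs, err := retriever.RetrieveTxCIDsByHeaderID(tx, headerID)
	if err != nil {
		return err
	}
	txIDs := make([]int64, len(txs))
	for i, t := range txs {
		txIDs[i] = t.ID // ID field assumed on TxModel (the database row id)
	}
	rcts, err := retriever.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
	if err != nil {
		return err
	}
	for i, rct := range rcts {
		fmt.Printf("tx %d: tx cid %s, receipt cid %s\n", i, txs[i].CID, rct.CID)
	}
	return nil
}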

View File

@ -246,11 +246,11 @@ var _ = Describe("Retriever", func() {
Expect(eth.ReceiptModelsContainsCID(cidWrapper.Receipts, mocks.MockCIDWrapper.Receipts[1].CID)).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cidWrapper.Receipts, mocks.MockCIDWrapper.Receipts[1].CID)).To(BeTrue())
Expect(len(cidWrapper.StateNodes)).To(Equal(2)) Expect(len(cidWrapper.StateNodes)).To(Equal(2))
for _, stateNode := range cidWrapper.StateNodes { for _, stateNode := range cidWrapper.StateNodes {
if stateNode.CID == "mockStateCID1" { if stateNode.CID == mocks.State1CID.String() {
Expect(stateNode.StateKey).To(Equal(mocks.ContractLeafKey.Hex())) Expect(stateNode.StateKey).To(Equal(mocks.ContractLeafKey.Hex()))
Expect(stateNode.Leaf).To(Equal(true)) Expect(stateNode.Leaf).To(Equal(true))
} }
if stateNode.CID == "mockStateCID2" { if stateNode.CID == mocks.State2CID.String() {
Expect(stateNode.StateKey).To(Equal(mocks.AnotherContractLeafKey.Hex())) Expect(stateNode.StateKey).To(Equal(mocks.AnotherContractLeafKey.Hex()))
Expect(stateNode.Leaf).To(Equal(true)) Expect(stateNode.Leaf).To(Equal(true))
} }
@ -335,13 +335,13 @@ var _ = Describe("Retriever", func() {
Expect(cidWrapper5.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber)) Expect(cidWrapper5.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper5.Headers)).To(Equal(0)) Expect(len(cidWrapper5.Headers)).To(Equal(0))
Expect(len(cidWrapper5.Transactions)).To(Equal(2)) Expect(len(cidWrapper5.Transactions)).To(Equal(2))
Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, "mockTrxCID1")).To(BeTrue()) Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, mocks.Trx1CID.String())).To(BeTrue())
Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, "mockTrxCID2")).To(BeTrue()) Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, mocks.Trx2CID.String())).To(BeTrue())
Expect(len(cidWrapper5.StateNodes)).To(Equal(0)) Expect(len(cidWrapper5.StateNodes)).To(Equal(0))
Expect(len(cidWrapper5.StorageNodes)).To(Equal(0)) Expect(len(cidWrapper5.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper5.Receipts)).To(Equal(2)) Expect(len(cidWrapper5.Receipts)).To(Equal(2))
Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, "mockRctCID1")).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, mocks.Rct1CID.String())).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, "mockRctCID2")).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, mocks.Rct2CID.String())).To(BeTrue())
cids6, empty, err := retriever.Retrieve(rctsForSelectCollectedTrxs, 1) cids6, empty, err := retriever.Retrieve(rctsForSelectCollectedTrxs, 1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -379,7 +379,7 @@ var _ = Describe("Retriever", func() {
HeaderID: cidWrapper7.StateNodes[0].HeaderID, HeaderID: cidWrapper7.StateNodes[0].HeaderID,
Leaf: true, Leaf: true,
StateKey: mocks.ContractLeafKey.Hex(), StateKey: mocks.ContractLeafKey.Hex(),
CID: "mockStateCID1", CID: mocks.State1CID.String(),
})) }))
_, empty, err = retriever.Retrieve(rctTopicsAndContractFilterFail, 1) _, empty, err = retriever.Retrieve(rctTopicsAndContractFilterFail, 1)

View File

@ -19,6 +19,8 @@ package eth
import ( import (
"math/big" "math/big"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
) )
@ -77,10 +79,10 @@ type CIDWrapper struct {
// Returned by IPLDFetcher and ResponseFilterer // Returned by IPLDFetcher and ResponseFilterer
type IPLDs struct { type IPLDs struct {
BlockNumber *big.Int BlockNumber *big.Int
Headers [][]byte Headers []ipfs.BlockModel
Uncles [][]byte Uncles []ipfs.BlockModel
Transactions [][]byte Transactions []ipfs.BlockModel
Receipts [][]byte Receipts []ipfs.BlockModel
StateNodes []StateNode StateNodes []StateNode
StorageNodes []StorageNode StorageNodes []StorageNode
} }
@ -92,13 +94,13 @@ func (i IPLDs) Height() int64 {
type StateNode struct { type StateNode struct {
StateTrieKey common.Hash StateTrieKey common.Hash
IPLD []byte IPLD ipfs.BlockModel
Leaf bool Leaf bool
} }
type StorageNode struct { type StorageNode struct {
StateTrieKey common.Hash StateTrieKey common.Hash
StorageTrieKey common.Hash StorageTrieKey common.Hash
IPLD []byte IPLD ipfs.BlockModel
Leaf bool Leaf bool
} }
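StateNode and StorageNode now carry a full ipfs.BlockModel rather than a bare byte slice, so subscribers receive the raw node bytes together with the CID that addresses them. A minimal subscriber-side sketch, assuming BlockModel exposes Data ([]byte, used elsewhere in this commit) and CID (string, an assumption), and an assumed import path for this eth package:

package example

import (
	"fmt"

	// import path assumed for the package defining StorageNode
	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

// printStorageLeaf shows what a subscriber can now do with a StorageNode:
// read the raw trie-node bytes and the CID that addresses them side by side.
func printStorageLeaf(node eth.StorageNode) {
	if !node.Leaf {
		return // only leaf nodes carry contract storage values
	}
	fmt.Printf("storage leaf: state key %s, storage key %s, %d bytes, cid %s\n",
		node.StateTrieKey.Hex(), node.StorageTrieKey.Hex(), len(node.IPLD.Data), node.IPLD.CID)
}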

View File

@ -23,14 +23,11 @@ import (
"time" "time"
"github.com/btcsuite/btcd/rpcclient" "github.com/btcsuite/btcd/rpcclient"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/vulcanizedb/pkg/config" "github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/eth"
"github.com/vulcanize/vulcanizedb/pkg/eth/client" "github.com/vulcanize/vulcanizedb/pkg/eth/client"
vRpc "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/eth/core" "github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/node" "github.com/vulcanize/vulcanizedb/pkg/eth/node"
"github.com/vulcanize/vulcanizedb/pkg/postgres" "github.com/vulcanize/vulcanizedb/pkg/postgres"
@ -191,10 +188,6 @@ func getEthNodeAndClient(path string) (core.Node, interface{}, error) {
return core.Node{}, nil, err return core.Node{}, nil, err
} }
rpcClient := client.NewRPCClient(rawRPCClient, path) rpcClient := client.NewRPCClient(rawRPCClient, path)
ethClient := ethclient.NewClient(rawRPCClient)
vdbEthClient := client.NewEthClient(ethClient)
vdbNode := node.MakeNode(rpcClient) vdbNode := node.MakeNode(rpcClient)
transactionConverter := vRpc.NewRPCTransactionConverter(ethClient) return vdbNode, rpcClient, nil
blockChain := eth.NewBlockChain(vdbEthClient, rpcClient, vdbNode, transactionConverter)
return blockChain.Node(), rpcClient, nil
} }

View File

@ -18,6 +18,8 @@ package shared
import ( import (
"bytes" "bytes"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
) )
// ListContainsString used to check if a list of strings contains a particular string // ListContainsString used to check if a list of strings contains a particular string
@ -30,10 +32,10 @@ func ListContainsString(sss []string, s string) bool {
return false return false
} }
// ListContainsBytes used to check if a list of byte arrays contains a particular byte array // IPLDsContainBytes used to check if a list of IPLD block models contains a particular byte array
func ListContainsBytes(bbb [][]byte, b []byte) bool { func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool {
for _, by := range bbb { for _, ipld := range iplds {
if bytes.Equal(by, b) { if bytes.Equal(ipld.Data, b) {
return true return true
} }
} }
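The renamed helper matches on the block's raw Data and ignores the CID, which is all the filterer tests need. A short usage sketch; the shared package's import path is assumed, while BlockModel's Data field is taken from the function body above:

package main

import (
	"fmt"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
	// import path assumed for this shared package
	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

func main() {
	blocks := []ipfs.BlockModel{
		{Data: []byte("block-one")}, // CID fields omitted; only Data is compared
		{Data: []byte("block-two")},
	}
	fmt.Println(shared.IPLDsContainBytes(blocks, []byte("block-two"))) // true
	fmt.Println(shared.IPLDsContainBytes(blocks, []byte("missing")))   // false
}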