super node backfill breaks batch call into smaller bins; retrieve gap

test
This commit is contained in:
Ian Norden 2019-11-01 14:03:28 -05:00
parent c16ac026db
commit 5be205ffa6
8 changed files with 214 additions and 72 deletions

View File

@ -120,5 +120,5 @@ func newBackFiller() (super_node.BackFillInterface, error) {
  } else {
    frequency = time.Duration(freq)
  }
- return super_node.NewBackFillService(ipfsPath, &db, archivalRPCClient, time.Minute*frequency)
+ return super_node.NewBackFillService(ipfsPath, &db, archivalRPCClient, time.Minute*frequency, super_node.DefaultMaxBatchSize)
}

View File

@ -23,8 +23,6 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher" "github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
@ -34,9 +32,10 @@ import (
)
const (
- DefaultMaxBatchSize uint64 = 5000
+ DefaultMaxBatchSize uint64 = 1000
- defaultMaxBatchNumber int64 = 100
+ defaultMaxBatchNumber int64 = 10
)
// BackFillInterface for filling in gaps in the super node
type BackFillInterface interface {
  // Method for the super node to periodically check for and fill in gaps in its data using an archival node
@ -58,11 +57,11 @@ type BackFillService struct {
  // Check frequency
  GapCheckFrequency time.Duration
  // size of batch fetches
- batchSize uint64
+ BatchSize uint64
}
// NewBackFillService returns a new BackFillInterface
- func NewBackFillService(ipfsPath string, db *postgres.DB, archivalNodeRPCClient core.RpcClient, freq time.Duration) (BackFillInterface, error) {
+ func NewBackFillService(ipfsPath string, db *postgres.DB, archivalNodeRPCClient core.RpcClient, freq time.Duration, batchSize uint64) (BackFillInterface, error) {
  publisher, err := ipfs.NewIPLDPublisher(ipfsPath)
  if err != nil {
    return nil, err
@ -74,7 +73,7 @@ func NewBackFillService(ipfsPath string, db *postgres.DB, archivalNodeRPCClient
    Retriever: NewCIDRetriever(db),
    Fetcher: fetcher.NewStateDiffFetcher(archivalNodeRPCClient),
    GapCheckFrequency: freq,
- batchSize: DefaultMaxBatchSize,
+ BatchSize: batchSize,
  }, nil
}
@ -118,6 +117,7 @@ func (bfs *BackFillService) FillGaps(wg *sync.WaitGroup, quitChan <-chan bool) {
}
func (bfs *BackFillService) fillGaps(startingBlock, endingBlock uint64) {
+ log.Infof("going to fill in gap from %d to %d", startingBlock, endingBlock)
  errChan := make(chan error)
  done := make(chan bool)
  backFillInitErr := bfs.BackFill(startingBlock, endingBlock, errChan, done)
@ -130,12 +130,12 @@ func (bfs *BackFillService) fillGaps(startingBlock, endingBlock uint64) {
    case err := <-errChan:
      log.Error(err)
    case <-done:
+ log.Infof("finished filling in gap from %d to %d", startingBlock, endingBlock)
      return
    }
  }
}
// BackFill fetches, processes, and returns utils.StorageDiffs over a range of blocks
// It splits a large range up into smaller chunks, batch fetching and processing those chunks concurrently
func (bfs *BackFillService) BackFill(startingBlock, endingBlock uint64, errChan chan error, done chan bool) error {
@ -144,14 +144,14 @@ func (bfs *BackFillService) BackFill(startingBlock, endingBlock uint64, errChan
  }
  // break the range up into bins of smaller ranges
  length := endingBlock - startingBlock + 1
- numberOfBins := length / bfs.batchSize
+ numberOfBins := length / bfs.BatchSize
- remainder := length % bfs.batchSize
+ remainder := length % bfs.BatchSize
  if remainder != 0 {
    numberOfBins++
  }
  blockRangeBins := make([][]uint64, numberOfBins)
  for i := range blockRangeBins {
- nextBinStart := startingBlock + uint64(bfs.batchSize)
+ nextBinStart := startingBlock + uint64(bfs.BatchSize)
    if nextBinStart > endingBlock {
      nextBinStart = endingBlock + 1
    }
@ -166,7 +166,7 @@ func (bfs *BackFillService) BackFill(startingBlock, endingBlock uint64, errChan
  // int64 for atomic incrementing and decrementing to track the number of active processing goroutines we have
  var activeCount int64
  // channel for processing goroutines to signal when they are done
- processingDone := make(chan bool)
+ processingDone := make(chan [2]uint64)
  forwardDone := make(chan bool)
  // for each block range bin spin up a goroutine to batch fetch and process state diffs for that range
@ -184,29 +184,23 @@ func (bfs *BackFillService) BackFill(startingBlock, endingBlock uint64, errChan
        errChan <- fetchErr
      }
      for _, payload := range payloads {
- stateDiff := new(statediff.StateDiff)
- stateDiffDecodeErr := rlp.DecodeBytes(payload.StateDiffRlp, stateDiff)
- if stateDiffDecodeErr != nil {
- errChan <- stateDiffDecodeErr
- continue
- }
        ipldPayload, convertErr := bfs.Converter.Convert(payload)
        if convertErr != nil {
- log.Error(convertErr)
+ errChan <- convertErr
          continue
        }
        cidPayload, publishErr := bfs.Publisher.Publish(ipldPayload)
        if publishErr != nil {
- log.Error(publishErr)
+ errChan <- publishErr
          continue
        }
        indexErr := bfs.Repository.Index(cidPayload)
        if indexErr != nil {
- log.Error(indexErr)
+ errChan <- indexErr
        }
      }
      // when this goroutine is done, send out a signal
- processingDone <- true
+ processingDone <- [2]uint64{blockHeights[0], blockHeights[len(blockHeights)-1]}
    }(blockHeights)
  }
}()
@ -218,13 +212,14 @@ func (bfs *BackFillService) BackFill(startingBlock, endingBlock uint64, errChan
  goroutinesFinished := 0
  for {
    select {
- case <-processingDone:
+ case doneWithHeights := <-processingDone:
      atomic.AddInt64(&activeCount, -1)
      select {
      // if we are waiting for a process to finish, signal that one has
      case forwardDone <- true:
      default:
      }
+ log.Infof("finished filling in gap sub-bin from %d to %d", doneWithHeights[0], doneWithHeights[1])
      goroutinesFinished++
      if goroutinesFinished == int(numberOfBins) {
        done <- true
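For reference, a minimal standalone sketch of the bin-splitting arithmetic introduced above. splitIntoBins is a hypothetical helper name, not part of this commit; it divides an inclusive block range into consecutive sub-ranges of at most batchSize heights, mirroring what BackFill now does before spawning its per-bin goroutines:

package main

import "fmt"

// splitIntoBins divides the inclusive range [start, end] into consecutive
// sub-ranges of at most batchSize block heights, using the same arithmetic
// as the diff above (callers are expected to ensure start <= end).
func splitIntoBins(start, end, batchSize uint64) [][]uint64 {
	length := end - start + 1
	numberOfBins := length / batchSize
	if length%batchSize != 0 {
		numberOfBins++
	}
	bins := make([][]uint64, numberOfBins)
	for i := range bins {
		nextBinStart := start + batchSize
		if nextBinStart > end {
			nextBinStart = end + 1
		}
		blockHeights := make([]uint64, 0, nextBinStart-start)
		for j := start; j < nextBinStart; j++ {
			blockHeights = append(blockHeights, j)
		}
		bins[i] = blockHeights
		start = nextBinStart
	}
	return bins
}

func main() {
	// A gap of 2,500 blocks with a batch size of 1,000 yields three bins:
	// [0..999], [1000..1999], [2000..2499].
	for _, bin := range splitIntoBins(0, 2499, 1000) {
		fmt.Printf("bin covers %d to %d (%d heights)\n", bin[0], bin[len(bin)-1], len(bin))
	}
}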

View File

@ -47,7 +47,7 @@ var _ = Describe("BackFiller", func() {
  }
  mockRetriever := &mocks3.MockCIDRetriever{
    FirstBlockNumberToReturn: 1,
- GapsToRetrieve: [][2]int64{
+ GapsToRetrieve: [][2]uint64{
      {
        100, 101,
      },
@ -63,9 +63,10 @@ var _ = Describe("BackFiller", func() {
    Repository: mockCidRepo,
    Publisher: mockPublisher,
    Converter: mockConverter,
- StateDiffFetcher: mockFetcher,
+ Fetcher: mockFetcher,
    Retriever: mockRetriever,
    GapCheckFrequency: time.Second * 2,
+ BatchSize: super_node.DefaultMaxBatchSize,
  }
  wg := &sync.WaitGroup{}
  quitChan := make(chan bool, 1)
@ -100,7 +101,7 @@ var _ = Describe("BackFiller", func() {
  }
  mockRetriever := &mocks3.MockCIDRetriever{
    FirstBlockNumberToReturn: 1,
- GapsToRetrieve: [][2]int64{
+ GapsToRetrieve: [][2]uint64{
      {
        100, 100,
      },
@ -115,9 +116,10 @@ var _ = Describe("BackFiller", func() {
    Repository: mockCidRepo,
    Publisher: mockPublisher,
    Converter: mockConverter,
- StateDiffFetcher: mockFetcher,
+ Fetcher: mockFetcher,
    Retriever: mockRetriever,
    GapCheckFrequency: time.Second * 2,
+ BatchSize: super_node.DefaultMaxBatchSize,
  }
  wg := &sync.WaitGroup{}
  quitChan := make(chan bool, 1)
@ -149,7 +151,7 @@ var _ = Describe("BackFiller", func() {
  }
  mockRetriever := &mocks3.MockCIDRetriever{
    FirstBlockNumberToReturn: 3,
- GapsToRetrieve: [][2]int64{},
+ GapsToRetrieve: [][2]uint64{},
  }
  mockFetcher := &mocks2.StateDiffFetcher{
    PayloadsToReturn: map[uint64]statediff.Payload{
@ -161,9 +163,10 @@ var _ = Describe("BackFiller", func() {
    Repository: mockCidRepo,
    Publisher: mockPublisher,
    Converter: mockConverter,
- StateDiffFetcher: mockFetcher,
+ Fetcher: mockFetcher,
    Retriever: mockRetriever,
    GapCheckFrequency: time.Second * 2,
+ BatchSize: super_node.DefaultMaxBatchSize,
  }
  wg := &sync.WaitGroup{}
  quitChan := make(chan bool, 1)

View File

@ -7,7 +7,7 @@ import (
// MockCIDRetriever is a mock CID retriever for use in tests
type MockCIDRetriever struct {
- GapsToRetrieve [][2]int64
+ GapsToRetrieve [][2]uint64
  GapsToRetrieveErr error
  CalledTimes int
  FirstBlockNumberToReturn int64
@ -30,15 +30,15 @@ func (mcr *MockCIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
}
// RetrieveGapsInData mock method
- func (mcr *MockCIDRetriever) RetrieveGapsInData() ([][2]int64, error) {
+ func (mcr *MockCIDRetriever) RetrieveGapsInData() ([][2]uint64, error) {
  mcr.CalledTimes++
  return mcr.GapsToRetrieve, mcr.GapsToRetrieveErr
}
// SetGapsToRetrieve mock method
- func (mcr *MockCIDRetriever) SetGapsToRetrieve(gaps [][2]int64) {
+ func (mcr *MockCIDRetriever) SetGapsToRetrieve(gaps [][2]uint64) {
  if mcr.GapsToRetrieve == nil {
- mcr.GapsToRetrieve = make([][2]int64, 0)
+ mcr.GapsToRetrieve = make([][2]uint64, 0)
  }
  mcr.GapsToRetrieve = append(mcr.GapsToRetrieve, gaps...)
}

View File

@ -26,13 +26,12 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/super_node" "github.com/vulcanize/vulcanizedb/pkg/super_node"
) )
var _ = Describe("Repository", func() {
var ( var (
db *postgres.DB db *postgres.DB
err error err error
repo super_node.CIDRepository repo super_node.CIDRepository
) )
var _ = Describe("Repository", func() {
BeforeEach(func() { BeforeEach(func() {
db, err = super_node.SetupDB() db, err = super_node.SetupDB()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())

View File

@ -33,7 +33,7 @@ type CIDRetriever interface {
  RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error)
  RetrieveLastBlockNumber() (int64, error)
  RetrieveFirstBlockNumber() (int64, error)
- RetrieveGapsInData() ([][2]int64, error)
+ RetrieveGapsInData() ([][2]uint64, error)
}
// EthCIDRetriever is the underlying struct supporting the CIDRetriever interface
@ -312,12 +312,12 @@ func (ecr *EthCIDRetriever) retrieveStorageCIDs(tx *sqlx.Tx, streamFilters confi
}
type gap struct {
- Start int64 `db:"start"`
+ Start uint64 `db:"start"`
- Stop int64 `db:"stop"`
+ Stop uint64 `db:"stop"`
}
// RetrieveGapsInData is used to find the the block numbers at which we are missing data in the db
- func (ecr *EthCIDRetriever) RetrieveGapsInData() ([][2]int64, error) {
+ func (ecr *EthCIDRetriever) RetrieveGapsInData() ([][2]uint64, error) {
  pgStr := `SELECT header_cids.block_number + 1 AS start, min(fr.block_number) - 1 AS stop FROM header_cids
    LEFT JOIN header_cids r on header_cids.block_number = r.block_number - 1
    LEFT JOIN header_cids fr on header_cids.block_number < fr.block_number
@ -328,9 +328,9 @@ func (ecr *EthCIDRetriever) RetrieveGapsInData() ([][2]int64, error) {
  if err != nil {
    return nil, err
  }
- gapRanges := make([][2]int64, 0)
+ gapRanges := make([][2]uint64, 0)
  for _, gap := range gaps {
- gapRanges = append(gapRanges, [2]int64{gap.Start, gap.Stop})
+ gapRanges = append(gapRanges, [2]uint64{gap.Start, gap.Stop})
  }
  return gapRanges, nil
}
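As an aside, the SQL above reports, for each indexed header whose direct successor is missing, the range from that block number plus one up to one before the next indexed header; it never reports a gap below the earliest indexed block. A small in-memory analogue (gapsIn is a hypothetical helper, not part of the package) illustrates the expected output:

package main

import (
	"fmt"
	"sort"
)

// gapsIn is an in-memory analogue of the RetrieveGapsInData query above:
// for each stored block number whose successor is missing, it reports the
// range from that number + 1 up to the next stored number - 1.
func gapsIn(storedHeights []uint64) [][2]uint64 {
	sort.Slice(storedHeights, func(i, j int) bool { return storedHeights[i] < storedHeights[j] })
	gaps := make([][2]uint64, 0)
	for i := 0; i < len(storedHeights)-1; i++ {
		cur, next := storedHeights[i], storedHeights[i+1]
		if next > cur+1 {
			gaps = append(gaps, [2]uint64{cur + 1, next - 1})
		}
	}
	return gaps
}

func main() {
	// Headers indexed at 1, 2, 3, 7, 8 and 12 leave two gaps: [4 6] and [9 11].
	fmt.Println(gapsIn([]uint64{1, 2, 3, 7, 8, 12}))
}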

View File

@ -23,6 +23,7 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/config" "github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/ipfs" "github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks" "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node" "github.com/vulcanize/vulcanizedb/pkg/super_node"
@ -177,12 +178,15 @@ var (
)
var _ = Describe("Retriever", func() {
+ var (
+ db *postgres.DB
+ repo super_node.CIDRepository
+ )
  BeforeEach(func() {
+ var err error
    db, err = super_node.SetupDB()
    Expect(err).ToNot(HaveOccurred())
    repo = super_node.NewCIDRepository(db)
- err = repo.Index(mocks.MockCIDPayload)
- Expect(err).ToNot(HaveOccurred())
    retriever = super_node.NewCIDRetriever(db)
  })
  AfterEach(func() {
@ -190,6 +194,10 @@ var _ = Describe("Retriever", func() {
  })
  Describe("RetrieveCIDs", func() {
+ BeforeEach(func() {
+ indexErr := repo.Index(mocks.MockCIDPayload)
+ Expect(indexErr).ToNot(HaveOccurred())
+ })
    It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() {
      cidWrapper, err := retriever.RetrieveCIDs(openFilter, 1)
      Expect(err).ToNot(HaveOccurred())
@ -216,12 +224,10 @@ var _ = Describe("Retriever", func() {
      Expect(len(cidWrapper.StorageNodes)).To(Equal(1))
      Expect(cidWrapper.StorageNodes).To(Equal(mocks.MockCIDWrapper.StorageNodes))
    })
- })
- Describe("RetrieveCIDs", func() {
    It("Applies filters from the provided config.Subscription", func() {
- cidWrapper1, err := retriever.RetrieveCIDs(rctContractFilter, 1)
+ cidWrapper1, err1 := retriever.RetrieveCIDs(rctContractFilter, 1)
- Expect(err).ToNot(HaveOccurred())
+ Expect(err1).ToNot(HaveOccurred())
      Expect(cidWrapper1.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
      Expect(len(cidWrapper1.Headers)).To(Equal(0))
      Expect(len(cidWrapper1.Transactions)).To(Equal(0))
@ -230,8 +236,8 @@ var _ = Describe("Retriever", func() {
      Expect(len(cidWrapper1.Receipts)).To(Equal(1))
      Expect(cidWrapper1.Receipts[0]).To(Equal("mockRctCID2"))
- cidWrapper2, err := retriever.RetrieveCIDs(rctTopicsFilter, 1)
+ cidWrapper2, err2 := retriever.RetrieveCIDs(rctTopicsFilter, 1)
- Expect(err).ToNot(HaveOccurred())
+ Expect(err2).ToNot(HaveOccurred())
      Expect(cidWrapper2.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
      Expect(len(cidWrapper2.Headers)).To(Equal(0))
      Expect(len(cidWrapper2.Transactions)).To(Equal(0))
@ -240,8 +246,8 @@ var _ = Describe("Retriever", func() {
      Expect(len(cidWrapper2.Receipts)).To(Equal(1))
      Expect(cidWrapper2.Receipts[0]).To(Equal("mockRctCID1"))
- cidWrapper3, err := retriever.RetrieveCIDs(rctTopicsAndContractFilter, 1)
+ cidWrapper3, err3 := retriever.RetrieveCIDs(rctTopicsAndContractFilter, 1)
- Expect(err).ToNot(HaveOccurred())
+ Expect(err3).ToNot(HaveOccurred())
      Expect(cidWrapper3.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
      Expect(len(cidWrapper3.Headers)).To(Equal(0))
      Expect(len(cidWrapper3.Transactions)).To(Equal(0))
@ -250,8 +256,8 @@ var _ = Describe("Retriever", func() {
      Expect(len(cidWrapper3.Receipts)).To(Equal(1))
      Expect(cidWrapper3.Receipts[0]).To(Equal("mockRctCID1"))
- cidWrapper4, err := retriever.RetrieveCIDs(rctContractsAndTopicFilter, 1)
+ cidWrapper4, err4 := retriever.RetrieveCIDs(rctContractsAndTopicFilter, 1)
- Expect(err).ToNot(HaveOccurred())
+ Expect(err4).ToNot(HaveOccurred())
      Expect(cidWrapper4.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
      Expect(len(cidWrapper4.Headers)).To(Equal(0))
      Expect(len(cidWrapper4.Transactions)).To(Equal(0))
@ -260,8 +266,8 @@ var _ = Describe("Retriever", func() {
      Expect(len(cidWrapper4.Receipts)).To(Equal(1))
      Expect(cidWrapper4.Receipts[0]).To(Equal("mockRctCID2"))
- cidWrapper5, err := retriever.RetrieveCIDs(rctsForAllCollectedTrxs, 1)
+ cidWrapper5, err5 := retriever.RetrieveCIDs(rctsForAllCollectedTrxs, 1)
- Expect(err).ToNot(HaveOccurred())
+ Expect(err5).ToNot(HaveOccurred())
      Expect(cidWrapper5.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
      Expect(len(cidWrapper5.Headers)).To(Equal(0))
      Expect(len(cidWrapper5.Transactions)).To(Equal(2))
@ -273,8 +279,8 @@ var _ = Describe("Retriever", func() {
      Expect(super_node.ListContainsString(cidWrapper5.Receipts, "mockRctCID1")).To(BeTrue())
      Expect(super_node.ListContainsString(cidWrapper5.Receipts, "mockRctCID2")).To(BeTrue())
- cidWrapper6, err := retriever.RetrieveCIDs(rctsForSelectCollectedTrxs, 1)
+ cidWrapper6, err6 := retriever.RetrieveCIDs(rctsForSelectCollectedTrxs, 1)
- Expect(err).ToNot(HaveOccurred())
+ Expect(err6).ToNot(HaveOccurred())
      Expect(cidWrapper6.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
      Expect(len(cidWrapper6.Headers)).To(Equal(0))
      Expect(len(cidWrapper6.Transactions)).To(Equal(1))
@ -284,8 +290,8 @@ var _ = Describe("Retriever", func() {
      Expect(len(cidWrapper6.Receipts)).To(Equal(1))
      Expect(cidWrapper6.Receipts[0]).To(Equal("mockRctCID2"))
- cidWrapper7, err := retriever.RetrieveCIDs(stateFilter, 1)
+ cidWrapper7, err7 := retriever.RetrieveCIDs(stateFilter, 1)
- Expect(err).ToNot(HaveOccurred())
+ Expect(err7).ToNot(HaveOccurred())
      Expect(cidWrapper7.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
      Expect(len(cidWrapper7.Headers)).To(Equal(0))
      Expect(len(cidWrapper7.Transactions)).To(Equal(0))
@ -302,17 +308,146 @@ var _ = Describe("Retriever", func() {
Describe("RetrieveFirstBlockNumber", func() { Describe("RetrieveFirstBlockNumber", func() {
It("Gets the number of the first block that has data in the database", func() { It("Gets the number of the first block that has data in the database", func() {
num, err := retriever.RetrieveFirstBlockNumber() indexErr := repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred()) Expect(indexErr).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveFirstBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1))) Expect(num).To(Equal(int64(1)))
}) })
It("Gets the number of the first block that has data in the database", func() {
payload := *mocks.MockCIDPayload
payload.BlockNumber = "1010101"
indexErr := repo.Index(&payload)
Expect(indexErr).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveFirstBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
It("Gets the number of the first block that has data in the database", func() {
payload1 := *mocks.MockCIDPayload
payload1.BlockNumber = "1010101"
payload2 := payload1
payload2.BlockNumber = "5"
indexErr := repo.Index(&payload1)
Expect(indexErr).ToNot(HaveOccurred())
indexErr2 := repo.Index(&payload2)
Expect(indexErr2).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveFirstBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(5)))
})
}) })
Describe("RetrieveLastBlockNumber", func() { Describe("RetrieveLastBlockNumber", func() {
It("Gets the number of the latest block that has data in the database", func() { It("Gets the number of the latest block that has data in the database", func() {
num, err := retriever.RetrieveLastBlockNumber() indexErr := repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred()) Expect(indexErr).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveLastBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1))) Expect(num).To(Equal(int64(1)))
}) })
It("Gets the number of the latest block that has data in the database", func() {
payload := *mocks.MockCIDPayload
payload.BlockNumber = "1010101"
indexErr := repo.Index(&payload)
Expect(indexErr).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveLastBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
It("Gets the number of the latest block that has data in the database", func() {
payload1 := *mocks.MockCIDPayload
payload1.BlockNumber = "1010101"
payload2 := payload1
payload2.BlockNumber = "5"
indexErr := repo.Index(&payload1)
Expect(indexErr).ToNot(HaveOccurred())
indexErr2 := repo.Index(&payload2)
Expect(indexErr2).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveLastBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
})
Describe("RetrieveGapsInData", func() {
It("Doesn't return gaps if there are none", func() {
payload1 := *mocks.MockCIDPayload
payload1.BlockNumber = "2"
payload2 := payload1
payload2.BlockNumber = "3"
indexErr1 := repo.Index(mocks.MockCIDPayload)
Expect(indexErr1).ToNot(HaveOccurred())
indexErr2 := repo.Index(&payload1)
Expect(indexErr2).ToNot(HaveOccurred())
indexErr3 := repo.Index(&payload2)
Expect(indexErr3).ToNot(HaveOccurred())
gaps, retrieveErr := retriever.RetrieveGapsInData()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(0))
})
It("Doesn't return the gap from 0 to the earliest block", func() {
payload := *mocks.MockCIDPayload
payload.BlockNumber = "5"
indexErr := repo.Index(&payload)
Expect(indexErr).ToNot(HaveOccurred())
gaps, retrieveErr := retriever.RetrieveGapsInData()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(0))
})
It("Finds gap between two entries", func() {
payload1 := *mocks.MockCIDPayload
payload1.BlockNumber = "1010101"
payload2 := payload1
payload2.BlockNumber = "5"
indexErr := repo.Index(&payload1)
Expect(indexErr).ToNot(HaveOccurred())
indexErr2 := repo.Index(&payload2)
Expect(indexErr2).ToNot(HaveOccurred())
gaps, retrieveErr := retriever.RetrieveGapsInData()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(1))
Expect(gaps[0][0]).To(Equal(uint64(6)))
Expect(gaps[0][1]).To(Equal(uint64(1010100)))
})
It("Finds gaps between multiple entries", func() {
payload1 := *mocks.MockCIDPayload
payload1.BlockNumber = "1010101"
payload2 := payload1
payload2.BlockNumber = "5"
payload3 := payload2
payload3.BlockNumber = "100"
payload4 := payload3
payload4.BlockNumber = "101"
payload5 := payload4
payload5.BlockNumber = "102"
payload6 := payload5
payload6.BlockNumber = "1000"
indexErr := repo.Index(&payload1)
Expect(indexErr).ToNot(HaveOccurred())
indexErr2 := repo.Index(&payload2)
Expect(indexErr2).ToNot(HaveOccurred())
indexErr3 := repo.Index(&payload3)
Expect(indexErr3).ToNot(HaveOccurred())
indexErr4 := repo.Index(&payload4)
Expect(indexErr4).ToNot(HaveOccurred())
indexErr5 := repo.Index(&payload5)
Expect(indexErr5).ToNot(HaveOccurred())
indexErr6 := repo.Index(&payload6)
Expect(indexErr6).ToNot(HaveOccurred())
gaps, retrieveErr := retriever.RetrieveGapsInData()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(3))
Expect(super_node.ListContainsRange(gaps, [2]uint64{6, 99})).To(BeTrue())
Expect(super_node.ListContainsRange(gaps, [2]uint64{103, 999})).To(BeTrue())
Expect(super_node.ListContainsRange(gaps, [2]uint64{1001, 1010100})).To(BeTrue())
})
}) })
}) })

View File

@ -76,3 +76,13 @@ func ListContainsBytes(bbb [][]byte, b []byte) bool {
  }
  return false
}
+ // ListContainsRange used to check if a list of [2]uint64 contains a particula [2]uint64
+ func ListContainsRange(rangeList [][2]uint64, rng [2]uint64) bool {
+ for _, rangeInList := range rangeList {
+ if rangeInList == rng {
+ return true
+ }
+ }
+ return false
+ }
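A short standalone usage sketch (it duplicates the helper so it runs on its own) showing how the retriever tests above use ListContainsRange to assert on gap ranges without depending on their order:

package main

import "fmt"

// ListContainsRange reports whether rng appears in rangeList, matching the
// helper added in the diff above.
func ListContainsRange(rangeList [][2]uint64, rng [2]uint64) bool {
	for _, rangeInList := range rangeList {
		if rangeInList == rng {
			return true
		}
	}
	return false
}

func main() {
	gaps := [][2]uint64{{6, 99}, {103, 999}, {1001, 1010100}}
	fmt.Println(ListContainsRange(gaps, [2]uint64{103, 999})) // true
	fmt.Println(ListContainsRange(gaps, [2]uint64{100, 999})) // false
}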