package store

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/ipfs/go-cid"
	format "github.com/ipfs/go-ipld-format"
	blocks "github.com/ipfs/go-libipfs/blocks"
	"github.com/ipld/go-car"
	carutil "github.com/ipld/go-car/util"
	carv2 "github.com/ipld/go-car/v2"
	mh "github.com/multiformats/go-multihash"
	cbg "github.com/whyrusleeping/cbor-gen"
	"go.uber.org/atomic"
	"golang.org/x/sync/errgroup"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"

	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/types"
)

const TipsetkeyBackfillRange = 2 * build.Finality

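// UnionStore returns a blockstore that combines the state and chain
// blockstores, so snapshot code can read objects from either.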
func (cs *ChainStore) UnionStore() bstore.Blockstore {
	return bstore.Union(cs.stateBlockstore, cs.chainBlockstore)
}

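// Export writes a CARv1 snapshot rooted at ts to w, walking the chain via
// WalkSnapshot and emitting every block it visits. Message receipts are
// always skipped by this method.
//
// A minimal usage sketch (hypothetical caller; error handling elided):
//
//	f, _ := os.Create("snapshot.car")
//	defer f.Close()
//	_ = cs.Export(ctx, cs.GetHeaviestTipSet(), 2*build.Finality, true, f)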
func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error {
	h := &car.CarHeader{
		Roots:   ts.Cids(),
		Version: 1,
	}

	if err := car.WriteHeader(h, w); err != nil {
		return xerrors.Errorf("failed to write car header: %s", err)
	}

	unionBs := cs.UnionStore()
	return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error {
		blk, err := unionBs.Get(ctx, c)
		if err != nil {
			return xerrors.Errorf("writing object to car, bs.Get: %w", err)
		}

		if err := carutil.LdWrite(w, c.Bytes(), blk.RawData()); err != nil {
			return xerrors.Errorf("failed to write block to car output: %w", err)
		}

		return nil
	})
}

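// Import reads a CAR file (v1 or v2) from r, stores every block it contains,
// and returns the tipset referenced by the CAR roots. It also backfills
// tipset keys for up to TipsetkeyBackfillRange epochs below the root tipset.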
func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, error) {
	// TODO: writing only to the state blockstore is incorrect.
	//  At this time, both the state and chain blockstores are backed by the
	//  universal store. When we physically segregate the stores, we will need
	//  to route state objects to the state blockstore, and chain objects to
	//  the chain blockstore.

	br, err := carv2.NewBlockReader(r)
	if err != nil {
		return nil, xerrors.Errorf("loadcar failed: %w", err)
	}

	s := cs.StateBlockstore()

	parallelPuts := 5
	putThrottle := make(chan error, parallelPuts)
	for i := 0; i < parallelPuts; i++ {
		putThrottle <- nil
	}

	var buf []blocks.Block
	for {
		blk, err := br.Next()
		if err != nil {
			if err == io.EOF {
				if len(buf) > 0 {
					if err := s.PutMany(ctx, buf); err != nil {
						return nil, err
					}
				}

				break
			}
			return nil, err
		}

		buf = append(buf, blk)

		if len(buf) > 1000 {
			if lastErr := <-putThrottle; lastErr != nil { // consume one error to have the right to add one
				return nil, lastErr
			}

			go func(buf []blocks.Block) {
				putThrottle <- s.PutMany(ctx, buf)
			}(buf)
			buf = nil
		}
	}

	// check errors
	for i := 0; i < parallelPuts; i++ {
		if lastErr := <-putThrottle; lastErr != nil {
			return nil, lastErr
		}
	}

	root, err := cs.LoadTipSet(ctx, types.NewTipSetKey(br.Roots...))
	if err != nil {
		return nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err)
	}

	ts := root
	for i := 0; i < int(TipsetkeyBackfillRange); i++ {
		err = cs.PersistTipset(ctx, ts)
		if err != nil {
			return nil, err
		}
		parentTsKey := ts.Parents()
		ts, err = cs.LoadTipSet(ctx, parentTsKey)
		if ts == nil || err != nil {
			log.Warnf("Only able to load the last %d tipsets", i)
			break
		}
	}

	return root, nil
}

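// walkSchedTaskType distinguishes the kinds of work items handled by the
// export walk scheduler, from chain block headers down to arbitrary DAG nodes.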
type walkSchedTaskType int

const (
	finishTask walkSchedTaskType = -1
	blockTask  walkSchedTaskType = iota
	messageTask
	receiptTask
	stateTask
	dagTask
)

func (t walkSchedTaskType) String() string {
	switch t {
	case finishTask:
		return "finish"
	case blockTask:
		return "block"
	case messageTask:
		return "message"
	case receiptTask:
		return "receipt"
	case stateTask:
		return "state"
	case dagTask:
		return "dag"
	}
	panic(fmt.Sprintf("unknown task %d", t))
}

type walkTask struct {
	c                cid.Cid
	taskType         walkSchedTaskType
	topLevelTaskType walkSchedTaskType
	blockCid         cid.Cid
	epoch            abi.ChainEpoch
}

// an ever-growing FIFO
type taskFifo struct {
	in   chan walkTask
	out  chan walkTask
	fifo []walkTask
}

type taskResult struct {
	c cid.Cid
	b blocks.Block
}

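// newTaskFifo returns a taskFifo and starts its run loop, which shuttles
// tasks from in to out while buffering any overflow in an in-memory slice.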
func newTaskFifo(bufferLen int) *taskFifo {
	f := taskFifo{
		in:   make(chan walkTask, bufferLen),
		out:  make(chan walkTask, bufferLen),
		fifo: make([]walkTask, 0),
	}

	go f.run()

	return &f
}

func (f *taskFifo) Close() error {
	close(f.in)
	return nil
}

func (f *taskFifo) run() {
	for {
		if len(f.fifo) > 0 {
			// we have items in slice
			// try to put next out or read something in.
			// blocks if nothing works.
			next := f.fifo[0]
			select {
			case f.out <- next:
				f.fifo = f.fifo[1:]
			case elem, ok := <-f.in:
				if !ok {
					// drain and close out.
					for _, elem := range f.fifo {
						f.out <- elem
					}
					close(f.out)
					return
				}
				f.fifo = append(f.fifo, elem)
			}
		} else {
			// no elements in fifo to put out.
			// Try to read in and block.
			// When done, try to put out or add to fifo.
			select {
			case elem, ok := <-f.in:
				if !ok {
					close(f.out)
					return
				}
				select {
				case f.out <- elem:
				default:
					f.fifo = append(f.fifo, elem)
				}
			}
		}
	}
}

type walkSchedulerConfig struct {
	numWorkers int

	head *types.TipSet // Tipset to start walking from.
	tail *types.TipSet // Tipset to end at.

	includeMessages bool
	includeReceipts bool
	includeState    bool
}

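// walkScheduler coordinates a parallel chain walk for export: a pool of
// workers pulls walkTasks from a taskFifo, fetches the corresponding blocks,
// and streams results to a single writer goroutine that appends them to the
// output CAR.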
type walkScheduler struct {
	ctx    context.Context
	cancel context.CancelFunc

	store  bstore.Blockstore
	cfg    walkSchedulerConfig
	writer io.Writer

	workerTasks    *taskFifo
	totalTasks     atomic.Int64
	results        chan taskResult
	writeErrorChan chan error

	// tracks number of inflight tasks
	//taskWg sync.WaitGroup
	// launches workers and collects errors if any occur
	workers *errgroup.Group
	// set of CIDs already exported
	seen sync.Map
}

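// newWalkScheduler starts the CAR writer goroutine and cfg.numWorkers walk
// workers, then seeds the queue with one blockTask per block header in
// cfg.head.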
func newWalkScheduler(ctx context.Context, store bstore.Blockstore, cfg walkSchedulerConfig, w io.Writer) (*walkScheduler, error) {
	ctx, cancel := context.WithCancel(ctx)
	workers, ctx := errgroup.WithContext(ctx)
	s := &walkScheduler{
		ctx:            ctx,
		cancel:         cancel,
		store:          store,
		cfg:            cfg,
		writer:         w,
		results:        make(chan taskResult, cfg.numWorkers*64),
		workerTasks:    newTaskFifo(cfg.numWorkers * 64),
		writeErrorChan: make(chan error, 1),
		workers:        workers,
	}

	go func() {
		defer close(s.writeErrorChan)
		for r := range s.results {
			// Write
			if err := carutil.LdWrite(s.writer, r.c.Bytes(), r.b.RawData()); err != nil {
				// abort operations
				cancel()
				s.writeErrorChan <- err
			}
		}
	}()

	// workers
	for i := 0; i < cfg.numWorkers; i++ {
		f := func(n int) func() error {
			return func() error {
				return s.workerFunc(n)
			}
		}(i)
		s.workers.Go(f)
	}

	s.totalTasks.Add(int64(len(cfg.head.Blocks())))
	for _, b := range cfg.head.Blocks() {
		select {
		case <-ctx.Done():
			log.Errorw("context done while sending root tasks", ctx.Err())
			cancel() // kill workers
			return nil, ctx.Err()
		case s.workerTasks.in <- walkTask{
			c:                b.Cid(),
			taskType:         blockTask,
			topLevelTaskType: blockTask,
			blockCid:         b.Cid(),
			epoch:            cfg.head.Height(),
		}:
		}
	}

	return s, nil
}

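// Wait blocks until every worker has exited, drains whatever is left in the
// task queue on a single goroutine, and surfaces either the first worker
// error or a write error from the CAR writer goroutine.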
func (s *walkScheduler) Wait() error {
	err := s.workers.Wait()
	// all workers done. One would have reached genesis and notified the
	// rest to exit. Yet, there might be some pending tasks in the queue,
	// so we need to run a "single worker".
	if err != nil {
		log.Errorw("export workers finished with error", "error", err)
	}

	for {
		if n := s.totalTasks.Load(); n == 0 {
			break // finally fully done
		}
		select {
		case task := <-s.workerTasks.out:
			s.totalTasks.Add(-1)
			if err != nil {
				continue // just drain if errors happened.
			}
			err = s.processTask(task, 0)
		}
	}
	close(s.results)
	errWrite := <-s.writeErrorChan
	if errWrite != nil {
		log.Errorw("error writing to CAR file", "error", errWrite)
		return errWrite
	}
	s.workerTasks.Close() //nolint:errcheck
	return err
}

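// enqueueIfNew schedules a task unless its CID is an identity hash, uses a
// codec other than raw or dag-cbor, or has already been seen.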
func (s *walkScheduler) enqueueIfNew(task walkTask) {
	if task.c.Prefix().MhType == mh.IDENTITY {
		//log.Infow("ignored", "cid", task.c.String())
		return
	}
	if task.c.Prefix().Codec != cid.Raw && task.c.Prefix().Codec != cid.DagCBOR {
		//log.Infow("ignored", "cid", task.c.String())
		return
	}
	if _, loaded := s.seen.LoadOrStore(task.c, struct{}{}); loaded {
		// we already had it on the map
		return
	}

	log.Debugw("enqueue", "type", task.taskType.String(), "cid", task.c.String())
	s.totalTasks.Add(1)
	s.workerTasks.in <- task
}

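// sendFinish enqueues a sentinel finishTask so that the remaining workers
// also wind down once one of them has reached genesis.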
func (s *walkScheduler) sendFinish(workerN int) error {
	log.Infow("worker finished work", "worker", workerN)
	s.totalTasks.Add(1)
	s.workerTasks.in <- walkTask{
		taskType: finishTask,
	}
	return nil
}

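// workerFunc is the body of each export worker: it consumes tasks from the
// queue until the context is cancelled or a finishTask signals that the walk
// has reached genesis.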
func (s *walkScheduler) workerFunc(workerN int) error {
	log.Infow("starting worker", "worker", workerN)
	for t := range s.workerTasks.out {
		s.totalTasks.Add(-1)
		select {
		case <-s.ctx.Done():
			return s.ctx.Err()
		default:
			// A worker reached genesis, so we wind down and let others do
			// the same. Exit.
			if t.taskType == finishTask {
				return s.sendFinish(workerN)
			}
		}

		err := s.processTask(t, workerN)
		if err != nil {
			return err
		}
		// continue
	}
	return nil
}

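// processTask fetches the block behind a task, forwards it to the writer via
// the results channel, and enqueues follow-up tasks: parents, messages,
// receipts and state roots for chain blocks, or any linked CIDs for other
// dag-cbor nodes.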
func (s *walkScheduler) processTask(t walkTask, workerN int) error {
	if t.taskType == finishTask {
		return nil
	}

	blk, err := s.store.Get(s.ctx, t.c)
	if errors.Is(err, format.ErrNotFound{}) && t.topLevelTaskType == receiptTask {
		log.Debugw("ignoring not-found block in Receipts",
			"block", t.blockCid,
			"epoch", t.epoch,
			"cid", t.c)
		return nil
	}
	if err != nil {
		return xerrors.Errorf(
			"blockstore.Get(%s). Task: %s. Block: %s (%s). Epoch: %d. Err: %w",
			t.c, t.taskType, t.topLevelTaskType, t.blockCid, t.epoch, err)
	}

	s.results <- taskResult{
		c: t.c,
		b: blk,
	}

	// extract relevant dags to walk from the block
	if t.taskType == blockTask {
		var b types.BlockHeader
		if err := b.UnmarshalCBOR(bytes.NewBuffer(blk.RawData())); err != nil {
			return xerrors.Errorf("unmarshalling block header (cid=%s): %w", blk, err)
		}
		if b.Height%1_000 == 0 {
			log.Infow("block export", "height", b.Height)
		}
		if b.Height == 0 {
			log.Info("exporting genesis block")
			for i := range b.Parents {
				s.enqueueIfNew(walkTask{
					c:                b.Parents[i],
					taskType:         dagTask,
					topLevelTaskType: blockTask,
					blockCid:         b.Parents[i],
					epoch:            0,
				})
			}
			s.enqueueIfNew(walkTask{
				c:                b.ParentStateRoot,
				taskType:         stateTask,
				topLevelTaskType: stateTask,
				blockCid:         t.c,
				epoch:            0,
			})

			return s.sendFinish(workerN)
		}
		// enqueue block parents
		for i := range b.Parents {
			s.enqueueIfNew(walkTask{
				c:                b.Parents[i],
				taskType:         blockTask,
				topLevelTaskType: blockTask,
				blockCid:         b.Parents[i],
				epoch:            b.Height,
			})
		}
		if s.cfg.tail.Height() >= b.Height {
			log.Debugw("tail reached: only blocks will be exported from now until genesis", "cid", t.c.String())
			return nil
		}

		if s.cfg.includeMessages {
			// enqueue block messages
			s.enqueueIfNew(walkTask{
				c:                b.Messages,
				taskType:         messageTask,
				topLevelTaskType: messageTask,
				blockCid:         t.c,
				epoch:            b.Height,
			})
		}
		if s.cfg.includeReceipts {
			// enqueue block receipts
			s.enqueueIfNew(walkTask{
				c:                b.ParentMessageReceipts,
				taskType:         receiptTask,
				topLevelTaskType: receiptTask,
				blockCid:         t.c,
				epoch:            b.Height,
			})
		}
		if s.cfg.includeState {
			s.enqueueIfNew(walkTask{
				c:                b.ParentStateRoot,
				taskType:         stateTask,
				topLevelTaskType: stateTask,
				blockCid:         t.c,
				epoch:            b.Height,
			})
		}

		return nil
	}

	// Not a chain-block: we scan for CIDs in the raw block-data
	return cbg.ScanForLinks(bytes.NewReader(blk.RawData()), func(c cid.Cid) {
		if t.c.Prefix().Codec != cid.DagCBOR || t.c.Prefix().MhType == mh.IDENTITY {
			return
		}

		s.enqueueIfNew(walkTask{
			c:                c,
			taskType:         dagTask,
			topLevelTaskType: t.topLevelTaskType,
			blockCid:         t.blockCid,
			epoch:            t.epoch,
		})
	})
}

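// ExportRange streams a CARv1 snapshot to w covering the chain from head down
// to tail, optionally including messages, receipts and state roots, using the
// given number of walk workers.
//
// A minimal usage sketch (hypothetical caller; error handling elided):
//
//	f, _ := os.Create("range.car")
//	defer f.Close()
//	_ = cs.ExportRange(ctx, f, head, tail, true, true, true, 8)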
func (cs *ChainStore) ExportRange(
	ctx context.Context,
	w io.Writer,
	head, tail *types.TipSet,
	messages, receipts, stateroots bool,
	workers int) error {

	h := &car.CarHeader{
		Roots:   head.Cids(),
		Version: 1,
	}

	if err := car.WriteHeader(h, w); err != nil {
		return xerrors.Errorf("failed to write car header: %s", err)
	}

	start := time.Now()
	log.Infow("walking snapshot range",
		"head", head.Key(),
		"tail", tail.Key(),
		"messages", messages,
		"receipts", receipts,
		"stateroots", stateroots,
		"workers", workers)

	cfg := walkSchedulerConfig{
		numWorkers:      workers,
		head:            head,
		tail:            tail,
		includeMessages: messages,
		includeState:    stateroots,
		includeReceipts: receipts,
	}

	pw, err := newWalkScheduler(ctx, cs.UnionStore(), cfg, w)
	if err != nil {
		return err
	}

	// wait until all workers are done.
	err = pw.Wait()
	if err != nil {
		log.Errorw("walker scheduler", "error", err)
		return err
	}

	log.Infow("walking snapshot range complete", "duration", time.Since(start), "success", err == nil)
	return nil
}

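// WalkSnapshot walks the chain rooted at ts (or at the heaviest tipset when
// ts is nil) back to genesis, invoking cb for every block header CID.
// Messages are visited for the most recent inclRecentRoots epochs, and for
// older epochs too unless skipOldMsgs is set; state trees (and receipts,
// unless skipMsgReceipts is set) are visited for the recent window and for
// genesis.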
func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs, skipMsgReceipts bool, cb func(cid.Cid) error) error {
	if ts == nil {
		ts = cs.GetHeaviestTipSet()
	}

	seen := cid.NewSet()
	walked := cid.NewSet()

	blocksToWalk := ts.Cids()
	currentMinHeight := ts.Height()

	walkChain := func(blk cid.Cid) error {
		if !seen.Visit(blk) {
			return nil
		}

		if err := cb(blk); err != nil {
			return err
		}

		data, err := cs.chainBlockstore.Get(ctx, blk)
		if err != nil {
			return xerrors.Errorf("getting block: %w", err)
		}

		var b types.BlockHeader
		if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil {
			return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err)
		}

		if currentMinHeight > b.Height {
			currentMinHeight = b.Height
			if currentMinHeight%builtin.EpochsInDay == 0 {
				log.Infow("export", "height", currentMinHeight)
			}
		}

		var cids []cid.Cid
		if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots {
			if walked.Visit(b.Messages) {
				mcids, err := recurseLinks(ctx, cs.chainBlockstore, walked, b.Messages, []cid.Cid{b.Messages})
				if err != nil {
					return xerrors.Errorf("recursing messages failed: %w", err)
				}
				cids = mcids
			}
		}

		if b.Height > 0 {
			for _, p := range b.Parents {
				blocksToWalk = append(blocksToWalk, p)
			}
		} else {
			// include the genesis block
			cids = append(cids, b.Parents...)
		}

		out := cids

		if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots {
			if walked.Visit(b.ParentStateRoot) {
				cids, err := recurseLinks(ctx, cs.stateBlockstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot})
				if err != nil {
					return xerrors.Errorf("recursing genesis state failed: %w", err)
				}

				out = append(out, cids...)
			}

			if !skipMsgReceipts && walked.Visit(b.ParentMessageReceipts) {
				out = append(out, b.ParentMessageReceipts)
			}
		}

		for _, c := range out {
			if seen.Visit(c) {
				prefix := c.Prefix()

				// Don't include identity CIDs.
				if prefix.MhType == mh.IDENTITY {
					continue
				}

				// We only include raw and dagcbor, for now.
				// Raw for "code" CIDs.
				switch prefix.Codec {
				case cid.Raw, cid.DagCBOR:
				default:
					continue
				}

				if err := cb(c); err != nil {
					return err
				}
			}
		}

		return nil
	}

	log.Infow("export started")
	exportStart := build.Clock.Now()

	for len(blocksToWalk) > 0 {
		next := blocksToWalk[0]
		blocksToWalk = blocksToWalk[1:]
		if err := walkChain(next); err != nil {
			return xerrors.Errorf("walk chain failed: %w", err)
		}
	}

	log.Infow("export finished", "duration", build.Clock.Now().Sub(exportStart).Seconds())

	return nil
}

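// recurseLinks appends to in the CIDs reachable from root through dag-cbor
// links, skipping nodes that were already visited via walked.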
func recurseLinks(ctx context.Context, bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {
	if root.Prefix().Codec != cid.DagCBOR {
		return in, nil
	}

	data, err := bs.Get(ctx, root)
	if err != nil {
		return nil, xerrors.Errorf("recurse links get (%s) failed: %w", root, err)
	}

	var rerr error
	err = cbg.ScanForLinks(bytes.NewReader(data.RawData()), func(c cid.Cid) {
		if rerr != nil {
			// No error return on ScanForLinks :(
			return
		}

		// traversed this already...
		if !walked.Visit(c) {
			return
		}

		in = append(in, c)
		var err error
		in, err = recurseLinks(ctx, bs, walked, c, in)
		if err != nil {
			rerr = err
		}
	})
	if err != nil {
		return nil, xerrors.Errorf("scanning for links failed: %w", err)
	}

	return in, rerr
}