2019-07-26 04:54:22 +00:00
|
|
|
package store
|
2019-07-05 14:29:17 +00:00
|
|
|
|
|
|
|
import (
|
2020-01-16 18:05:07 +00:00
|
|
|
"bytes"
|
2019-07-05 14:29:17 +00:00
|
|
|
"context"
|
2019-11-19 15:53:00 +00:00
|
|
|
"encoding/binary"
|
2019-07-17 03:05:55 +00:00
|
|
|
"encoding/json"
|
2020-09-08 06:16:34 +00:00
|
|
|
"errors"
|
2020-01-16 18:05:07 +00:00
|
|
|
"io"
|
2020-04-20 17:43:02 +00:00
|
|
|
"os"
|
2020-08-11 22:07:17 +00:00
|
|
|
"strconv"
|
2020-10-09 20:41:34 +00:00
|
|
|
"strings"
|
2019-07-05 14:29:17 +00:00
|
|
|
"sync"
|
2021-06-18 17:15:08 +00:00
|
|
|
"time"
|
2019-07-05 14:29:17 +00:00
|
|
|
|
2021-05-07 03:51:42 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/state"
|
|
|
|
|
2020-09-28 13:56:44 +00:00
|
|
|
"golang.org/x/sync/errgroup"
|
|
|
|
|
2020-09-07 03:49:10 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/crypto"
|
2020-02-29 00:05:56 +00:00
|
|
|
"github.com/minio/blake2b-simd"
|
|
|
|
|
2019-12-19 20:13:17 +00:00
|
|
|
"github.com/filecoin-project/go-address"
|
2020-09-07 03:49:10 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/abi"
|
2020-09-28 21:25:58 +00:00
|
|
|
|
2020-10-08 01:09:33 +00:00
|
|
|
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
|
2020-02-08 02:18:32 +00:00
|
|
|
|
2020-04-23 22:15:00 +00:00
|
|
|
"github.com/filecoin-project/lotus/api"
|
2021-01-29 20:01:00 +00:00
|
|
|
bstore "github.com/filecoin-project/lotus/blockstore"
|
2020-09-25 20:08:28 +00:00
|
|
|
"github.com/filecoin-project/lotus/build"
|
2020-09-28 21:25:58 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/actors/adt"
|
2020-10-08 01:09:33 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
2019-10-18 04:47:41 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/vm"
|
2020-06-22 22:38:36 +00:00
|
|
|
"github.com/filecoin-project/lotus/journal"
|
2020-03-02 00:26:09 +00:00
|
|
|
"github.com/filecoin-project/lotus/metrics"
|
2020-07-23 02:05:11 +00:00
|
|
|
|
2020-03-02 00:26:09 +00:00
|
|
|
"go.opencensus.io/stats"
|
2019-10-13 09:08:34 +00:00
|
|
|
"go.opencensus.io/trace"
|
2019-11-25 16:04:03 +00:00
|
|
|
"go.uber.org/multierr"
|
2019-09-30 23:55:35 +00:00
|
|
|
|
2019-10-18 04:47:41 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/types"
|
2019-07-08 12:51:45 +00:00
|
|
|
|
2019-10-24 06:24:58 +00:00
|
|
|
lru "github.com/hashicorp/golang-lru"
|
2019-07-26 04:54:22 +00:00
|
|
|
block "github.com/ipfs/go-block-format"
|
2019-07-05 14:29:17 +00:00
|
|
|
"github.com/ipfs/go-cid"
|
2020-10-09 20:41:34 +00:00
|
|
|
"github.com/ipfs/go-datastore"
|
2019-07-05 14:29:17 +00:00
|
|
|
dstore "github.com/ipfs/go-datastore"
|
2020-10-09 20:41:34 +00:00
|
|
|
"github.com/ipfs/go-datastore/query"
|
2020-02-04 22:19:05 +00:00
|
|
|
cbor "github.com/ipfs/go-ipld-cbor"
|
2020-01-08 19:10:57 +00:00
|
|
|
logging "github.com/ipfs/go-log/v2"
|
2020-11-04 14:39:01 +00:00
|
|
|
"github.com/ipld/go-car"
|
2020-05-05 01:31:56 +00:00
|
|
|
carutil "github.com/ipld/go-car/util"
|
2019-09-17 01:56:37 +00:00
|
|
|
cbg "github.com/whyrusleeping/cbor-gen"
|
2020-11-04 14:39:01 +00:00
|
|
|
"github.com/whyrusleeping/pubsub"
|
2019-09-17 01:56:37 +00:00
|
|
|
"golang.org/x/xerrors"
|
2019-07-05 14:29:17 +00:00
|
|
|
)
|
|
|
|
|
2019-07-26 04:54:22 +00:00
|
|
|
var log = logging.Logger("chainstore")
|
2019-07-05 14:29:17 +00:00
|
|
|
|
2019-07-24 02:45:00 +00:00
|
|
|
var chainHeadKey = dstore.NewKey("head")
|
2020-06-14 09:49:20 +00:00
|
|
|
var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
|
2019-07-24 02:45:00 +00:00
|
|
|
|
2020-08-11 22:07:17 +00:00
|
|
|
var DefaultTipSetCacheSize = 8192
|
2020-09-02 18:43:05 +00:00
|
|
|
var DefaultMsgMetaCacheSize = 2048
|
2020-08-11 22:07:17 +00:00
|
|
|
|
2020-09-08 06:16:34 +00:00
|
|
|
var ErrNotifeeDone = errors.New("notifee is done and should be removed")
|
|
|
|
|
2020-08-11 22:07:17 +00:00
|
|
|
func init() {
|
|
|
|
if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" {
|
|
|
|
tscs, err := strconv.Atoi(s)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("failed to parse 'LOTUS_CHAIN_TIPSET_CACHE' env var: %s", err)
|
|
|
|
}
|
|
|
|
DefaultTipSetCacheSize = tscs
|
|
|
|
}
|
2020-09-02 18:43:05 +00:00
|
|
|
|
|
|
|
if s := os.Getenv("LOTUS_CHAIN_MSGMETA_CACHE"); s != "" {
|
|
|
|
mmcs, err := strconv.Atoi(s)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("failed to parse 'LOTUS_CHAIN_MSGMETA_CACHE' env var: %s", err)
|
|
|
|
}
|
|
|
|
DefaultMsgMetaCacheSize = mmcs
|
|
|
|
}
|
2020-08-11 22:07:17 +00:00
|
|
|
}
|
|
|
|
|
2020-06-12 18:16:54 +00:00
|
|
|
// ReorgNotifee represents a callback that gets called upon reorgs.
|
2021-03-01 17:38:02 +00:00
|
|
|
type ReorgNotifee = func(rev, app []*types.TipSet) error
|
2020-06-12 18:16:54 +00:00
|
|
|
|
2020-07-17 17:54:26 +00:00
|
|
|
// Journal event types.
|
|
|
|
const (
|
|
|
|
evtTypeHeadChange = iota
|
|
|
|
)
|
|
|
|
|
2020-07-20 13:45:17 +00:00
|
|
|
type HeadChangeEvt struct {
|
|
|
|
From types.TipSetKey
|
|
|
|
FromHeight abi.ChainEpoch
|
|
|
|
To types.TipSetKey
|
|
|
|
ToHeight abi.ChainEpoch
|
|
|
|
RevertCount int
|
|
|
|
ApplyCount int
|
|
|
|
}
|
|
|
|
|
2020-06-23 21:51:25 +00:00
|
|
|
// ChainStore is the main point of access to chain data.
|
|
|
|
//
|
|
|
|
// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
|
|
|
|
// latest head tipset references) being tracked in the Datastore (key-value
|
|
|
|
// store).
|
|
|
|
//
|
|
|
|
// To alleviate disk access, the ChainStore has two ARC caches:
|
|
|
|
// 1. a tipset cache
|
|
|
|
// 2. a block => messages references cache.
|
2019-07-05 14:29:17 +00:00
|
|
|
type ChainStore struct {
|
2021-02-28 22:48:36 +00:00
|
|
|
chainBlockstore bstore.Blockstore
|
|
|
|
stateBlockstore bstore.Blockstore
|
|
|
|
metadataDs dstore.Batching
|
2019-07-05 14:29:17 +00:00
|
|
|
|
2021-02-28 22:48:36 +00:00
|
|
|
chainLocalBlockstore bstore.Blockstore
|
2020-11-10 13:35:36 +00:00
|
|
|
|
2021-04-06 11:01:48 +00:00
|
|
|
heaviestLk sync.RWMutex
|
2019-07-26 04:54:22 +00:00
|
|
|
heaviest *types.TipSet
|
2019-07-05 14:29:17 +00:00
|
|
|
|
|
|
|
bestTips *pubsub.PubSub
|
2019-09-17 22:43:47 +00:00
|
|
|
pubLk sync.Mutex
|
2019-07-05 14:29:17 +00:00
|
|
|
|
2019-09-03 04:36:07 +00:00
|
|
|
tstLk sync.Mutex
|
2020-02-08 02:18:32 +00:00
|
|
|
tipsets map[abi.ChainEpoch][]cid.Cid
|
2019-09-03 04:36:07 +00:00
|
|
|
|
2020-06-04 00:14:36 +00:00
|
|
|
cindex *ChainIndex
|
|
|
|
|
2020-06-12 18:16:54 +00:00
|
|
|
reorgCh chan<- reorg
|
|
|
|
reorgNotifeeCh chan ReorgNotifee
|
2019-10-24 06:24:58 +00:00
|
|
|
|
|
|
|
mmCache *lru.ARCCache
|
2019-12-16 19:22:56 +00:00
|
|
|
tsCache *lru.ARCCache
|
2020-01-13 20:47:27 +00:00
|
|
|
|
2020-07-18 13:46:47 +00:00
|
|
|
vmcalls vm.SyscallBuilder
|
2020-07-17 17:54:26 +00:00
|
|
|
|
|
|
|
evtTypes [1]journal.EventType
|
2020-10-09 19:52:04 +00:00
|
|
|
journal journal.Journal
|
2020-11-16 22:22:08 +00:00
|
|
|
|
|
|
|
cancelFn context.CancelFunc
|
|
|
|
wg sync.WaitGroup
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2021-02-28 22:48:36 +00:00
|
|
|
func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore {
|
|
|
|
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
|
|
|
|
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
|
2020-10-09 19:52:04 +00:00
|
|
|
if j == nil {
|
|
|
|
j = journal.NilJournal()
|
|
|
|
}
|
2020-11-16 22:22:08 +00:00
|
|
|
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
2021-02-28 22:48:36 +00:00
|
|
|
// unwraps the fallback store in case one is configured.
|
|
|
|
// some methods _need_ to operate on a local blockstore only.
|
|
|
|
localbs, _ := bstore.UnwrapFallbackStore(chainBs)
|
2019-07-28 05:35:32 +00:00
|
|
|
cs := &ChainStore{
|
2021-02-28 22:48:36 +00:00
|
|
|
chainBlockstore: chainBs,
|
|
|
|
stateBlockstore: stateBs,
|
|
|
|
chainLocalBlockstore: localbs,
|
|
|
|
metadataDs: ds,
|
|
|
|
bestTips: pubsub.New(64),
|
|
|
|
tipsets: make(map[abi.ChainEpoch][]cid.Cid),
|
|
|
|
mmCache: c,
|
|
|
|
tsCache: tsc,
|
|
|
|
vmcalls: vmcalls,
|
|
|
|
cancelFn: cancel,
|
|
|
|
journal: j,
|
2020-11-10 13:35:36 +00:00
|
|
|
}
|
|
|
|
|
2020-07-17 17:54:26 +00:00
|
|
|
cs.evtTypes = [1]journal.EventType{
|
2020-10-09 19:52:04 +00:00
|
|
|
evtTypeHeadChange: j.RegisterEventType("sync", "head_change"),
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
2019-07-28 05:35:32 +00:00
|
|
|
|
2020-06-04 00:14:36 +00:00
|
|
|
ci := NewChainIndex(cs.LoadTipSet)
|
|
|
|
|
|
|
|
cs.cindex = ci
|
|
|
|
|
2019-07-28 05:35:32 +00:00
|
|
|
hcnf := func(rev, app []*types.TipSet) error {
|
2019-09-17 22:43:47 +00:00
|
|
|
cs.pubLk.Lock()
|
|
|
|
defer cs.pubLk.Unlock()
|
2019-09-18 11:01:52 +00:00
|
|
|
|
2020-04-23 22:15:00 +00:00
|
|
|
notif := make([]*api.HeadChange, len(rev)+len(app))
|
2019-09-18 11:01:52 +00:00
|
|
|
|
|
|
|
for i, r := range rev {
|
2020-04-23 22:15:00 +00:00
|
|
|
notif[i] = &api.HeadChange{
|
2019-07-28 05:35:32 +00:00
|
|
|
Type: HCRevert,
|
|
|
|
Val: r,
|
2019-09-18 11:01:52 +00:00
|
|
|
}
|
2019-07-28 05:35:32 +00:00
|
|
|
}
|
2019-09-18 11:01:52 +00:00
|
|
|
for i, r := range app {
|
2020-04-23 22:15:00 +00:00
|
|
|
notif[i+len(rev)] = &api.HeadChange{
|
2019-07-28 05:35:32 +00:00
|
|
|
Type: HCApply,
|
|
|
|
Val: r,
|
2019-09-18 11:01:52 +00:00
|
|
|
}
|
2019-07-28 05:35:32 +00:00
|
|
|
}
|
2019-09-18 11:01:52 +00:00
|
|
|
|
|
|
|
cs.bestTips.Pub(notif, "headchange")
|
2019-07-28 05:35:32 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-03-02 00:26:09 +00:00
|
|
|
hcmetric := func(rev, app []*types.TipSet) error {
|
|
|
|
for _, r := range app {
|
2020-11-16 22:22:08 +00:00
|
|
|
stats.Record(context.Background(), metrics.ChainNodeHeight.M(int64(r.Height())))
|
2020-03-02 00:26:09 +00:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-06-12 18:16:54 +00:00
|
|
|
cs.reorgNotifeeCh = make(chan ReorgNotifee)
|
2020-11-16 15:52:19 +00:00
|
|
|
cs.reorgCh = cs.reorgWorker(ctx, []ReorgNotifee{hcnf, hcmetric})
|
2019-07-28 05:35:32 +00:00
|
|
|
|
|
|
|
return cs
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2020-11-16 22:22:08 +00:00
|
|
|
func (cs *ChainStore) Close() error {
|
|
|
|
cs.cancelFn()
|
|
|
|
cs.wg.Wait()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-07-24 02:45:00 +00:00
|
|
|
func (cs *ChainStore) Load() error {
|
2021-02-28 22:48:36 +00:00
|
|
|
head, err := cs.metadataDs.Get(chainHeadKey)
|
2019-07-24 21:10:27 +00:00
|
|
|
if err == dstore.ErrNotFound {
|
|
|
|
log.Warn("no previous chain state found")
|
|
|
|
return nil
|
|
|
|
}
|
2019-07-24 02:45:00 +00:00
|
|
|
if err != nil {
|
2019-11-22 16:20:56 +00:00
|
|
|
return xerrors.Errorf("failed to load chain state from datastore: %w", err)
|
2019-07-24 02:45:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var tscids []cid.Cid
|
|
|
|
if err := json.Unmarshal(head, &tscids); err != nil {
|
2019-11-22 16:20:56 +00:00
|
|
|
return xerrors.Errorf("failed to unmarshal stored chain head: %w", err)
|
2019-07-24 02:45:00 +00:00
|
|
|
}
|
|
|
|
|
2019-12-16 19:22:56 +00:00
|
|
|
ts, err := cs.LoadTipSet(types.NewTipSetKey(tscids...))
|
2019-07-24 02:45:00 +00:00
|
|
|
if err != nil {
|
2019-09-30 23:55:35 +00:00
|
|
|
return xerrors.Errorf("loading tipset: %w", err)
|
2019-07-24 02:45:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
cs.heaviest = ts
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-07-26 04:54:22 +00:00
|
|
|
func (cs *ChainStore) writeHead(ts *types.TipSet) error {
|
2019-07-24 02:45:00 +00:00
|
|
|
data, err := json.Marshal(ts.Cids())
|
|
|
|
if err != nil {
|
2019-11-22 16:20:56 +00:00
|
|
|
return xerrors.Errorf("failed to marshal tipset: %w", err)
|
2019-07-24 02:45:00 +00:00
|
|
|
}
|
|
|
|
|
2021-02-28 22:48:36 +00:00
|
|
|
if err := cs.metadataDs.Put(chainHeadKey, data); err != nil {
|
2019-11-22 16:20:56 +00:00
|
|
|
return xerrors.Errorf("failed to write chain head to datastore: %w", err)
|
2019-07-24 02:45:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-07-17 03:05:55 +00:00
|
|
|
const (
|
2019-09-17 22:43:47 +00:00
|
|
|
HCRevert = "revert"
|
|
|
|
HCApply = "apply"
|
|
|
|
HCCurrent = "current"
|
2019-07-17 03:05:55 +00:00
|
|
|
)
|
|
|
|
|
2020-04-23 22:15:00 +00:00
|
|
|
func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange {
|
2019-09-17 22:43:47 +00:00
|
|
|
cs.pubLk.Lock()
|
2019-07-17 03:05:55 +00:00
|
|
|
subch := cs.bestTips.Sub("headchange")
|
2019-09-17 22:43:47 +00:00
|
|
|
head := cs.GetHeaviestTipSet()
|
|
|
|
cs.pubLk.Unlock()
|
|
|
|
|
2020-04-23 22:15:00 +00:00
|
|
|
out := make(chan []*api.HeadChange, 16)
|
|
|
|
out <- []*api.HeadChange{{
|
2019-09-17 22:43:47 +00:00
|
|
|
Type: HCCurrent,
|
|
|
|
Val: head,
|
2019-09-18 11:01:52 +00:00
|
|
|
}}
|
2019-09-17 22:43:47 +00:00
|
|
|
|
2019-07-17 03:05:55 +00:00
|
|
|
go func() {
|
|
|
|
defer close(out)
|
2019-12-11 12:37:52 +00:00
|
|
|
var unsubOnce sync.Once
|
|
|
|
|
2019-07-28 19:19:33 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case val, ok := <-subch:
|
|
|
|
if !ok {
|
2019-08-31 01:03:10 +00:00
|
|
|
log.Warn("chain head sub exit loop")
|
2019-07-28 19:19:33 +00:00
|
|
|
return
|
|
|
|
}
|
2020-11-03 12:28:31 +00:00
|
|
|
if len(out) > 5 {
|
2019-09-18 11:01:52 +00:00
|
|
|
log.Warnf("head change sub is slow, has %d buffered entries", len(out))
|
|
|
|
}
|
2019-08-31 01:03:10 +00:00
|
|
|
select {
|
2020-04-23 22:15:00 +00:00
|
|
|
case out <- val.([]*api.HeadChange):
|
2019-08-31 01:03:10 +00:00
|
|
|
case <-ctx.Done():
|
|
|
|
}
|
2019-07-28 19:19:33 +00:00
|
|
|
case <-ctx.Done():
|
2019-12-11 12:37:52 +00:00
|
|
|
unsubOnce.Do(func() {
|
|
|
|
go cs.bestTips.Unsub(subch)
|
|
|
|
})
|
2019-07-28 19:19:33 +00:00
|
|
|
}
|
2019-07-17 03:05:55 +00:00
|
|
|
}
|
|
|
|
}()
|
|
|
|
return out
|
|
|
|
}
|
|
|
|
|
2020-06-12 18:16:54 +00:00
|
|
|
func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) {
|
|
|
|
cs.reorgNotifeeCh <- f
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2020-06-14 09:49:20 +00:00
|
|
|
func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) {
|
|
|
|
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
|
|
|
|
|
2021-02-28 22:48:36 +00:00
|
|
|
return cs.metadataDs.Has(key)
|
2020-06-14 09:49:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
|
|
|
|
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
|
|
|
|
|
2021-02-28 22:48:36 +00:00
|
|
|
if err := cs.metadataDs.Put(key, []byte{0}); err != nil {
|
2020-06-14 09:49:20 +00:00
|
|
|
return xerrors.Errorf("cache block validation: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-09-27 21:52:26 +00:00
|
|
|
func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
|
|
|
|
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
|
|
|
|
|
2021-02-28 22:48:36 +00:00
|
|
|
if err := cs.metadataDs.Delete(key); err != nil {
|
2020-09-27 21:52:26 +00:00
|
|
|
return xerrors.Errorf("removing from valid block cache: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-07-25 22:15:03 +00:00
|
|
|
func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error {
|
2019-10-06 03:32:56 +00:00
|
|
|
ts, err := types.NewTipSet([]*types.BlockHeader{b})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-10-15 04:33:29 +00:00
|
|
|
if err := cs.PutTipSet(context.TODO(), ts); err != nil {
|
2019-07-05 14:29:17 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-02-28 22:48:36 +00:00
|
|
|
return cs.metadataDs.Put(dstore.NewKey("0"), b.Cid().Bytes())
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-10-15 04:33:29 +00:00
|
|
|
func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
|
2019-10-06 03:32:56 +00:00
|
|
|
for _, b := range ts.Blocks() {
|
2019-11-12 10:18:46 +00:00
|
|
|
if err := cs.PersistBlockHeaders(b); err != nil {
|
2019-07-05 14:29:17 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-06 03:32:56 +00:00
|
|
|
expanded, err := cs.expandTipset(ts.Blocks()[0])
|
2019-09-08 20:14:01 +00:00
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("errored while expanding tipset: %w", err)
|
|
|
|
}
|
2019-10-06 03:32:56 +00:00
|
|
|
log.Debugf("expanded %s into %s\n", ts.Cids(), expanded.Cids())
|
2019-09-08 20:14:01 +00:00
|
|
|
|
2019-10-15 04:33:29 +00:00
|
|
|
if err := cs.MaybeTakeHeavierTipSet(ctx, expanded); err != nil {
|
2019-11-22 16:20:56 +00:00
|
|
|
return xerrors.Errorf("MaybeTakeHeavierTipSet failed in PutTipSet: %w", err)
|
2019-07-28 05:35:32 +00:00
|
|
|
}
|
2019-07-05 14:29:17 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-06-23 21:51:25 +00:00
|
|
|
// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
|
|
|
|
// internal state as our new head, if and only if it is heavier than the current
|
2021-01-18 15:34:25 +00:00
|
|
|
// head and does not exceed the maximum fork length.
|
2019-10-15 04:33:29 +00:00
|
|
|
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
|
2021-06-18 17:15:08 +00:00
|
|
|
for {
|
|
|
|
cs.heaviestLk.Lock()
|
|
|
|
if len(cs.reorgCh) < reorgChBuf/2 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
cs.heaviestLk.Unlock()
|
|
|
|
log.Errorf("reorg channel is heavily backlogged, waiting a bit before trying to take process new tipsets")
|
|
|
|
select {
|
|
|
|
case <-time.After(time.Second / 2):
|
|
|
|
case <-ctx.Done():
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-05 14:29:17 +00:00
|
|
|
defer cs.heaviestLk.Unlock()
|
2019-10-15 04:33:29 +00:00
|
|
|
w, err := cs.Weight(ctx, ts)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
heaviestW, err := cs.Weight(ctx, cs.heaviest)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if w.GreaterThan(heaviestW) {
|
2019-07-31 07:13:49 +00:00
|
|
|
// TODO: don't do this for initial sync. Now that we don't have a
|
|
|
|
// difference between 'bootstrap sync' and 'caught up' sync, we need
|
|
|
|
// some other heuristic.
|
2021-01-18 15:34:25 +00:00
|
|
|
|
|
|
|
exceeds, err := cs.exceedsForkLength(cs.heaviest, ts)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if exceeds {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-11-09 20:14:40 +00:00
|
|
|
return cs.takeHeaviestTipSet(ctx, ts)
|
2020-10-28 15:53:13 +00:00
|
|
|
} else if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
|
|
|
|
log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
|
2019-10-10 03:50:50 +00:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-01-18 15:34:25 +00:00
|
|
|
// Check if the two tipsets have a fork length above `ForkLengthThreshold`.
|
|
|
|
// `synced` is the head of the chain we are currently synced to and `external`
|
|
|
|
// is the incoming tipset potentially belonging to a forked chain. It assumes
|
|
|
|
// the external chain has already been validated and available in the ChainStore.
|
|
|
|
// The "fast forward" case is covered in this logic as a valid fork of length 0.
|
|
|
|
//
|
|
|
|
// FIXME: We may want to replace some of the logic in `syncFork()` with this.
|
|
|
|
// `syncFork()` counts the length on both sides of the fork at the moment (we
|
|
|
|
// need to settle on that) but here we just enforce it on the `synced` side.
|
|
|
|
func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, error) {
|
|
|
|
if synced == nil || external == nil {
|
|
|
|
// FIXME: If `cs.heaviest` is nil we should just bypass the entire
|
|
|
|
// `MaybeTakeHeavierTipSet` logic (instead of each of the called
|
|
|
|
// functions having to handle the nil case on their own).
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
var err error
|
|
|
|
// `forkLength`: number of tipsets we need to walk back from the our `synced`
|
|
|
|
// chain to the common ancestor with the new `external` head in order to
|
|
|
|
// adopt the fork.
|
|
|
|
for forkLength := 0; forkLength < int(build.ForkLengthThreshold); forkLength++ {
|
|
|
|
// First walk back as many tipsets in the external chain to match the
|
|
|
|
// `synced` height to compare them. If we go past the `synced` height
|
|
|
|
// the subsequent match will fail but it will still be useful to get
|
|
|
|
// closer to the `synced` head parent's height in the next loop.
|
|
|
|
for external.Height() > synced.Height() {
|
|
|
|
if external.Height() == 0 {
|
|
|
|
// We reached the genesis of the external chain without a match;
|
|
|
|
// this is considered a fork outside the allowed limit (of "infinite"
|
|
|
|
// length).
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
external, err = cs.LoadTipSet(external.Parents())
|
|
|
|
if err != nil {
|
|
|
|
return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now check if we arrived at the common ancestor.
|
|
|
|
if synced.Equals(external) {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we didn't, go back *one* tipset on the `synced` side (incrementing
|
|
|
|
// the `forkLength`).
|
|
|
|
if synced.Height() == 0 {
|
|
|
|
// Same check as the `external` side, if we reach the start (genesis)
|
|
|
|
// there is no common ancestor.
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
synced, err = cs.LoadTipSet(synced.Parents())
|
|
|
|
if err != nil {
|
|
|
|
return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// We traversed the fork length allowed without finding a common ancestor.
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
2020-11-04 14:39:01 +00:00
|
|
|
// ForceHeadSilent forces a chain head tipset without triggering a reorg
|
|
|
|
// operation.
|
|
|
|
//
|
|
|
|
// CAUTION: Use it only for testing, such as to teleport the chain to a
|
|
|
|
// particular tipset to carry out a benchmark, verification, etc. on a chain
|
|
|
|
// segment.
|
|
|
|
func (cs *ChainStore) ForceHeadSilent(_ context.Context, ts *types.TipSet) error {
|
2020-11-16 16:03:47 +00:00
|
|
|
log.Warnf("(!!!) forcing a new head silently; new head: %s", ts)
|
2020-11-04 14:39:01 +00:00
|
|
|
|
|
|
|
cs.heaviestLk.Lock()
|
|
|
|
defer cs.heaviestLk.Unlock()
|
|
|
|
cs.heaviest = ts
|
|
|
|
|
|
|
|
err := cs.writeHead(ts)
|
|
|
|
if err != nil {
|
|
|
|
err = xerrors.Errorf("failed to write chain head: %s", err)
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-10-10 23:50:49 +00:00
|
|
|
type reorg struct {
|
|
|
|
old *types.TipSet
|
|
|
|
new *types.TipSet
|
|
|
|
}
|
|
|
|
|
2021-06-18 17:15:08 +00:00
|
|
|
const reorgChBuf = 32
|
|
|
|
|
2020-06-12 18:16:54 +00:00
|
|
|
func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNotifee) chan<- reorg {
|
2021-06-18 17:15:08 +00:00
|
|
|
out := make(chan reorg, reorgChBuf)
|
2020-06-12 18:16:54 +00:00
|
|
|
notifees := make([]ReorgNotifee, len(initialNotifees))
|
|
|
|
copy(notifees, initialNotifees)
|
|
|
|
|
2020-11-16 22:22:08 +00:00
|
|
|
cs.wg.Add(1)
|
2019-10-10 23:50:49 +00:00
|
|
|
go func() {
|
2020-11-16 22:22:08 +00:00
|
|
|
defer cs.wg.Done()
|
2019-10-10 23:50:49 +00:00
|
|
|
defer log.Warn("reorgWorker quit")
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
2020-06-12 18:16:54 +00:00
|
|
|
case n := <-cs.reorgNotifeeCh:
|
|
|
|
notifees = append(notifees, n)
|
|
|
|
|
2019-10-10 23:50:49 +00:00
|
|
|
case r := <-out:
|
|
|
|
revert, apply, err := cs.ReorgOps(r.old, r.new)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("computing reorg ops failed: ", err)
|
|
|
|
continue
|
|
|
|
}
|
2019-11-18 18:18:59 +00:00
|
|
|
|
2020-10-09 19:52:04 +00:00
|
|
|
cs.journal.RecordEvent(cs.evtTypes[evtTypeHeadChange], func() interface{} {
|
2020-07-20 13:45:17 +00:00
|
|
|
return HeadChangeEvt{
|
2020-07-17 17:54:26 +00:00
|
|
|
From: r.old.Key(),
|
|
|
|
FromHeight: r.old.Height(),
|
|
|
|
To: r.new.Key(),
|
|
|
|
ToHeight: r.new.Height(),
|
|
|
|
RevertCount: len(revert),
|
|
|
|
ApplyCount: len(apply),
|
|
|
|
}
|
2020-06-22 22:38:36 +00:00
|
|
|
})
|
|
|
|
|
2019-11-18 18:18:59 +00:00
|
|
|
// reverse the apply array
|
|
|
|
for i := len(apply)/2 - 1; i >= 0; i-- {
|
|
|
|
opp := len(apply) - 1 - i
|
|
|
|
apply[i], apply[opp] = apply[opp], apply[i]
|
|
|
|
}
|
|
|
|
|
2020-09-08 06:16:34 +00:00
|
|
|
var toremove map[int]struct{}
|
|
|
|
for i, hcf := range notifees {
|
|
|
|
err := hcf(revert, apply)
|
|
|
|
|
|
|
|
switch err {
|
|
|
|
case nil:
|
|
|
|
|
|
|
|
case ErrNotifeeDone:
|
|
|
|
if toremove == nil {
|
|
|
|
toremove = make(map[int]struct{})
|
|
|
|
}
|
|
|
|
toremove[i] = struct{}{}
|
|
|
|
|
|
|
|
default:
|
2019-10-10 23:50:49 +00:00
|
|
|
log.Error("head change func errored (BAD): ", err)
|
|
|
|
}
|
|
|
|
}
|
2020-09-08 06:16:34 +00:00
|
|
|
|
|
|
|
if len(toremove) > 0 {
|
|
|
|
newNotifees := make([]ReorgNotifee, 0, len(notifees)-len(toremove))
|
|
|
|
for i, hcf := range notifees {
|
|
|
|
_, remove := toremove[i]
|
|
|
|
if remove {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
newNotifees = append(newNotifees, hcf)
|
|
|
|
}
|
|
|
|
notifees = newNotifees
|
|
|
|
}
|
|
|
|
|
2019-10-10 23:50:49 +00:00
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
return out
|
|
|
|
}
|
|
|
|
|
2020-06-23 21:51:25 +00:00
|
|
|
// takeHeaviestTipSet actually sets the incoming tipset as our head both in
|
|
|
|
// memory and in the ChainStore. It also sends a notification to deliver to
|
|
|
|
// ReorgNotifees.
|
2019-11-09 20:14:40 +00:00
|
|
|
func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error {
|
2019-12-05 05:14:19 +00:00
|
|
|
_, span := trace.StartSpan(ctx, "takeHeaviestTipSet")
|
2019-11-09 20:14:40 +00:00
|
|
|
defer span.End()
|
|
|
|
|
2019-10-10 23:50:49 +00:00
|
|
|
if cs.heaviest != nil { // buf
|
|
|
|
if len(cs.reorgCh) > 0 {
|
|
|
|
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
|
2019-10-10 03:50:50 +00:00
|
|
|
}
|
2019-10-10 23:50:49 +00:00
|
|
|
cs.reorgCh <- reorg{
|
|
|
|
old: cs.heaviest,
|
|
|
|
new: ts,
|
2019-07-17 03:05:55 +00:00
|
|
|
}
|
2019-10-10 03:50:50 +00:00
|
|
|
} else {
|
2019-10-16 08:01:41 +00:00
|
|
|
log.Warnf("no heaviest tipset found, using %s", ts.Cids())
|
2019-10-10 03:50:50 +00:00
|
|
|
}
|
2019-08-14 03:31:58 +00:00
|
|
|
|
2019-11-09 20:14:40 +00:00
|
|
|
span.AddAttributes(trace.BoolAttribute("newHead", true))
|
|
|
|
|
2019-12-03 23:04:52 +00:00
|
|
|
log.Infof("New heaviest tipset! %s (height=%d)", ts.Cids(), ts.Height())
|
2019-10-10 03:50:50 +00:00
|
|
|
cs.heaviest = ts
|
2019-07-24 02:45:00 +00:00
|
|
|
|
2019-10-10 03:50:50 +00:00
|
|
|
if err := cs.writeHead(ts); err != nil {
|
|
|
|
log.Errorf("failed to write chain head: %s", err)
|
|
|
|
return nil
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
2019-10-10 03:50:50 +00:00
|
|
|
|
2019-07-05 14:29:17 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-10-09 20:41:34 +00:00
|
|
|
// FlushValidationCache removes all results of block validation from the
|
|
|
|
// chain metadata store. Usually the first step after a new chain import.
|
|
|
|
func (cs *ChainStore) FlushValidationCache() error {
|
2021-02-28 22:48:36 +00:00
|
|
|
return FlushValidationCache(cs.metadataDs)
|
2021-01-15 19:57:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func FlushValidationCache(ds datastore.Batching) error {
|
2020-10-09 20:41:34 +00:00
|
|
|
log.Infof("clearing block validation cache...")
|
|
|
|
|
2021-01-15 19:57:32 +00:00
|
|
|
dsWalk, err := ds.Query(query.Query{
|
2020-10-10 13:36:32 +00:00
|
|
|
// Potential TODO: the validation cache is not a namespace on its own
|
|
|
|
// but is rather constructed as prefixed-key `foo:bar` via .Instance(), which
|
|
|
|
// in turn does not work with the filter, which can match only on `foo/bar`
|
|
|
|
//
|
|
|
|
// If this is addressed (blockcache goes into its own sub-namespace) then
|
|
|
|
// strings.HasPrefix(...) below can be skipped
|
|
|
|
//
|
|
|
|
//Prefix: blockValidationCacheKeyPrefix.String()
|
|
|
|
KeysOnly: true,
|
|
|
|
})
|
2020-10-09 20:41:34 +00:00
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("failed to initialize key listing query: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
allKeys, err := dsWalk.Rest()
|
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("failed to run key listing query: %w", err)
|
|
|
|
}
|
|
|
|
|
2021-01-15 19:57:32 +00:00
|
|
|
batch, err := ds.Batch()
|
2020-10-09 20:41:34 +00:00
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("failed to open a DS batch: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
delCnt := 0
|
|
|
|
for _, k := range allKeys {
|
|
|
|
if strings.HasPrefix(k.Key, blockValidationCacheKeyPrefix.String()) {
|
|
|
|
delCnt++
|
2020-10-10 02:00:12 +00:00
|
|
|
batch.Delete(datastore.RawKey(k.Key)) // nolint:errcheck
|
2020-10-09 20:41:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := batch.Commit(); err != nil {
|
|
|
|
return xerrors.Errorf("failed to commit the DS batch: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Infof("%d block validation entries cleared.", delCnt)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-10-10 03:50:50 +00:00
|
|
|
// SetHead sets the chainstores current 'best' head node.
|
|
|
|
// This should only be called if something is broken and needs fixing
|
|
|
|
func (cs *ChainStore) SetHead(ts *types.TipSet) error {
|
|
|
|
cs.heaviestLk.Lock()
|
|
|
|
defer cs.heaviestLk.Unlock()
|
2019-11-09 20:14:40 +00:00
|
|
|
return cs.takeHeaviestTipSet(context.TODO(), ts)
|
2019-10-10 03:50:50 +00:00
|
|
|
}
|
|
|
|
|
2020-06-23 21:51:25 +00:00
|
|
|
// Contains returns whether our BlockStore has all blocks in the supplied TipSet.
|
2019-07-26 04:54:22 +00:00
|
|
|
func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) {
|
|
|
|
for _, c := range ts.Cids() {
|
2021-02-28 22:48:36 +00:00
|
|
|
has, err := cs.chainBlockstore.Has(c)
|
2019-07-05 14:29:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if !has {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
2020-06-23 21:51:25 +00:00
|
|
|
// GetBlock fetches a BlockHeader with the supplied CID. It returns
|
|
|
|
// blockstore.ErrNotFound if the block was not found in the BlockStore.
|
2019-07-25 22:15:03 +00:00
|
|
|
func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) {
|
2020-11-10 13:35:36 +00:00
|
|
|
var blk *types.BlockHeader
|
2021-02-28 22:48:36 +00:00
|
|
|
err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) {
|
2020-11-10 13:35:36 +00:00
|
|
|
blk, err = types.DecodeBlock(b)
|
|
|
|
return err
|
|
|
|
})
|
|
|
|
return blk, err
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-12-16 19:22:56 +00:00
|
|
|
func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) {
|
|
|
|
v, ok := cs.tsCache.Get(tsk)
|
|
|
|
if ok {
|
|
|
|
return v.(*types.TipSet), nil
|
|
|
|
}
|
|
|
|
|
2020-09-28 13:56:44 +00:00
|
|
|
// Fetch tipset block headers from blockstore in parallel
|
|
|
|
var eg errgroup.Group
|
|
|
|
cids := tsk.Cids()
|
2020-09-28 14:35:37 +00:00
|
|
|
blks := make([]*types.BlockHeader, len(cids))
|
|
|
|
for i, c := range cids {
|
|
|
|
i, c := i, c
|
2020-09-28 13:56:44 +00:00
|
|
|
eg.Go(func() error {
|
|
|
|
b, err := cs.GetBlock(c)
|
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("get block %s: %w", c, err)
|
|
|
|
}
|
2019-07-05 14:29:17 +00:00
|
|
|
|
2020-09-28 14:35:37 +00:00
|
|
|
blks[i] = b
|
2020-09-28 13:56:44 +00:00
|
|
|
return nil
|
|
|
|
})
|
|
|
|
}
|
|
|
|
err := eg.Wait()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-12-16 19:22:56 +00:00
|
|
|
ts, err := types.NewTipSet(blks)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
cs.tsCache.Add(tsk, ts)
|
|
|
|
|
|
|
|
return ts, nil
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2020-06-02 14:29:39 +00:00
|
|
|
// IsAncestorOf returns true if 'a' is an ancestor of 'b'
|
2019-07-26 04:54:22 +00:00
|
|
|
func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) {
|
2019-07-05 14:29:17 +00:00
|
|
|
if b.Height() <= a.Height() {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
cur := b
|
|
|
|
for !a.Equals(cur) && cur.Height() > a.Height() {
|
2020-09-09 23:09:55 +00:00
|
|
|
next, err := cs.LoadTipSet(cur.Parents())
|
2019-07-05 14:29:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
cur = next
|
|
|
|
}
|
|
|
|
|
|
|
|
return cur.Equals(a), nil
|
|
|
|
}
|
|
|
|
|
2019-07-26 04:54:22 +00:00
|
|
|
func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, error) {
|
2019-07-05 14:29:17 +00:00
|
|
|
l, _, err := cs.ReorgOps(a, b)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return cs.LoadTipSet(l[len(l)-1].Parents())
|
|
|
|
}
|
|
|
|
|
2019-07-26 04:54:22 +00:00
|
|
|
func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
|
2020-08-25 00:21:03 +00:00
|
|
|
return ReorgOps(cs.LoadTipSet, a, b)
|
|
|
|
}
|
|
|
|
|
|
|
|
func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
|
2019-07-05 14:29:17 +00:00
|
|
|
left := a
|
|
|
|
right := b
|
|
|
|
|
2019-07-26 04:54:22 +00:00
|
|
|
var leftChain, rightChain []*types.TipSet
|
2019-07-05 14:29:17 +00:00
|
|
|
for !left.Equals(right) {
|
|
|
|
if left.Height() > right.Height() {
|
|
|
|
leftChain = append(leftChain, left)
|
2020-08-25 00:21:03 +00:00
|
|
|
par, err := lts(left.Parents())
|
2019-07-05 14:29:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
left = par
|
|
|
|
} else {
|
|
|
|
rightChain = append(rightChain, right)
|
2020-08-25 00:21:03 +00:00
|
|
|
par, err := lts(right.Parents())
|
2019-07-05 14:29:17 +00:00
|
|
|
if err != nil {
|
2019-07-31 07:13:49 +00:00
|
|
|
log.Infof("failed to fetch right.Parents: %s", err)
|
2019-07-05 14:29:17 +00:00
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
right = par
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return leftChain, rightChain, nil
|
2020-08-25 00:21:03 +00:00
|
|
|
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2020-06-23 21:51:25 +00:00
|
|
|
// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
|
2021-04-06 11:01:48 +00:00
|
|
|
func (cs *ChainStore) GetHeaviestTipSet() (ts *types.TipSet) {
|
|
|
|
cs.heaviestLk.RLock()
|
|
|
|
ts = cs.heaviest
|
|
|
|
cs.heaviestLk.RUnlock()
|
|
|
|
return
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-10-10 03:04:10 +00:00
|
|
|
func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error {
|
2019-09-03 04:36:07 +00:00
|
|
|
cs.tstLk.Lock()
|
|
|
|
defer cs.tstLk.Unlock()
|
|
|
|
|
|
|
|
tss := cs.tipsets[b.Height]
|
|
|
|
for _, oc := range tss {
|
|
|
|
if oc == b.Cid() {
|
2019-09-25 13:38:59 +00:00
|
|
|
log.Debug("tried to add block to tipset tracker that was already there")
|
2019-09-03 04:36:07 +00:00
|
|
|
return nil
|
|
|
|
}
|
2020-11-09 02:57:36 +00:00
|
|
|
h, err := cs.GetBlock(oc)
|
|
|
|
if err == nil && h != nil {
|
|
|
|
if h.Miner == b.Miner {
|
|
|
|
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", b.Miner, b.Height, b.Cid(), h.Cid())
|
|
|
|
}
|
|
|
|
}
|
2019-09-03 04:36:07 +00:00
|
|
|
}
|
2020-11-26 22:36:38 +00:00
|
|
|
// This function is called 5 times per epoch on average
|
|
|
|
// It is also called with tipsets that are done with initial validation
|
|
|
|
// so they cannot be from the future.
|
|
|
|
// We are guaranteed not to use tipsets older than 900 epochs (fork limit)
|
|
|
|
// This means that we ideally want to keep only most recent 900 epochs in here
|
|
|
|
// Golang's map iteration starts at a random point in a map.
|
|
|
|
// With 5 tries per epoch, and 900 entries to keep, on average we will have
|
|
|
|
// ~136 garbage entires in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
|
|
|
|
// Seems good enough to me
|
|
|
|
|
|
|
|
for height := range cs.tipsets {
|
|
|
|
if height < b.Height-build.Finality {
|
|
|
|
delete(cs.tipsets, height)
|
|
|
|
}
|
|
|
|
break
|
|
|
|
}
|
2019-09-03 04:36:07 +00:00
|
|
|
|
|
|
|
cs.tipsets[b.Height] = append(tss, b.Cid())
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-11-25 16:04:03 +00:00
|
|
|
func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error {
|
2019-11-12 10:18:46 +00:00
|
|
|
sbs := make([]block.Block, len(b))
|
|
|
|
|
|
|
|
for i, header := range b {
|
2019-11-25 16:04:03 +00:00
|
|
|
var err error
|
2019-11-12 10:18:46 +00:00
|
|
|
sbs[i], err = header.ToStorageBlock()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-11-25 16:04:03 +00:00
|
|
|
batchSize := 256
|
|
|
|
calls := len(b) / batchSize
|
|
|
|
|
|
|
|
var err error
|
|
|
|
for i := 0; i <= calls; i++ {
|
|
|
|
start := batchSize * i
|
|
|
|
end := start + batchSize
|
|
|
|
if end > len(b) {
|
|
|
|
end = len(b)
|
|
|
|
}
|
|
|
|
|
2021-02-28 22:48:36 +00:00
|
|
|
err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(sbs[start:end]))
|
2019-11-25 16:04:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return err
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-07-26 04:54:22 +00:00
|
|
|
type storable interface {
|
|
|
|
ToStorageBlock() (block.Block, error)
|
|
|
|
}
|
|
|
|
|
2019-11-19 03:24:48 +00:00
|
|
|
func PutMessage(bs bstore.Blockstore, m storable) (cid.Cid, error) {
|
2019-07-31 07:13:49 +00:00
|
|
|
b, err := m.ToStorageBlock()
|
2019-07-05 14:29:17 +00:00
|
|
|
if err != nil {
|
2019-07-26 04:54:22 +00:00
|
|
|
return cid.Undef, err
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-07-31 07:13:49 +00:00
|
|
|
if err := bs.Put(b); err != nil {
|
|
|
|
return cid.Undef, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return b.Cid(), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) {
|
2021-02-28 22:48:36 +00:00
|
|
|
return PutMessage(cs.chainBlockstore, m)
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-09-03 04:36:07 +00:00
|
|
|
func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) {
|
|
|
|
// Hold lock for the whole function for now, if it becomes a problem we can
|
|
|
|
// fix pretty easily
|
|
|
|
cs.tstLk.Lock()
|
|
|
|
defer cs.tstLk.Unlock()
|
|
|
|
|
|
|
|
all := []*types.BlockHeader{b}
|
|
|
|
|
|
|
|
tsets, ok := cs.tipsets[b.Height]
|
|
|
|
if !ok {
|
|
|
|
return types.NewTipSet(all)
|
|
|
|
}
|
|
|
|
|
2020-11-09 02:57:36 +00:00
|
|
|
inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()}
|
2019-09-03 04:36:07 +00:00
|
|
|
for _, bhc := range tsets {
|
|
|
|
if bhc == b.Cid() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
h, err := cs.GetBlock(bhc)
|
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
|
|
|
|
}
|
|
|
|
|
2020-11-09 02:57:36 +00:00
|
|
|
if cid, found := inclMiners[h.Miner]; found {
|
|
|
|
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid)
|
2019-12-10 11:07:07 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-09-03 04:36:07 +00:00
|
|
|
if types.CidArrsEqual(h.Parents, b.Parents) {
|
|
|
|
all = append(all, h)
|
2020-11-09 02:57:36 +00:00
|
|
|
inclMiners[h.Miner] = bhc
|
2019-09-03 04:36:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: other validation...?
|
|
|
|
|
|
|
|
return types.NewTipSet(all)
|
|
|
|
}
|
|
|
|
|
2019-10-15 04:33:29 +00:00
|
|
|
func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error {
|
2019-11-12 10:18:46 +00:00
|
|
|
if err := cs.PersistBlockHeaders(b); err != nil {
|
2019-07-05 14:29:17 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-09-03 04:36:07 +00:00
|
|
|
ts, err := cs.expandTipset(b)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-10-15 04:33:29 +00:00
|
|
|
if err := cs.MaybeTakeHeavierTipSet(ctx, ts); err != nil {
|
2019-11-22 16:20:56 +00:00
|
|
|
return xerrors.Errorf("MaybeTakeHeavierTipSet failed: %w", err)
|
2019-07-28 05:35:32 +00:00
|
|
|
}
|
2019-07-05 14:29:17 +00:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-07-25 22:15:03 +00:00
|
|
|
func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) {
|
2021-02-28 22:48:36 +00:00
|
|
|
data, err := cs.metadataDs.Get(dstore.NewKey("0"))
|
2019-07-05 14:29:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
c, err := cid.Cast(data)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2020-11-10 15:36:03 +00:00
|
|
|
return cs.GetBlock(c)
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2020-03-25 19:13:09 +00:00
|
|
|
func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) {
|
2019-10-08 05:51:34 +00:00
|
|
|
m, err := cs.GetMessage(c)
|
|
|
|
if err == nil {
|
|
|
|
return m, nil
|
|
|
|
}
|
2019-11-24 16:35:50 +00:00
|
|
|
if err != bstore.ErrNotFound {
|
2020-07-21 00:17:53 +00:00
|
|
|
log.Warnf("GetCMessage: unexpected error getting unsigned message: %s", err)
|
2019-11-24 16:35:50 +00:00
|
|
|
}
|
2019-10-08 05:51:34 +00:00
|
|
|
|
|
|
|
return cs.GetSignedMessage(c)
|
|
|
|
}
|
|
|
|
|
2019-08-01 20:40:47 +00:00
|
|
|
func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) {
|
2020-11-10 13:35:36 +00:00
|
|
|
var msg *types.Message
|
2021-02-28 22:48:36 +00:00
|
|
|
err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) {
|
2020-11-10 13:35:36 +00:00
|
|
|
msg, err = types.DecodeMessage(b)
|
|
|
|
return err
|
|
|
|
})
|
|
|
|
return msg, err
|
2019-08-01 20:40:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) {
|
2020-11-10 13:35:36 +00:00
|
|
|
var msg *types.SignedMessage
|
2021-02-28 22:48:36 +00:00
|
|
|
err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) {
|
2020-11-10 13:35:36 +00:00
|
|
|
msg, err = types.DecodeSignedMessage(b)
|
|
|
|
return err
|
|
|
|
})
|
|
|
|
return msg, err
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-09-17 01:56:37 +00:00
|
|
|
func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) {
|
2020-02-05 02:26:42 +00:00
|
|
|
ctx := context.TODO()
|
2020-09-28 21:25:58 +00:00
|
|
|
// block headers use adt0, for now.
|
2021-02-28 22:48:36 +00:00
|
|
|
a, err := blockadt.AsArray(cs.ActorStore(ctx), root)
|
2019-07-05 14:29:17 +00:00
|
|
|
if err != nil {
|
2019-09-17 01:56:37 +00:00
|
|
|
return nil, xerrors.Errorf("amt load: %w", err)
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2020-07-23 00:14:54 +00:00
|
|
|
var (
|
|
|
|
cids []cid.Cid
|
|
|
|
cborCid cbg.CborCid
|
|
|
|
)
|
|
|
|
if err := a.ForEach(&cborCid, func(i int64) error {
|
|
|
|
c := cid.Cid(cborCid)
|
|
|
|
cids = append(cids, c)
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return nil, xerrors.Errorf("failed to traverse amt: %w", err)
|
|
|
|
}
|
2019-07-05 14:29:17 +00:00
|
|
|
|
2020-07-23 00:14:54 +00:00
|
|
|
if uint64(len(cids)) != a.Length() {
|
|
|
|
return nil, xerrors.Errorf("found %d cids, expected %d", len(cids), a.Length())
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-07-17 03:05:55 +00:00
|
|
|
return cids, nil
|
|
|
|
}
|
|
|
|
|
2020-08-09 01:37:49 +00:00
|
|
|
type BlockMessages struct {
|
|
|
|
Miner address.Address
|
|
|
|
BlsMessages []types.ChainMsg
|
|
|
|
SecpkMessages []types.ChainMsg
|
|
|
|
WinCount int64
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) {
|
2019-09-29 06:29:03 +00:00
|
|
|
applied := make(map[address.Address]uint64)
|
|
|
|
|
2021-05-07 03:51:42 +00:00
|
|
|
cst := cbor.NewCborStore(cs.stateBlockstore)
|
|
|
|
st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot)
|
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("failed to load state tree")
|
|
|
|
}
|
|
|
|
|
2020-08-09 01:37:49 +00:00
|
|
|
selectMsg := func(m *types.Message) (bool, error) {
|
2021-05-07 03:51:42 +00:00
|
|
|
var sender address.Address
|
|
|
|
if ts.Height() >= build.UpgradeHyperdriveHeight {
|
|
|
|
sender, err = st.LookupID(m.From)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
sender = m.From
|
|
|
|
}
|
|
|
|
|
2020-09-09 09:13:51 +00:00
|
|
|
// The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise
|
2021-05-07 03:51:42 +00:00
|
|
|
if _, ok := applied[sender]; !ok {
|
|
|
|
applied[sender] = m.Nonce
|
2020-08-09 01:37:49 +00:00
|
|
|
}
|
|
|
|
|
2021-05-07 03:51:42 +00:00
|
|
|
if applied[sender] != m.Nonce {
|
2020-08-09 01:37:49 +00:00
|
|
|
return false, nil
|
|
|
|
}
|
2020-09-09 09:13:51 +00:00
|
|
|
|
2021-05-07 03:51:42 +00:00
|
|
|
applied[sender]++
|
2020-08-09 01:37:49 +00:00
|
|
|
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
var out []BlockMessages
|
2019-09-29 06:29:03 +00:00
|
|
|
for _, b := range ts.Blocks() {
|
2020-08-09 01:37:49 +00:00
|
|
|
|
2019-09-29 06:29:03 +00:00
|
|
|
bms, sms, err := cs.MessagesForBlock(b)
|
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("failed to get messages for block: %w", err)
|
|
|
|
}
|
|
|
|
|
2020-08-09 01:37:49 +00:00
|
|
|
bm := BlockMessages{
|
|
|
|
Miner: b.Miner,
|
|
|
|
BlsMessages: make([]types.ChainMsg, 0, len(bms)),
|
|
|
|
SecpkMessages: make([]types.ChainMsg, 0, len(sms)),
|
|
|
|
WinCount: b.ElectionProof.WinCount,
|
2019-09-29 06:29:03 +00:00
|
|
|
}
|
|
|
|
|
2020-08-09 01:37:49 +00:00
|
|
|
for _, bmsg := range bms {
|
|
|
|
b, err := selectMsg(bmsg.VMMessage())
|
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("failed to decide whether to select message for block: %w", err)
|
2019-09-29 06:29:03 +00:00
|
|
|
}
|
|
|
|
|
2020-08-09 01:37:49 +00:00
|
|
|
if b {
|
|
|
|
bm.BlsMessages = append(bm.BlsMessages, bmsg)
|
2019-09-29 06:29:03 +00:00
|
|
|
}
|
2020-08-09 01:37:49 +00:00
|
|
|
}
|
2019-09-29 06:29:03 +00:00
|
|
|
|
2020-08-09 01:37:49 +00:00
|
|
|
for _, smsg := range sms {
|
|
|
|
b, err := selectMsg(smsg.VMMessage())
|
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("failed to decide whether to select message for block: %w", err)
|
2019-09-29 06:29:03 +00:00
|
|
|
}
|
|
|
|
|
2020-08-09 01:37:49 +00:00
|
|
|
if b {
|
|
|
|
bm.SecpkMessages = append(bm.SecpkMessages, smsg)
|
2019-09-29 06:29:03 +00:00
|
|
|
}
|
2020-08-09 01:37:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
out = append(out, bm)
|
|
|
|
}
|
|
|
|
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) {
|
|
|
|
bmsgs, err := cs.BlockMsgsForTipset(ts)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var out []types.ChainMsg
|
|
|
|
for _, bm := range bmsgs {
|
|
|
|
for _, blsm := range bm.BlsMessages {
|
|
|
|
out = append(out, blsm)
|
|
|
|
}
|
2019-09-29 06:29:03 +00:00
|
|
|
|
2020-08-09 01:37:49 +00:00
|
|
|
for _, secm := range bm.SecpkMessages {
|
|
|
|
out = append(out, secm)
|
2019-09-29 06:29:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2019-10-24 06:24:58 +00:00
|
|
|
type mmCids struct {
|
|
|
|
bls []cid.Cid
|
|
|
|
secpk []cid.Cid
|
|
|
|
}
|
|
|
|
|
2020-09-02 18:43:05 +00:00
|
|
|
func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) {
|
2019-10-24 06:24:58 +00:00
|
|
|
o, ok := cs.mmCache.Get(mmc)
|
|
|
|
if ok {
|
|
|
|
mmcids := o.(*mmCids)
|
|
|
|
return mmcids.bls, mmcids.secpk, nil
|
|
|
|
}
|
|
|
|
|
2021-02-28 22:48:36 +00:00
|
|
|
cst := cbor.NewCborStore(cs.chainLocalBlockstore)
|
2019-08-01 20:40:47 +00:00
|
|
|
var msgmeta types.MsgMeta
|
2019-10-24 06:24:58 +00:00
|
|
|
if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil {
|
2020-03-21 00:18:57 +00:00
|
|
|
return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err)
|
2019-08-01 20:40:47 +00:00
|
|
|
}
|
|
|
|
|
2019-09-17 01:56:37 +00:00
|
|
|
blscids, err := cs.readAMTCids(msgmeta.BlsMessages)
|
2019-08-01 20:40:47 +00:00
|
|
|
if err != nil {
|
2019-11-22 16:20:56 +00:00
|
|
|
return nil, nil, xerrors.Errorf("loading bls message cids for block: %w", err)
|
2019-08-01 20:40:47 +00:00
|
|
|
}
|
|
|
|
|
2019-09-17 01:56:37 +00:00
|
|
|
secpkcids, err := cs.readAMTCids(msgmeta.SecpkMessages)
|
2019-08-01 20:40:47 +00:00
|
|
|
if err != nil {
|
2019-11-22 16:20:56 +00:00
|
|
|
return nil, nil, xerrors.Errorf("loading secpk message cids for block: %w", err)
|
2019-08-01 20:40:47 +00:00
|
|
|
}
|
|
|
|
|
2019-10-24 06:24:58 +00:00
|
|
|
cs.mmCache.Add(mmc, &mmCids{
|
|
|
|
bls: blscids,
|
|
|
|
secpk: secpkcids,
|
|
|
|
})
|
|
|
|
|
|
|
|
return blscids, secpkcids, nil
|
|
|
|
}
|
|
|
|
|
2020-04-23 22:15:00 +00:00
|
|
|
func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) {
|
2020-01-15 00:24:08 +00:00
|
|
|
fts, err := cs.LoadTipSet(from)
|
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("loading from tipset %s: %w", from, err)
|
|
|
|
}
|
|
|
|
tts, err := cs.LoadTipSet(to)
|
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("loading to tipset %s: %w", to, err)
|
|
|
|
}
|
|
|
|
revert, apply, err := cs.ReorgOps(fts, tts)
|
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("error getting tipset branches: %w", err)
|
|
|
|
}
|
|
|
|
|
2020-04-23 22:15:00 +00:00
|
|
|
path := make([]*api.HeadChange, len(revert)+len(apply))
|
2020-01-15 00:24:08 +00:00
|
|
|
for i, r := range revert {
|
2020-04-23 22:15:00 +00:00
|
|
|
path[i] = &api.HeadChange{Type: HCRevert, Val: r}
|
2020-01-15 00:24:08 +00:00
|
|
|
}
|
|
|
|
for j, i := 0, len(apply)-1; i >= 0; j, i = j+1, i-1 {
|
2020-04-23 22:15:00 +00:00
|
|
|
path[j+len(revert)] = &api.HeadChange{Type: HCApply, Val: apply[i]}
|
2020-01-15 00:24:08 +00:00
|
|
|
}
|
|
|
|
return path, nil
|
|
|
|
}
|
|
|
|
|
2019-10-24 06:24:58 +00:00
|
|
|
func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) {
|
2020-09-02 18:43:05 +00:00
|
|
|
blscids, secpkcids, err := cs.ReadMsgMetaCids(b.Messages)
|
2019-10-24 06:24:58 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
2019-08-01 20:40:47 +00:00
|
|
|
blsmsgs, err := cs.LoadMessagesFromCids(blscids)
|
|
|
|
if err != nil {
|
2019-11-22 16:20:56 +00:00
|
|
|
return nil, nil, xerrors.Errorf("loading bls messages for block: %w", err)
|
2019-08-01 20:40:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids)
|
2019-07-17 03:05:55 +00:00
|
|
|
if err != nil {
|
2019-11-22 16:20:56 +00:00
|
|
|
return nil, nil, xerrors.Errorf("loading secpk messages for block: %w", err)
|
2019-07-17 03:05:55 +00:00
|
|
|
}
|
|
|
|
|
2019-08-01 20:40:47 +00:00
|
|
|
return blsmsgs, secpkmsgs, nil
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
2019-10-03 20:22:21 +00:00
|
|
|
func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) {
|
2020-02-05 02:26:42 +00:00
|
|
|
ctx := context.TODO()
|
2020-09-28 21:25:58 +00:00
|
|
|
// block headers use adt0, for now.
|
2021-02-28 22:48:36 +00:00
|
|
|
a, err := blockadt.AsArray(cs.ActorStore(ctx), b.ParentMessageReceipts)
|
2019-07-17 03:05:55 +00:00
|
|
|
if err != nil {
|
2019-11-22 16:20:56 +00:00
|
|
|
return nil, xerrors.Errorf("amt load: %w", err)
|
2019-07-17 03:05:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var r types.MessageReceipt
|
2020-07-23 00:14:54 +00:00
|
|
|
if found, err := a.Get(uint64(i), &r); err != nil {
|
2019-07-17 03:05:55 +00:00
|
|
|
return nil, err
|
2020-07-23 00:14:54 +00:00
|
|
|
} else if !found {
|
|
|
|
return nil, xerrors.Errorf("failed to find receipt %d", i)
|
2019-07-17 03:05:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return &r, nil
|
|
|
|
}
|
|
|
|
|
2019-08-01 20:40:47 +00:00
|
|
|
func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, error) {
|
|
|
|
msgs := make([]*types.Message, 0, len(cids))
|
2019-07-31 07:13:49 +00:00
|
|
|
for i, c := range cids {
|
2019-07-05 14:29:17 +00:00
|
|
|
m, err := cs.GetMessage(c)
|
|
|
|
if err != nil {
|
2020-08-20 04:49:10 +00:00
|
|
|
return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err)
|
2019-07-05 14:29:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
msgs = append(msgs, m)
|
|
|
|
}
|
|
|
|
|
|
|
|
return msgs, nil
|
|
|
|
}
|
2019-07-18 20:26:04 +00:00
|
|
|
|
2019-08-01 20:40:47 +00:00
|
|
|
func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.SignedMessage, error) {
|
|
|
|
msgs := make([]*types.SignedMessage, 0, len(cids))
|
|
|
|
for i, c := range cids {
|
|
|
|
m, err := cs.GetSignedMessage(c)
|
|
|
|
if err != nil {
|
2020-08-20 04:49:10 +00:00
|
|
|
return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err)
|
2019-08-01 20:40:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
msgs = append(msgs, m)
|
|
|
|
}
|
|
|
|
|
|
|
|
return msgs, nil
|
|
|
|
}
|
|
|
|
|
2021-02-28 22:48:36 +00:00
|
|
|
// ChainBlockstore returns the chain blockstore. Currently the chain and state
|
|
|
|
// // stores are both backed by the same physical store, albeit with different
|
|
|
|
// // caching policies, but in the future they will segregate.
|
|
|
|
func (cs *ChainStore) ChainBlockstore() bstore.Blockstore {
|
|
|
|
return cs.chainBlockstore
|
|
|
|
}
|
|
|
|
|
|
|
|
// StateBlockstore returns the state blockstore. Currently the chain and state
|
|
|
|
// stores are both backed by the same physical store, albeit with different
|
|
|
|
// caching policies, but in the future they will segregate.
|
|
|
|
func (cs *ChainStore) StateBlockstore() bstore.Blockstore {
|
|
|
|
return cs.stateBlockstore
|
2019-07-26 04:54:22 +00:00
|
|
|
}
|
2019-08-02 00:13:57 +00:00
|
|
|
|
2020-07-23 02:05:11 +00:00
|
|
|
func ActorStore(ctx context.Context, bs bstore.Blockstore) adt.Store {
|
2020-07-23 00:14:54 +00:00
|
|
|
return adt.WrapStore(ctx, cbor.NewCborStore(bs))
|
2020-02-08 02:18:32 +00:00
|
|
|
}
|
|
|
|
|
2021-02-28 22:48:36 +00:00
|
|
|
func (cs *ChainStore) ActorStore(ctx context.Context) adt.Store {
|
|
|
|
return ActorStore(ctx, cs.stateBlockstore)
|
2020-02-08 02:18:32 +00:00
|
|
|
}
|
|
|
|
|
2020-07-18 13:46:47 +00:00
|
|
|
func (cs *ChainStore) VMSys() vm.SyscallBuilder {
|
2020-01-13 20:47:27 +00:00
|
|
|
return cs.vmcalls
|
|
|
|
}
|
|
|
|
|
2019-08-02 00:13:57 +00:00
|
|
|
func (cs *ChainStore) TryFillTipSet(ts *types.TipSet) (*FullTipSet, error) {
|
|
|
|
var out []*types.FullBlock
|
|
|
|
|
|
|
|
for _, b := range ts.Blocks() {
|
2019-08-01 20:40:47 +00:00
|
|
|
bmsgs, smsgs, err := cs.MessagesForBlock(b)
|
2019-08-02 00:13:57 +00:00
|
|
|
if err != nil {
|
2019-08-02 00:57:29 +00:00
|
|
|
// TODO: check for 'not found' errors, and only return nil if this
|
|
|
|
// is actually a 'not found' error
|
2019-08-02 00:13:57 +00:00
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
fb := &types.FullBlock{
|
2019-08-01 20:40:47 +00:00
|
|
|
Header: b,
|
|
|
|
BlsMessages: bmsgs,
|
|
|
|
SecpkMessages: smsgs,
|
2019-08-02 00:13:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
out = append(out, fb)
|
|
|
|
}
|
|
|
|
return NewFullTipSet(out), nil
|
|
|
|
}
|
2019-09-06 06:26:02 +00:00
|
|
|
|
2020-04-08 15:11:42 +00:00
|
|
|
func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
2020-02-23 20:00:47 +00:00
|
|
|
h := blake2b.New256()
|
|
|
|
if err := binary.Write(h, binary.BigEndian, int64(pers)); err != nil {
|
|
|
|
return nil, xerrors.Errorf("deriving randomness: %w", err)
|
|
|
|
}
|
2020-04-08 15:11:42 +00:00
|
|
|
VRFDigest := blake2b.Sum256(rbase)
|
2020-05-27 20:53:20 +00:00
|
|
|
_, err := h.Write(VRFDigest[:])
|
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("hashing VRFDigest: %w", err)
|
|
|
|
}
|
2020-02-23 20:00:47 +00:00
|
|
|
if err := binary.Write(h, binary.BigEndian, round); err != nil {
|
|
|
|
return nil, xerrors.Errorf("deriving randomness: %w", err)
|
|
|
|
}
|
2020-05-27 20:53:20 +00:00
|
|
|
_, err = h.Write(entropy)
|
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("hashing entropy: %w", err)
|
|
|
|
}
|
2019-09-09 20:03:10 +00:00
|
|
|
|
2020-02-23 20:00:47 +00:00
|
|
|
return h.Sum(nil), nil
|
2019-11-19 15:53:00 +00:00
|
|
|
}
|
2019-09-06 06:26:02 +00:00
|
|
|
|
2021-05-26 23:03:46 +00:00
|
|
|
func (cs *ChainStore) GetBeaconRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
|
|
|
return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, true)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cs *ChainStore) GetBeaconRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
|
|
|
return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, false)
|
|
|
|
}

// GetBeaconRandomness draws randomness for the given round from the latest
// beacon entry found at the requested epoch (resolving null rounds according
// to the lookback flag).
func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
	_, span := trace.StartSpan(ctx, "store.GetBeaconRandomness")
	defer span.End()
	span.AddAttributes(trace.Int64Attribute("round", int64(round)))

	ts, err := cs.LoadTipSet(types.NewTipSetKey(blks...))
	if err != nil {
		return nil, err
	}

	if round > ts.Height() {
		return nil, xerrors.Errorf("cannot draw randomness from the future")
	}

	searchHeight := round
	if searchHeight < 0 {
		searchHeight = 0
	}

	randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback)
	if err != nil {
		return nil, err
	}

	be, err := cs.GetLatestBeaconEntry(randTs)
	if err != nil {
		return nil, err
	}

	// if at (or just past -- for null epochs) appropriate epoch
	// or at genesis (works for negative epochs)
	return DrawRandomness(be.Data, pers, round, entropy)
}
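
// Editor note (not part of the original source): the lookback flag is
// forwarded to GetTipsetByHeight as 'prev', so when the requested round is a
// null round a "looking back" call resolves to the tipset before it, while a
// "looking forward" call resolves to the one after it.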

func (cs *ChainStore) GetChainRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
	return cs.GetChainRandomness(ctx, blks, pers, round, entropy, true)
}

func (cs *ChainStore) GetChainRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
	return cs.GetChainRandomness(ctx, blks, pers, round, entropy, false)
}

// GetChainRandomness draws randomness for the given round from the ticket VRF
// proof of the minimum-ticket block of the tipset at that epoch (resolving
// null rounds according to the lookback flag).
func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
	_, span := trace.StartSpan(ctx, "store.GetChainRandomness")
	defer span.End()
	span.AddAttributes(trace.Int64Attribute("round", int64(round)))

	ts, err := cs.LoadTipSet(types.NewTipSetKey(blks...))
	if err != nil {
		return nil, err
	}

	if round > ts.Height() {
		return nil, xerrors.Errorf("cannot draw randomness from the future")
	}

	searchHeight := round
	if searchHeight < 0 {
		searchHeight = 0
	}

	randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback)
	if err != nil {
		return nil, err
	}

	mtb := randTs.MinTicketBlock()

	// if at (or just past -- for null epochs) appropriate epoch
	// or at genesis (works for negative epochs)
	return DrawRandomness(mtb.Ticket.VRFProof, pers, round, entropy)
}
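
// Editor note (not part of the original source): contrast with
// GetBeaconRandomness above -- chain randomness is seeded by the ticket VRF
// proof of the resolved tipset's minimum-ticket block, while beacon randomness
// is seeded by the latest drand beacon entry, e.g.:
//
//	r, err := cs.GetChainRandomnessLookingBack(ctx, ts.Cids(), pers, round, entropy)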

// GetTipsetByHeight returns the tipset on the chain behind 'ts' at the given
// height. In the case that the given height is a null round, the 'prev' flag
// selects the tipset before the null round if true, and the tipset following
// the null round if false.
func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) {
	if ts == nil {
		ts = cs.GetHeaviestTipSet()
	}

	if h > ts.Height() {
		return nil, xerrors.Errorf("looking for tipset with height greater than start point")
	}

	if h == ts.Height() {
		return ts, nil
	}

	lbts, err := cs.cindex.GetTipsetByHeight(ctx, ts, h)
	if err != nil {
		return nil, err
	}

	if lbts.Height() < h {
		log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h)
		lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ts, h)
		if err != nil {
			return nil, err
		}
	}

	if lbts.Height() == h || !prev {
		return lbts, nil
	}

	return cs.LoadTipSet(lbts.Parents())
}
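
// Editor note (sketch, not part of the original source): resolving the tipset
// at a possibly-null epoch, preferring the earlier tipset when the epoch is
// null, and starting from the current head:
//
//	lbts, err := cs.GetTipsetByHeight(ctx, epoch, nil, true)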

func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {
	if root.Prefix().Codec != cid.DagCBOR {
		return in, nil
	}

	data, err := bs.Get(root)
	if err != nil {
		return nil, xerrors.Errorf("recurse links get (%s) failed: %w", root, err)
	}

	var rerr error
	err = cbg.ScanForLinks(bytes.NewReader(data.RawData()), func(c cid.Cid) {
		if rerr != nil {
			// No error return on ScanForLinks :(
			return
		}

		// traversed this already...
		if !walked.Visit(c) {
			return
		}

		in = append(in, c)
		var err error
		in, err = recurseLinks(bs, walked, c, in)
		if err != nil {
			rerr = err
		}
	})
	if err != nil {
		return nil, xerrors.Errorf("scanning for links failed: %w", err)
	}

	return in, rerr
}
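
// Editor note (not part of the original source): recurseLinks only descends
// into dag-cbor objects; links with other codecs are still recorded in the
// result slice but are never dereferenced or walked further.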

// Export writes a CAR snapshot of the chain rooted at ts to w, walking both
// the chain and state blockstores via WalkSnapshot.
func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error {
	h := &car.CarHeader{
		Roots:   ts.Cids(),
		Version: 1,
	}

	if err := car.WriteHeader(h, w); err != nil {
		return xerrors.Errorf("failed to write car header: %w", err)
	}

	unionBs := bstore.Union(cs.stateBlockstore, cs.chainBlockstore)
	return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error {
		blk, err := unionBs.Get(c)
		if err != nil {
			return xerrors.Errorf("writing object to car, bs.Get: %w", err)
		}

		if err := carutil.LdWrite(w, c.Bytes(), blk.RawData()); err != nil {
			return xerrors.Errorf("failed to write block to car output: %w", err)
		}

		return nil
	})
}
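
// Editor note (hedged sketch, not part of the original source): exporting a
// snapshot to a file; the 2000-epoch recent-roots window below is an arbitrary
// illustration, not a recommended value:
//
//	f, err := os.Create("snapshot.car")
//	if err != nil {
//		return err
//	}
//	defer f.Close() //nolint:errcheck
//	if err := cs.Export(ctx, head, abi.ChainEpoch(2000), true, f); err != nil {
//		return err
//	}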

// WalkSnapshot walks the chain from ts back to genesis, invoking cb for every
// block header CID and, depending on skipOldMsgs, skipMsgReceipts and the
// inclRecentRoots window, for the message, receipt and state-root objects
// reachable from each header.
func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs, skipMsgReceipts bool, cb func(cid.Cid) error) error {
	if ts == nil {
		ts = cs.GetHeaviestTipSet()
	}

	seen := cid.NewSet()
	walked := cid.NewSet()

	blocksToWalk := ts.Cids()
	currentMinHeight := ts.Height()

	walkChain := func(blk cid.Cid) error {
		if !seen.Visit(blk) {
			return nil
		}

		if err := cb(blk); err != nil {
			return err
		}

		data, err := cs.chainBlockstore.Get(blk)
		if err != nil {
			return xerrors.Errorf("getting block: %w", err)
		}

		var b types.BlockHeader
		if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil {
			return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err)
		}

		if currentMinHeight > b.Height {
			currentMinHeight = b.Height
			if currentMinHeight%builtin.EpochsInDay == 0 {
				log.Infow("export", "height", currentMinHeight)
			}
		}

		var cids []cid.Cid
		if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots {
			if walked.Visit(b.Messages) {
				mcids, err := recurseLinks(cs.chainBlockstore, walked, b.Messages, []cid.Cid{b.Messages})
				if err != nil {
					return xerrors.Errorf("recursing messages failed: %w", err)
				}
				cids = mcids
			}
		}

		if b.Height > 0 {
			for _, p := range b.Parents {
				blocksToWalk = append(blocksToWalk, p)
			}
		} else {
			// include the genesis block
			cids = append(cids, b.Parents...)
		}

		out := cids

		if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots {
			if walked.Visit(b.ParentStateRoot) {
				cids, err := recurseLinks(cs.stateBlockstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot})
				if err != nil {
					return xerrors.Errorf("recursing genesis state failed: %w", err)
				}

				out = append(out, cids...)
			}

			if !skipMsgReceipts && walked.Visit(b.ParentMessageReceipts) {
				out = append(out, b.ParentMessageReceipts)
			}
		}

		for _, c := range out {
			if seen.Visit(c) {
				if c.Prefix().Codec != cid.DagCBOR {
					continue
				}

				if err := cb(c); err != nil {
					return err
				}
			}
		}

		return nil
	}

	log.Infow("export started")
	exportStart := build.Clock.Now()

	for len(blocksToWalk) > 0 {
		next := blocksToWalk[0]
		blocksToWalk = blocksToWalk[1:]
		if err := walkChain(next); err != nil {
			return xerrors.Errorf("walk chain failed: %w", err)
		}
	}

	log.Infow("export finished", "duration", build.Clock.Now().Sub(exportStart).Seconds())

	return nil
}

// Import reads a CAR file (such as one produced by Export) into the store and
// returns the tipset named by the CAR header roots.
func (cs *ChainStore) Import(r io.Reader) (*types.TipSet, error) {
	// TODO: writing only to the state blockstore is incorrect.
	//  At this time, both the state and chain blockstores are backed by the
	//  universal store. When we physically segregate the stores, we will need
	//  to route state objects to the state blockstore, and chain objects to
	//  the chain blockstore.
	header, err := car.LoadCar(cs.StateBlockstore(), r)
	if err != nil {
		return nil, xerrors.Errorf("loadcar failed: %w", err)
	}

	root, err := cs.LoadTipSet(types.NewTipSetKey(header.Roots...))
	if err != nil {
		return nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err)
	}

	return root, nil
}
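
// Editor note (not part of the original source): Import is the counterpart of
// Export above; callers typically follow it by setting the returned tipset as
// the new head (via a SetHead-style method assumed to live elsewhere in this
// package).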

func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry, error) {
	cur := ts
	for i := 0; i < 20; i++ {
		cbe := cur.Blocks()[0].BeaconEntries
		if len(cbe) > 0 {
			return &cbe[len(cbe)-1], nil
		}

		if cur.Height() == 0 {
			return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry")
		}

		next, err := cs.LoadTipSet(cur.Parents())
		if err != nil {
			return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err)
		}
		cur = next
	}

	if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
		return &types.BeaconEntry{
			Data: []byte{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9},
		}, nil
	}

	return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}

type chainRand struct {
	cs   *ChainStore
	blks []cid.Cid
}

func NewChainRand(cs *ChainStore, blks []cid.Cid) vm.Rand {
	return &chainRand{
		cs:   cs,
		blks: blks,
	}
}
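
// Editor note (not part of the original source): chainRand adapts a ChainStore
// plus a fixed set of block CIDs into the vm.Rand interface consumed by the
// VM, e.g.:
//
//	r := NewChainRand(cs, ts.Cids())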

func (cr *chainRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
	return cr.cs.GetChainRandomnessLookingBack(ctx, cr.blks, pers, round, entropy)
}

func (cr *chainRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
	return cr.cs.GetChainRandomnessLookingForward(ctx, cr.blks, pers, round, entropy)
}

func (cr *chainRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
	return cr.cs.GetBeaconRandomnessLookingBack(ctx, cr.blks, pers, round, entropy)
}

func (cr *chainRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
	return cr.cs.GetBeaconRandomnessLookingForward(ctx, cr.blks, pers, round, entropy)
}

func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) {
	if tsk.IsEmpty() {
		return cs.GetHeaviestTipSet(), nil
	}

	return cs.LoadTipSet(tsk)
}