// lotus/chain/store/store.go

package store

import (
	"context"
	"encoding/json"
	"errors"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"

	lru "github.com/hashicorp/golang-lru"
	block "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	dstore "github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/query"
	cbor "github.com/ipfs/go-ipld-cbor"
	logging "github.com/ipfs/go-log/v2"
	"go.opencensus.io/stats"
	"go.opencensus.io/trace"
	"go.uber.org/multierr"
	"golang.org/x/sync/errgroup"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/pubsub"

	"github.com/filecoin-project/lotus/api"
	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/journal"
	"github.com/filecoin-project/lotus/metrics"
)

var log = logging.Logger("chainstore")

var (
	chainHeadKey                  = dstore.NewKey("head")
	checkpointKey                 = dstore.NewKey("/chain/checks")
	blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
)

var DefaultTipSetCacheSize = 8192
var DefaultMsgMetaCacheSize = 2048

var ErrNotifeeDone = errors.New("notifee is done and should be removed")

func init() {
	if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" {
		tscs, err := strconv.Atoi(s)
		if err != nil {
			log.Errorf("failed to parse 'LOTUS_CHAIN_TIPSET_CACHE' env var: %s", err)
		} else {
			DefaultTipSetCacheSize = tscs
		}
	}

	if s := os.Getenv("LOTUS_CHAIN_MSGMETA_CACHE"); s != "" {
		mmcs, err := strconv.Atoi(s)
		if err != nil {
			log.Errorf("failed to parse 'LOTUS_CHAIN_MSGMETA_CACHE' env var: %s", err)
		} else {
			DefaultMsgMetaCacheSize = mmcs
		}
	}
}
// ReorgNotifee represents a callback that gets called upon reorgs.
type ReorgNotifee = func(rev, app []*types.TipSet) error
// Journal event types.
const (
evtTypeHeadChange = iota
)
type HeadChangeEvt struct {
From types.TipSetKey
FromHeight abi.ChainEpoch
To types.TipSetKey
ToHeight abi.ChainEpoch
RevertCount int
ApplyCount int
}
type WeightFunc func(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error)
// ChainStore is the main point of access to chain data.
//
// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
// latest head tipset references) being tracked in the Datastore (key-value
// store).
//
// To alleviate disk access, the ChainStore has two ARC caches:
// 1. a tipset cache
// 2. a block => messages references cache.
type ChainStore struct {
	chainBlockstore      bstore.Blockstore
	stateBlockstore      bstore.Blockstore
	metadataDs           dstore.Batching
	weight               WeightFunc
	chainLocalBlockstore bstore.Blockstore

	heaviestLk sync.RWMutex
	heaviest   *types.TipSet
	checkpoint *types.TipSet

	bestTips *pubsub.PubSub
	pubLk    sync.Mutex

	tstLk   sync.Mutex
	tipsets map[abi.ChainEpoch][]cid.Cid

	cindex *ChainIndex

	reorgCh        chan<- reorg
	reorgNotifeeCh chan ReorgNotifee

	mmCache *lru.ARCCache // msg meta cache (mh.Messages -> secp, bls []cid)
	tsCache *lru.ARCCache

	evtTypes [1]journal.EventType
	journal  journal.Journal

	cancelFn context.CancelFunc
	wg       sync.WaitGroup
}
func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, weight WeightFunc, j journal.Journal) *ChainStore {
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
if j == nil {
j = journal.NilJournal()
}
ctx, cancel := context.WithCancel(context.Background())
// unwraps the fallback store in case one is configured.
// some methods _need_ to operate on a local blockstore only.
localbs, _ := bstore.UnwrapFallbackStore(chainBs)
cs := &ChainStore{
chainBlockstore: chainBs,
stateBlockstore: stateBs,
chainLocalBlockstore: localbs,
weight: weight,
metadataDs: ds,
bestTips: pubsub.New(64),
tipsets: make(map[abi.ChainEpoch][]cid.Cid),
mmCache: c,
tsCache: tsc,
cancelFn: cancel,
journal: j,
}
cs.evtTypes = [1]journal.EventType{
evtTypeHeadChange: j.RegisterEventType("sync", "head_change"),
}
ci := NewChainIndex(cs.LoadTipSet)
cs.cindex = ci
hcnf := func(rev, app []*types.TipSet) error {
cs.pubLk.Lock()
defer cs.pubLk.Unlock()
notif := make([]*api.HeadChange, len(rev)+len(app))
for i, r := range rev {
notif[i] = &api.HeadChange{
Type: HCRevert,
Val: r,
}
}
for i, r := range app {
notif[i+len(rev)] = &api.HeadChange{
Type: HCApply,
Val: r,
}
}
cs.bestTips.Pub(notif, "headchange")
return nil
}
hcmetric := func(rev, app []*types.TipSet) error {
for _, r := range app {
stats.Record(context.Background(), metrics.ChainNodeHeight.M(int64(r.Height())))
}
return nil
}
cs.reorgNotifeeCh = make(chan ReorgNotifee)
cs.reorgCh = cs.reorgWorker(ctx, []ReorgNotifee{hcnf, hcmetric})
return cs
}
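
// Illustrative usage sketch (not part of the original file): given
// caller-supplied chain/state blockstores and a metadata datastore, build a
// ChainStore and load its persisted head and checkpoint. The constant-weight
// function is a hypothetical stand-in for a real fork-choice weight supplied
// by the consensus layer; exampleOpenChainStore is not an API of this package.
func exampleOpenChainStore(ctx context.Context, chainBs, stateBs bstore.Blockstore, ds dstore.Batching) (*ChainStore, error) {
	constWeight := func(ctx context.Context, _ bstore.Blockstore, _ *types.TipSet) (types.BigInt, error) {
		return types.NewInt(0), nil // hypothetical: every tipset weighs the same
	}
	cs := NewChainStore(chainBs, stateBs, ds, constWeight, journal.NilJournal())
	if err := cs.Load(ctx); err != nil {
		return nil, xerrors.Errorf("loading chain state: %w", err)
	}
	return cs, nil
}
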
func (cs *ChainStore) Close() error {
cs.cancelFn()
cs.wg.Wait()
return nil
}
func (cs *ChainStore) Load(ctx context.Context) error {
if err := cs.loadHead(ctx); err != nil {
return err
}
if err := cs.loadCheckpoint(ctx); err != nil {
return err
}
return nil
}
func (cs *ChainStore) loadHead(ctx context.Context) error {
head, err := cs.metadataDs.Get(ctx, chainHeadKey)
if err == dstore.ErrNotFound {
log.Warn("no previous chain state found")
return nil
}
if err != nil {
return xerrors.Errorf("failed to load chain state from datastore: %w", err)
}
var tscids []cid.Cid
if err := json.Unmarshal(head, &tscids); err != nil {
return xerrors.Errorf("failed to unmarshal stored chain head: %w", err)
}
ts, err := cs.LoadTipSet(ctx, types.NewTipSetKey(tscids...))
if err != nil {
return xerrors.Errorf("loading tipset: %w", err)
}
cs.heaviest = ts
return nil
}
func (cs *ChainStore) loadCheckpoint(ctx context.Context) error {
tskBytes, err := cs.metadataDs.Get(ctx, checkpointKey)
if err == dstore.ErrNotFound {
return nil
}
if err != nil {
return xerrors.Errorf("failed to load checkpoint from datastore: %w", err)
}
var tsk types.TipSetKey
err = json.Unmarshal(tskBytes, &tsk)
if err != nil {
return err
}
ts, err := cs.LoadTipSet(ctx, tsk)
if err != nil {
return xerrors.Errorf("loading tipset: %w", err)
}
cs.checkpoint = ts
return nil
}
func (cs *ChainStore) writeHead(ctx context.Context, ts *types.TipSet) error {
data, err := json.Marshal(ts.Cids())
if err != nil {
return xerrors.Errorf("failed to marshal tipset: %w", err)
}
if err := cs.metadataDs.Put(ctx, chainHeadKey, data); err != nil {
return xerrors.Errorf("failed to write chain head to datastore: %w", err)
}
return nil
}
const (
HCRevert = "revert"
HCApply = "apply"
HCCurrent = "current"
)
func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange {
cs.pubLk.Lock()
subch := cs.bestTips.Sub("headchange")
head := cs.GetHeaviestTipSet()
cs.pubLk.Unlock()
out := make(chan []*api.HeadChange, 16)
out <- []*api.HeadChange{{
Type: HCCurrent,
Val: head,
}}
go func() {
defer func() {
// Tell the caller we're done first, the following may block for a bit.
close(out)
// Unsubscribe.
cs.bestTips.Unsub(subch)
// Drain the channel.
for range subch {
}
}()
for {
select {
case val, ok := <-subch:
if !ok {
// Shutting down.
return
}
select {
case out <- val.([]*api.HeadChange):
default:
log.Errorf("closing head change subscription due to slow reader")
return
}
if len(out) > 5 {
log.Warnf("head change sub is slow, has %d buffered entries", len(out))
}
case <-ctx.Done():
return
}
}
}()
return out
}
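
// Illustrative usage sketch (not part of the original file): consume the
// buffered head-change subscription until the context is cancelled. The first
// slice delivered always contains a single HCCurrent entry carrying the head
// at subscription time. exampleWatchHead is a hypothetical helper, not an API
// of this package.
func exampleWatchHead(ctx context.Context, cs *ChainStore) {
	for changes := range cs.SubHeadChanges(ctx) {
		for _, hc := range changes {
			log.Infow("head change", "type", hc.Type, "tipset", hc.Val.Key(), "height", hc.Val.Height())
		}
	}
}
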
func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) {
cs.reorgNotifeeCh <- f
}
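
// Illustrative usage sketch (not part of the original file): register a
// ReorgNotifee that logs reverted and applied tipsets and removes itself by
// returning ErrNotifeeDone once the chain reaches a hypothetical stopHeight.
// exampleLogHeadChanges is not an API of this package.
func exampleLogHeadChanges(cs *ChainStore, stopHeight abi.ChainEpoch) {
	cs.SubscribeHeadChanges(func(rev, app []*types.TipSet) error {
		for _, ts := range rev {
			log.Infow("reverted", "tipset", ts.Key(), "height", ts.Height())
		}
		for _, ts := range app {
			log.Infow("applied", "tipset", ts.Key(), "height", ts.Height())
			if ts.Height() >= stopHeight {
				return ErrNotifeeDone // the reorg worker drops this notifee
			}
		}
		return nil
	})
}
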
func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
return cs.metadataDs.Has(ctx, key)
2020-06-14 09:49:20 +00:00
}
func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
if err := cs.metadataDs.Put(ctx, key, []byte{0}); err != nil {
return xerrors.Errorf("cache block validation: %w", err)
}
return nil
}
func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
if err := cs.metadataDs.Delete(ctx, key); err != nil {
return xerrors.Errorf("removing from valid block cache: %w", err)
}
return nil
}
func (cs *ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) error {
ts, err := types.NewTipSet([]*types.BlockHeader{b})
if err != nil {
return err
}
if err := cs.PutTipSet(ctx, ts); err != nil {
return err
}
return cs.metadataDs.Put(ctx, dstore.NewKey("0"), b.Cid().Bytes())
}
func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
for _, b := range ts.Blocks() {
if err := cs.PersistBlockHeaders(ctx, b); err != nil {
return err
}
}
expanded, err := cs.expandTipset(ctx, ts.Blocks()[0])
if err != nil {
return xerrors.Errorf("errored while expanding tipset: %w", err)
}
log.Debugf("expanded %s into %s\n", ts.Cids(), expanded.Cids())
if err := cs.MaybeTakeHeavierTipSet(ctx, expanded); err != nil {
return xerrors.Errorf("MaybeTakeHeavierTipSet failed in PutTipSet: %w", err)
}
return nil
}
// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
// internal state as our new head, if and only if it is heavier than the current
// head and does not exceed the maximum fork length.
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
for {
cs.heaviestLk.Lock()
if len(cs.reorgCh) < reorgChBuf/2 {
break
}
cs.heaviestLk.Unlock()
log.Errorf("reorg channel is heavily backlogged, waiting a bit before trying to process new tipsets")
select {
case <-time.After(time.Second / 2):
case <-ctx.Done():
return ctx.Err()
}
}
defer cs.heaviestLk.Unlock()
w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
if err != nil {
return err
}
heaviestW, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
if err != nil {
return err
}
heavier := w.GreaterThan(heaviestW)
if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
heavier = breakWeightTie(ts, cs.heaviest)
}
if heavier {
// TODO: don't do this for initial sync. Now that we don't have a
// difference between 'bootstrap sync' and 'caught up' sync, we need
// some other heuristic.
exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, ts)
if err != nil {
return err
}
if exceeds {
return nil
}
return cs.takeHeaviestTipSet(ctx, ts)
}
return nil
}
// Check if the two tipsets have a fork length above `ForkLengthThreshold`.
// `synced` is the head of the chain we are currently synced to and `external`
// is the incoming tipset potentially belonging to a forked chain. It assumes
// the external chain has already been validated and is available in the
// ChainStore. The "fast forward" case is covered in this logic as a valid
// fork of length 0.
//
// FIXME: We may want to replace some of the logic in `syncFork()` with this.
//
// `syncFork()` counts the length on both sides of the fork at the moment (we
// need to settle on that) but here we just enforce it on the `synced` side.
func (cs *ChainStore) exceedsForkLength(ctx context.Context, synced, external *types.TipSet) (bool, error) {
if synced == nil || external == nil {
// FIXME: If `cs.heaviest` is nil we should just bypass the entire
// `MaybeTakeHeavierTipSet` logic (instead of each of the called
// functions having to handle the nil case on their own).
return false, nil
}
var err error
// `forkLength`: number of tipsets we need to walk back from our `synced`
// chain to the common ancestor with the new `external` head in order to
// adopt the fork.
for forkLength := 0; forkLength < int(build.ForkLengthThreshold); forkLength++ {
// First walk back as many tipsets in the external chain to match the
// `synced` height to compare them. If we go past the `synced` height
// the subsequent match will fail but it will still be useful to get
// closer to the `synced` head parent's height in the next loop.
for external.Height() > synced.Height() {
if external.Height() == 0 {
// We reached the genesis of the external chain without a match;
// this is considered a fork outside the allowed limit (of "infinite"
// length).
return true, nil
}
external, err = cs.LoadTipSet(ctx, external.Parents())
if err != nil {
return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err)
}
}
// Now check if we arrived at the common ancestor.
if synced.Equals(external) {
return false, nil
}
// Now check to see if we've walked back to the checkpoint.
if synced.Equals(cs.checkpoint) {
return true, nil
}
// If we didn't, go back *one* tipset on the `synced` side (incrementing
// the `forkLength`).
if synced.Height() == 0 {
// Same check as the `external` side, if we reach the start (genesis)
// there is no common ancestor.
return true, nil
}
synced, err = cs.LoadTipSet(ctx, synced.Parents())
if err != nil {
return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err)
}
}
// We traversed the fork length allowed without finding a common ancestor.
return true, nil
}
// ForceHeadSilent forces a chain head tipset without triggering a reorg
// operation.
//
// CAUTION: Use it only for testing, such as to teleport the chain to a
// particular tipset to carry out a benchmark, verification, etc. on a chain
// segment.
func (cs *ChainStore) ForceHeadSilent(ctx context.Context, ts *types.TipSet) error {
log.Warnf("(!!!) forcing a new head silently; new head: %s", ts)
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
if err := cs.removeCheckpoint(ctx); err != nil {
return err
}
cs.heaviest = ts
err := cs.writeHead(ctx, ts)
if err != nil {
err = xerrors.Errorf("failed to write chain head: %s", err)
}
return err
}
type reorg struct {
old *types.TipSet
new *types.TipSet
}
const reorgChBuf = 32
func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNotifee) chan<- reorg {
out := make(chan reorg, reorgChBuf)
notifees := make([]ReorgNotifee, len(initialNotifees))
copy(notifees, initialNotifees)
cs.wg.Add(1)
go func() {
defer cs.wg.Done()
defer log.Warn("reorgWorker quit")
for {
select {
case n := <-cs.reorgNotifeeCh:
notifees = append(notifees, n)
case r := <-out:
revert, apply, err := cs.ReorgOps(ctx, r.old, r.new)
if err != nil {
log.Error("computing reorg ops failed: ", err)
continue
}
cs.journal.RecordEvent(cs.evtTypes[evtTypeHeadChange], func() interface{} {
return HeadChangeEvt{
From: r.old.Key(),
FromHeight: r.old.Height(),
To: r.new.Key(),
ToHeight: r.new.Height(),
RevertCount: len(revert),
ApplyCount: len(apply),
}
})
// reverse the apply array
for i := len(apply)/2 - 1; i >= 0; i-- {
opp := len(apply) - 1 - i
apply[i], apply[opp] = apply[opp], apply[i]
}
var toremove map[int]struct{}
for i, hcf := range notifees {
err := hcf(revert, apply)
switch err {
case nil:
case ErrNotifeeDone:
if toremove == nil {
toremove = make(map[int]struct{})
}
toremove[i] = struct{}{}
default:
log.Error("head change func errored (BAD): ", err)
}
}
if len(toremove) > 0 {
newNotifees := make([]ReorgNotifee, 0, len(notifees)-len(toremove))
for i, hcf := range notifees {
_, remove := toremove[i]
if remove {
continue
}
newNotifees = append(newNotifees, hcf)
}
notifees = newNotifees
}
case <-ctx.Done():
return
}
}
}()
return out
}
// takeHeaviestTipSet actually sets the incoming tipset as our head both in
// memory and in the ChainStore. It also sends a notification to deliver to
// ReorgNotifees.
func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error {
_, span := trace.StartSpan(ctx, "takeHeaviestTipSet")
defer span.End()
if cs.heaviest != nil { // buf
if len(cs.reorgCh) > 0 {
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
}
cs.reorgCh <- reorg{
old: cs.heaviest,
new: ts,
}
} else {
log.Warnf("no heaviest tipset found, using %s", ts.Cids())
}
span.AddAttributes(trace.BoolAttribute("newHead", true))
log.Infof("New heaviest tipset! %s (height=%d)", ts.Cids(), ts.Height())
cs.heaviest = ts
if err := cs.writeHead(ctx, ts); err != nil {
log.Errorf("failed to write chain head: %s", err)
return nil
}
return nil
}
// FlushValidationCache removes all results of block validation from the
// chain metadata store. Usually the first step after a new chain import.
func (cs *ChainStore) FlushValidationCache(ctx context.Context) error {
return FlushValidationCache(ctx, cs.metadataDs)
}
func FlushValidationCache(ctx context.Context, ds dstore.Batching) error {
log.Infof("clearing block validation cache...")
dsWalk, err := ds.Query(ctx, query.Query{
// Potential TODO: the validation cache is not a namespace on its own
// but is rather constructed as prefixed-key `foo:bar` via .Instance(), which
// in turn does not work with the filter, which can match only on `foo/bar`
//
// If this is addressed (blockcache goes into its own sub-namespace) then
// strings.HasPrefix(...) below can be skipped
//
//Prefix: blockValidationCacheKeyPrefix.String()
KeysOnly: true,
})
if err != nil {
return xerrors.Errorf("failed to initialize key listing query: %w", err)
}
allKeys, err := dsWalk.Rest()
if err != nil {
return xerrors.Errorf("failed to run key listing query: %w", err)
}
batch, err := ds.Batch(ctx)
if err != nil {
return xerrors.Errorf("failed to open a DS batch: %w", err)
}
delCnt := 0
for _, k := range allKeys {
if strings.HasPrefix(k.Key, blockValidationCacheKeyPrefix.String()) {
delCnt++
batch.Delete(ctx, dstore.RawKey(k.Key)) // nolint:errcheck
}
}
if err := batch.Commit(ctx); err != nil {
return xerrors.Errorf("failed to commit the DS batch: %w", err)
}
log.Infof("%d block validation entries cleared.", delCnt)
return nil
}
// SetHead sets the chainstore's current 'best' head node.
// This should only be called if something is broken and needs fixing.
//
// This function will bypass and remove any checkpoints.
func (cs *ChainStore) SetHead(ctx context.Context, ts *types.TipSet) error {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
if err := cs.removeCheckpoint(ctx); err != nil {
return err
}
return cs.takeHeaviestTipSet(ctx, ts)
2019-10-10 03:50:50 +00:00
}
// RemoveCheckpoint removes the current checkpoint.
func (cs *ChainStore) RemoveCheckpoint(ctx context.Context) error {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
return cs.removeCheckpoint(ctx)
}
func (cs *ChainStore) removeCheckpoint(ctx context.Context) error {
if err := cs.metadataDs.Delete(ctx, checkpointKey); err != nil {
return err
}
cs.checkpoint = nil
return nil
}
// SetCheckpoint will set a checkpoint past which the chainstore will not allow forks.
//
// NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past.
func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error {
tskBytes, err := json.Marshal(ts.Key())
if err != nil {
return err
}
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
if ts.Height() > cs.heaviest.Height() {
return xerrors.Errorf("cannot set a checkpoint in the future")
}
// Otherwise, this operation could get _very_ expensive.
if cs.heaviest.Height()-ts.Height() > build.ForkLengthThreshold {
return xerrors.Errorf("cannot set a checkpoint before the fork threshold")
}
if !ts.Equals(cs.heaviest) {
anc, err := cs.IsAncestorOf(ctx, ts, cs.heaviest)
if err != nil {
return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err)
}
if !anc {
return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain")
}
}
err = cs.metadataDs.Put(ctx, checkpointKey, tskBytes)
if err != nil {
return err
}
cs.checkpoint = ts
return nil
}
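
// Illustrative usage sketch (not part of the original file): checkpoint the
// current heaviest tipset so the store refuses to reorg across it, and show
// how the checkpoint can later be inspected or cleared. exampleCheckpointHead
// is a hypothetical helper, not an API of this package.
func exampleCheckpointHead(ctx context.Context, cs *ChainStore) error {
	head := cs.GetHeaviestTipSet()
	if err := cs.SetCheckpoint(ctx, head); err != nil {
		return xerrors.Errorf("checkpointing head: %w", err)
	}
	log.Infow("checkpoint set", "tipset", cs.GetCheckpoint().Key())
	// To allow deep reorgs past this point again, call cs.RemoveCheckpoint(ctx).
	return nil
}
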
func (cs *ChainStore) GetCheckpoint() *types.TipSet {
cs.heaviestLk.RLock()
chkpt := cs.checkpoint
cs.heaviestLk.RUnlock()
return chkpt
}
// Contains returns whether our BlockStore has all blocks in the supplied TipSet.
func (cs *ChainStore) Contains(ctx context.Context, ts *types.TipSet) (bool, error) {
for _, c := range ts.Cids() {
has, err := cs.chainBlockstore.Has(ctx, c)
if err != nil {
return false, err
}
if !has {
return false, nil
}
}
return true, nil
}
// GetBlock fetches a BlockHeader with the supplied CID. It returns
// blockstore.ErrNotFound if the block was not found in the BlockStore.
func (cs *ChainStore) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHeader, error) {
var blk *types.BlockHeader
err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) {
blk, err = types.DecodeBlock(b)
return err
})
return blk, err
}
func (cs *ChainStore) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
v, ok := cs.tsCache.Get(tsk)
if ok {
return v.(*types.TipSet), nil
}
// Fetch tipset block headers from blockstore in parallel
var eg errgroup.Group
cids := tsk.Cids()
blks := make([]*types.BlockHeader, len(cids))
for i, c := range cids {
i, c := i, c
eg.Go(func() error {
b, err := cs.GetBlock(ctx, c)
if err != nil {
return xerrors.Errorf("get block %s: %w", c, err)
}
blks[i] = b
return nil
})
}
err := eg.Wait()
if err != nil {
return nil, err
}
ts, err := types.NewTipSet(blks)
if err != nil {
return nil, err
}
cs.tsCache.Add(tsk, ts)
return ts, nil
}
// IsAncestorOf returns true if 'a' is an ancestor of 'b'
func (cs *ChainStore) IsAncestorOf(ctx context.Context, a, b *types.TipSet) (bool, error) {
if b.Height() <= a.Height() {
return false, nil
}
cur := b
for !a.Equals(cur) && cur.Height() > a.Height() {
next, err := cs.LoadTipSet(ctx, cur.Parents())
if err != nil {
return false, err
}
cur = next
}
return cur.Equals(a), nil
}
func (cs *ChainStore) NearestCommonAncestor(ctx context.Context, a, b *types.TipSet) (*types.TipSet, error) {
l, _, err := cs.ReorgOps(ctx, a, b)
if err != nil {
return nil, err
}
return cs.LoadTipSet(ctx, l[len(l)-1].Parents())
}
// ReorgOps takes two tipsets (which can be at different heights), and walks
// their corresponding chains backwards one step at a time until a common
// ancestor is found. It then returns the respective chain segments that fork
// from that ancestor, in reverse order, where the first element of each slice
// is the supplied tipset and the last element is the earliest tipset on that
// side of the fork; the common ancestor itself is not included in either
// slice.
//
// If an error happens along the way, we return the error with nil slices.
func (cs *ChainStore) ReorgOps(ctx context.Context, a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
return ReorgOps(ctx, cs.LoadTipSet, a, b)
}
func ReorgOps(ctx context.Context, lts func(ctx context.Context, _ types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
left := a
right := b
var leftChain, rightChain []*types.TipSet
for !left.Equals(right) {
if left.Height() > right.Height() {
leftChain = append(leftChain, left)
par, err := lts(ctx, left.Parents())
if err != nil {
return nil, nil, err
}
left = par
} else {
rightChain = append(rightChain, right)
par, err := lts(ctx, right.Parents())
if err != nil {
log.Infof("failed to fetch right.Parents: %s", err)
return nil, nil, err
}
right = par
}
}
return leftChain, rightChain, nil
}
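
// Illustrative usage sketch (not part of the original file): compute the two
// chain segments hanging off the nearest common ancestor of tipsets a and b.
// Both returned slices run from the supplied tipset back towards (but not
// including) the ancestor, so a caller that wants to apply b's side
// oldest-first iterates it in reverse, as reorgWorker and GetPath do.
// exampleForkSegments is a hypothetical helper, not an API of this package.
func exampleForkSegments(ctx context.Context, cs *ChainStore, a, b *types.TipSet) error {
	revert, apply, err := cs.ReorgOps(ctx, a, b)
	if err != nil {
		return xerrors.Errorf("computing reorg ops: %w", err)
	}
	for _, ts := range revert { // newest first, starting at a
		log.Debugw("would revert", "height", ts.Height(), "tipset", ts.Key())
	}
	for i := len(apply) - 1; i >= 0; i-- { // reversed: oldest first, ending at b
		log.Debugw("would apply", "height", apply[i].Height(), "tipset", apply[i].Key())
	}
	return nil
}
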
// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
func (cs *ChainStore) GetHeaviestTipSet() (ts *types.TipSet) {
cs.heaviestLk.RLock()
ts = cs.heaviest
cs.heaviestLk.RUnlock()
return
}
func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHeader) error {
cs.tstLk.Lock()
defer cs.tstLk.Unlock()
tss := cs.tipsets[b.Height]
for _, oc := range tss {
if oc == b.Cid() {
log.Debug("tried to add block to tipset tracker that was already there")
return nil
}
h, err := cs.GetBlock(ctx, oc)
if err == nil && h != nil {
if h.Miner == b.Miner {
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", b.Miner, b.Height, b.Cid(), h.Cid())
}
}
}
// This function is called 5 times per epoch on average.
// It is also called only with tipsets that have passed initial validation,
// so they cannot be from the future.
// We are guaranteed not to use tipsets older than 900 epochs (the fork limit),
// so ideally we only want to keep the most recent 900 epochs in here.
// Go's map iteration starts at a random point in the map.
// With 5 tries per epoch, and 900 entries to keep, on average we will have
// ~136 garbage entries in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
// Seems good enough to me
for height := range cs.tipsets {
if height < b.Height-build.Finality {
delete(cs.tipsets, height)
}
break
}
cs.tipsets[b.Height] = append(tss, b.Cid())
return nil
}
func (cs *ChainStore) PersistBlockHeaders(ctx context.Context, b ...*types.BlockHeader) error {
sbs := make([]block.Block, len(b))
for i, header := range b {
var err error
sbs[i], err = header.ToStorageBlock()
if err != nil {
return err
}
}
batchSize := 256
calls := len(b) / batchSize
var err error
for i := 0; i <= calls; i++ {
start := batchSize * i
end := start + batchSize
if end > len(b) {
end = len(b)
}
err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(ctx, sbs[start:end]))
}
return err
}
func (cs *ChainStore) expandTipset(ctx context.Context, b *types.BlockHeader) (*types.TipSet, error) {
// Hold lock for the whole function for now, if it becomes a problem we can
// fix pretty easily
cs.tstLk.Lock()
defer cs.tstLk.Unlock()
all := []*types.BlockHeader{b}
tsets, ok := cs.tipsets[b.Height]
if !ok {
return types.NewTipSet(all)
}
inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()}
for _, bhc := range tsets {
if bhc == b.Cid() {
continue
}
h, err := cs.GetBlock(ctx, bhc)
if err != nil {
return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
}
if cid, found := inclMiners[h.Miner]; found {
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid)
continue
}
if types.CidArrsEqual(h.Parents, b.Parents) {
all = append(all, h)
inclMiners[h.Miner] = bhc
}
}
// TODO: other validation...?
return types.NewTipSet(all)
}
func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error {
if err := cs.PersistBlockHeaders(ctx, b); err != nil {
return err
}
ts, err := cs.expandTipset(ctx, b)
if err != nil {
return err
}
if err := cs.MaybeTakeHeavierTipSet(ctx, ts); err != nil {
return xerrors.Errorf("MaybeTakeHeavierTipSet failed: %w", err)
}
return nil
}
func (cs *ChainStore) GetGenesis(ctx context.Context) (*types.BlockHeader, error) {
data, err := cs.metadataDs.Get(ctx, dstore.NewKey("0"))
if err != nil {
return nil, err
}
c, err := cid.Cast(data)
if err != nil {
return nil, err
}
return cs.GetBlock(ctx, c)
}
// GetPath returns the sequence of atomic head change operations that
// need to be applied in order to switch the head of the chain from the `from`
// tipset to the `to` tipset.
func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) {
fts, err := cs.LoadTipSet(ctx, from)
if err != nil {
return nil, xerrors.Errorf("loading from tipset %s: %w", from, err)
}
tts, err := cs.LoadTipSet(ctx, to)
if err != nil {
return nil, xerrors.Errorf("loading to tipset %s: %w", to, err)
}
revert, apply, err := cs.ReorgOps(ctx, fts, tts)
if err != nil {
return nil, xerrors.Errorf("error getting tipset branches: %w", err)
}
path := make([]*api.HeadChange, len(revert)+len(apply))
for i, r := range revert {
path[i] = &api.HeadChange{Type: HCRevert, Val: r}
}
for j, i := 0, len(apply)-1; i >= 0; j, i = j+1, i-1 {
path[j+len(revert)] = &api.HeadChange{Type: HCApply, Val: apply[i]}
}
return path, nil
}
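
// Illustrative usage sketch (not part of the original file): ask the store for
// the head-change operations needed to move a caller-maintained view of the
// chain from one tipset key to another, then dispatch on the operation type.
// exampleReplayPath is a hypothetical helper, not an API of this package.
func exampleReplayPath(ctx context.Context, cs *ChainStore, from, to types.TipSetKey) error {
	path, err := cs.GetPath(ctx, from, to)
	if err != nil {
		return xerrors.Errorf("getting path from %s to %s: %w", from, to, err)
	}
	for _, hc := range path {
		switch hc.Type {
		case HCRevert:
			log.Debugw("revert", "height", hc.Val.Height(), "tipset", hc.Val.Key())
		case HCApply:
			log.Debugw("apply", "height", hc.Val.Height(), "tipset", hc.Val.Key())
		}
	}
	return nil
}
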
// ChainBlockstore returns the chain blockstore. Currently the chain and state
// stores are both backed by the same physical store, albeit with different
// caching policies, but in the future they will segregate.
func (cs *ChainStore) ChainBlockstore() bstore.Blockstore {
return cs.chainBlockstore
}
// StateBlockstore returns the state blockstore. Currently the chain and state
// stores are both backed by the same physical store, albeit with different
// caching policies, but in the future they will segregate.
func (cs *ChainStore) StateBlockstore() bstore.Blockstore {
return cs.stateBlockstore
}
func ActorStore(ctx context.Context, bs bstore.Blockstore) adt.Store {
return adt.WrapStore(ctx, cbor.NewCborStore(bs))
}
func (cs *ChainStore) ActorStore(ctx context.Context) adt.Store {
return ActorStore(ctx, cs.stateBlockstore)
}
func (cs *ChainStore) TryFillTipSet(ctx context.Context, ts *types.TipSet) (*FullTipSet, error) {
var out []*types.FullBlock
for _, b := range ts.Blocks() {
bmsgs, smsgs, err := cs.MessagesForBlock(ctx, b)
if err != nil {
// TODO: check for 'not found' errors, and only return nil if this
// is actually a 'not found' error
return nil, nil
}
fb := &types.FullBlock{
Header: b,
BlsMessages: bmsgs,
SecpkMessages: smsgs,
}
out = append(out, fb)
}
return NewFullTipSet(out), nil
}
// GetTipsetByHeight returns the tipset on the chain behind 'ts' at the given
// height. In the case that the given height is a null round, the 'prev' flag
// selects the tipset before the null round if true, and the tipset following
// the null round if false.
func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) {
if ts == nil {
ts = cs.GetHeaviestTipSet()
}
if h > ts.Height() {
return nil, xerrors.Errorf("looking for tipset with height greater than start point")
}
if h == ts.Height() {
return ts, nil
}
lbts, err := cs.cindex.GetTipsetByHeight(ctx, ts, h)
if err != nil {
return nil, err
}
if lbts.Height() < h {
log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h)
lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ctx, ts, h)
if err != nil {
return nil, err
}
}
if lbts.Height() == h || !prev {
return lbts, nil
}
return cs.LoadTipSet(ctx, lbts.Parents())
}
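
// Illustrative usage sketch (not part of the original file): look up the
// tipset at a given epoch below the current head. With prev=false (as here) a
// null round resolves to the tipset that follows it; with prev=true it would
// resolve to the tipset before it. exampleTipsetAtEpoch is a hypothetical
// helper, not an API of this package.
func exampleTipsetAtEpoch(ctx context.Context, cs *ChainStore, h abi.ChainEpoch) (*types.TipSet, error) {
	return cs.GetTipsetByHeight(ctx, h, cs.GetHeaviestTipSet(), false)
}
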
func (cs *ChainStore) Weight(ctx context.Context, hts *types.TipSet) (types.BigInt, error) { // todo remove
return cs.weight(ctx, cs.StateBlockstore(), hts)
}
// breakWeightTie returns true if ts1 wins according to the Filecoin tie-break rule.
func breakWeightTie(ts1, ts2 *types.TipSet) bool {
s := len(ts1.Blocks())
if s > len(ts2.Blocks()) {
s = len(ts2.Blocks())
}
// blocks are already sorted by ticket
for i := 0; i < s; i++ {
if ts1.Blocks()[i].Ticket.Less(ts2.Blocks()[i].Ticket) {
log.Infof("weight tie broken in favour of %s", ts1.Key())
return true
}
}
log.Infof("weight tie left unbroken, default to %s", ts2.Key())
return false
}
func (cs *ChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
if tsk.IsEmpty() {
return cs.GetHeaviestTipSet(), nil
}
return cs.LoadTipSet(ctx, tsk)
}
func (cs *ChainStore) GetLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
cur := ts
for i := 0; i < 20; i++ {
cbe := cur.Blocks()[0].BeaconEntries
if len(cbe) > 0 {
return &cbe[len(cbe)-1], nil
}
if cur.Height() == 0 {
return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry")
}
next, err := cs.LoadTipSet(ctx, cur.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err)
}
cur = next
}
if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
return &types.BeaconEntry{
Data: []byte{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9},
}, nil
}
return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}