2020-11-26 14:51:16 +00:00
|
|
|
package splitstore
|
2020-11-24 14:51:00 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"errors"
|
2021-07-08 18:30:39 +00:00
|
|
|
"os"
|
2020-11-25 07:10:58 +00:00
|
|
|
"sync"
|
2020-11-29 13:10:30 +00:00
|
|
|
"sync/atomic"
|
2020-11-25 08:11:42 +00:00
|
|
|
"time"
|
2020-11-24 14:51:00 +00:00
|
|
|
|
2023-03-25 07:33:05 +00:00
|
|
|
blocks "github.com/ipfs/go-block-format"
|
2022-06-15 10:06:22 +00:00
|
|
|
"github.com/ipfs/go-cid"
|
2020-11-24 17:41:07 +00:00
|
|
|
dstore "github.com/ipfs/go-datastore"
|
2022-06-28 11:09:59 +00:00
|
|
|
ipld "github.com/ipfs/go-ipld-format"
|
2020-11-26 14:51:16 +00:00
|
|
|
logging "github.com/ipfs/go-log/v2"
|
2022-06-14 15:00:51 +00:00
|
|
|
"go.opencensus.io/stats"
|
|
|
|
"go.uber.org/multierr"
|
|
|
|
"golang.org/x/xerrors"
|
2020-11-24 14:51:00 +00:00
|
|
|
|
|
|
|
"github.com/filecoin-project/go-state-types/abi"
|
2022-06-14 15:00:51 +00:00
|
|
|
|
2021-03-01 07:25:52 +00:00
|
|
|
bstore "github.com/filecoin-project/lotus/blockstore"
|
2021-12-03 09:50:35 +00:00
|
|
|
"github.com/filecoin-project/lotus/build"
|
|
|
|
"github.com/filecoin-project/lotus/chain/stmgr"
|
2020-11-24 14:51:00 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/types"
|
2021-03-05 09:54:06 +00:00
|
|
|
"github.com/filecoin-project/lotus/metrics"
|
2020-11-24 14:51:00 +00:00
|
|
|
)
|
|
|
|
|
2021-03-01 16:41:51 +00:00
|
|
|
var (
	// baseEpochKey stores the base epoch (last compaction epoch) in the
	// metadata store.
	baseEpochKey = dstore.NewKey("/splitstore/baseEpoch")

	// warmupEpochKey stores whether a hot store warmup has been performed.
	// On first start, the splitstore will walk the state tree and will copy
	// all active blocks into the hotstore.
	warmupEpochKey = dstore.NewKey("/splitstore/warmupEpoch")

	// markSetSizeKey stores the current estimate for the mark set size.
	// this is first computed at warmup and updated in every compaction
	markSetSizeKey = dstore.NewKey("/splitstore/markSetSize")

	// compactionIndexKey stores the compaction index (serial number)
	compactionIndexKey = dstore.NewKey("/splitstore/compactionIndex")

	// pruneIndexKey stores the prune index (serial number)
	pruneIndexKey = dstore.NewKey("/splitstore/pruneIndex")

	// pruneEpochKey stores the base epoch of the last prune in the metadata store
	pruneEpochKey = dstore.NewKey("/splitstore/pruneEpoch")

	log = logging.Logger("splitstore")

	// errClosing is returned by operations that race with splitstore shutdown.
	errClosing = errors.New("splitstore is closing")

	// set this to true if you are debugging the splitstore to enable debug logging
	enableDebugLog = false
	// set this to true if you want to track origin stack traces in the write log
	enableDebugLogWriteTraces = false

	// upgradeBoundary is the boundary before and after an upgrade where we suppress compaction
	upgradeBoundary = build.Finality
)
|
2020-11-24 17:41:07 +00:00
|
|
|
|
// CompactType identifies which compaction-related operation (if any) is in
// progress. It is only meaningful while the compacting flag is set, and it
// selects the fallback store used by reads during the critical section.
type CompactType int

const (
	none CompactType = iota // no compaction-related operation in progress
	warmup                  // hotstore warmup in progress
	hot                     // hot compaction in progress
	cold                    // cold prune in progress
	check                   // consistency check in progress
)
|
|
|
|
|
2021-07-13 21:35:15 +00:00
|
|
|
func init() {
|
|
|
|
if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG") == "1" {
|
|
|
|
enableDebugLog = true
|
|
|
|
}
|
2021-03-02 16:59:00 +00:00
|
|
|
|
2021-07-13 21:35:15 +00:00
|
|
|
if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES") == "1" {
|
|
|
|
enableDebugLogWriteTraces = true
|
|
|
|
}
|
|
|
|
}
|
2021-03-01 18:30:15 +00:00
|
|
|
|
// Config holds the tunable options for the SplitStore.
type Config struct {
	// MarkSetType is the type of mark set to use.
	//
	// The default value is "map", which uses an in-memory map-backed markset.
	// If you are constrained in memory (i.e. compaction runs out of memory), you
	// can use "badger", which will use a disk-backed markset using badger.
	// Note that compaction will take quite a bit longer when using the "badger" option,
	// but that shouldn't really matter (as long as it is under 7.5hrs).
	MarkSetType string

	// DiscardColdBlocks indicates whether to skip moving cold blocks to the coldstore.
	// If the splitstore is running with a noop coldstore then this option is set to true
	// which skips moving (as it is a noop, but still takes time to read all the cold objects)
	// and directly purges cold blocks.
	DiscardColdBlocks bool

	// UniversalColdBlocks indicates whether all blocks being garbage collected and purged
	// from the hotstore should be written to the cold store
	UniversalColdBlocks bool

	// HotStoreMessageRetention indicates the hotstore retention policy for messages.
	// It has the following semantics:
	// - a value of 0 will only retain messages within the compaction boundary (4 finalities)
	// - a positive integer indicates the number of finalities, outside the compaction boundary,
	//   for which messages will be retained in the hotstore.
	HotStoreMessageRetention uint64

	// HotStoreFullGCFrequency indicates how frequently (in terms of compactions) to garbage collect
	// the hotstore using full (moving) GC if supported by the hotstore.
	// A value of 0 disables full GC entirely.
	// A positive value is the number of compactions before a full GC is performed;
	// a value of 1 will perform full GC in every compaction.
	HotStoreFullGCFrequency uint64

	// HotstoreMaxSpaceTarget suggests the max allowed space the hotstore can take.
	// This is not a hard limit, it is possible for the hotstore to exceed the target
	// for example if state grows massively between compactions. The splitstore
	// will make a best effort to avoid overflowing the target and in practice should
	// never overflow. This field is used when doing GC at the end of a compaction to
	// adaptively choose moving GC
	HotstoreMaxSpaceTarget uint64

	// HotstoreMaxSpaceThreshold is a soft trigger: moving GC will be triggered when
	// total moving size exceeds
	// HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold
	HotstoreMaxSpaceThreshold uint64

	// HotstoreMaxSpaceSafetyBuffer is a safety buffer to prevent moving GC from
	// overflowing disk. Moving GC will not occur when total moving size exceeds
	// HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer
	HotstoreMaxSpaceSafetyBuffer uint64
}
|
|
|
|
|
// ChainAccessor allows the Splitstore to access the chain. It will most likely
// be a ChainStore at runtime.
type ChainAccessor interface {
	// GetTipsetByHeight resolves a tipset at the given epoch, anchored at the given tipset.
	GetTipsetByHeight(context.Context, abi.ChainEpoch, *types.TipSet, bool) (*types.TipSet, error)
	// GetHeaviestTipSet returns the current chain head.
	GetHeaviestTipSet() *types.TipSet
	// SubscribeHeadChanges registers a callback invoked on head change (revert/apply).
	SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error)
}
|
|
|
|
|
// upgradeRange is a precomputed epoch range during which we shouldn't compact so as to not
// interfere with an upgrade
type upgradeRange struct {
	start, end abi.ChainEpoch // inclusive bounds around the upgrade (and pre-migration) heights
}
|
|
|
|
|
// hotstore is the interface that must be satisfied by the hot blockstore; it is an extension
// of the Blockstore interface with the traits we need for compaction.
type hotstore interface {
	bstore.Blockstore
	bstore.BlockstoreIterator // efficient key iteration, required by compaction
}
|
|
|
|
|
// SplitStore is a Blockstore that splits storage between a hot store (recent,
// frequently accessed state) and a cold store, moving objects between them via
// periodic compaction and prune cycles.
type SplitStore struct {
	compacting  int32       // flag for when compaction is in progress
	compactType CompactType // compaction type, protected by compacting atomic, only meaningful when compacting == 1
	closing     int32       // the splitstore is closing

	cfg  *Config
	path string // on-disk path used for marksets, checkpoints and debug logs

	mx          sync.Mutex
	warmupEpoch atomic.Int64   // epoch at which warmup completed; 0 means not yet warm
	baseEpoch   abi.ChainEpoch // protected by compaction lock
	pruneEpoch  abi.ChainEpoch // protected by compaction lock

	headChangeMx sync.Mutex // serializes head-change notifications

	chain ChainAccessor
	ds    dstore.Datastore  // metadata store for the epoch/index keys above
	cold  bstore.Blockstore
	hot   hotstore

	// precomputed epoch ranges around network upgrades where compaction is suppressed
	upgrades []upgradeRange

	markSetEnv  MarkSetEnv
	markSetSize int64 // size hint for marksets, persisted across restarts

	compactionIndex int64
	pruneIndex      int64
	onlineGCCnt     int64

	ctx    context.Context
	cancel func()

	outOfSync         int32 // for fast checking
	chainSyncMx       sync.Mutex
	chainSyncCond     sync.Cond // signaled when chain sync state changes
	chainSyncFinished bool      // protected by chainSyncMx

	debug *debugLog

	// transactional protection for concurrent read/writes during compaction
	txnLk           sync.RWMutex
	txnViewsMx      sync.Mutex
	txnViewsCond    sync.Cond
	txnViews        int
	txnViewsWaiting bool
	txnActive       bool
	txnRefsMx       sync.Mutex
	txnRefs         map[cid.Cid]struct{}
	txnMissing      map[cid.Cid]struct{}
	txnMarkSet      MarkSet // non-nil only inside the compaction critical section
	txnSyncMx       sync.Mutex
	txnSyncCond     sync.Cond
	txnSync         bool

	// background cold object reification
	reifyWorkers    sync.WaitGroup
	reifyMx         sync.Mutex
	reifyCond       sync.Cond
	reifyPend       map[cid.Cid]struct{}
	reifyInProgress map[cid.Cid]struct{}

	// registered protectors
	protectors []func(func(cid.Cid) error) error

	// dag sizes measured during latest compaction
	// logged and used for GC strategy

	// protected by compaction lock
	szWalk          int64
	szProtectedTxns int64
	szKeys          int64 // approximate, not counting keys protected when entering critical section

	// protected by txnLk
	szMarkedLiveRefs int64
}
|
|
|
|
|
// compile-time assertion that SplitStore implements the Blockstore interface
var _ bstore.Blockstore = (*SplitStore)(nil)
2020-11-24 14:51:00 +00:00
|
|
|
|
// Open opens an existing splitstore, or creates a new splitstore. The splitstore
// is backed by the provided hot and cold stores. The returned SplitStore MUST be
// attached to the ChainStore with Start in order to trigger compaction.
func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) {
	// hot blockstore must support the hotstore interface
	hots, ok := hot.(hotstore)
	if !ok {
		// be specific about what is missing
		if _, ok := hot.(bstore.BlockstoreIterator); !ok {
			return nil, xerrors.Errorf("hot blockstore does not support efficient iteration: %T", hot)
		}

		return nil, xerrors.Errorf("hot blockstore does not support the necessary traits: %T", hot)
	}

	// the markset env
	markSetEnv, err := OpenMarkSetEnv(path, cfg.MarkSetType)
	if err != nil {
		return nil, err
	}

	// and now we can make a SplitStore
	ss := &SplitStore{
		cfg:        cfg,
		path:       path,
		ds:         ds,
		cold:       cold,
		hot:        hots,
		markSetEnv: markSetEnv,
	}

	// each condition variable shares its guarding mutex
	ss.txnViewsCond.L = &ss.txnViewsMx
	ss.txnSyncCond.L = &ss.txnSyncMx
	ss.chainSyncCond.L = &ss.chainSyncMx
	ss.ctx, ss.cancel = context.WithCancel(context.Background())

	ss.reifyCond.L = &ss.reifyMx
	ss.reifyPend = make(map[cid.Cid]struct{})
	ss.reifyInProgress = make(map[cid.Cid]struct{})

	if enableDebugLog {
		ss.debug, err = openDebugLog(path)
		if err != nil {
			return nil, err
		}
	}

	// resume any compaction/prune that was interrupted by a previous shutdown
	if ss.checkpointExists() {
		log.Info("found compaction checkpoint; resuming compaction")
		if err := ss.completeCompaction(); err != nil {
			markSetEnv.Close() //nolint:errcheck
			return nil, xerrors.Errorf("error resuming compaction: %w", err)
		}
	}
	if ss.pruneCheckpointExists() {
		log.Info("found prune checkpoint; resuming prune")
		if err := ss.completePrune(); err != nil {
			markSetEnv.Close() //nolint:errcheck
			return nil, xerrors.Errorf("error resuming prune: %w", err)
		}
	}

	return ss, nil
}
|
|
|
|
|
2020-11-24 14:51:00 +00:00
|
|
|
// Blockstore interface
|
2021-12-11 21:03:00 +00:00
|
|
|
func (s *SplitStore) DeleteBlock(_ context.Context, _ cid.Cid) error {
|
2020-11-24 14:51:00 +00:00
|
|
|
// afaict we don't seem to be using this method, so it's not implemented
|
2020-11-24 22:01:10 +00:00
|
|
|
return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint
|
2020-11-24 14:51:00 +00:00
|
|
|
}
|
|
|
|
|
2021-12-11 21:03:00 +00:00
|
|
|
func (s *SplitStore) DeleteMany(_ context.Context, _ []cid.Cid) error {
|
2021-03-02 14:45:45 +00:00
|
|
|
// afaict we don't seem to be using this method, so it's not implemented
|
|
|
|
return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint
|
|
|
|
}
|
|
|
|
|
// Has checks both the hot and cold stores for the given cid.
// Hits are tracked as transactional references so that a concurrent
// compaction does not purge objects the caller is still using.
func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
	// identity cids carry their payload inline; they always "exist"
	if isIdentiyCid(cid) {
		return true, nil
	}

	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	// critical section: a compaction/prune is purging; consult the markset first
	if s.txnMarkSet != nil {
		has, err := s.txnMarkSet.Has(cid)
		if err != nil {
			return false, err
		}

		if has {
			// marked live; check the store being compacted directly
			return s.has(cid)
		}
		// not marked: the object, if it exists, is in the other store
		switch s.compactType {
		case hot:
			return s.cold.Has(ctx, cid)
		case cold:
			return s.hot.Has(ctx, cid)
		default:
			return false, xerrors.Errorf("invalid compaction type %d, only hot and cold allowed for critical section", s.compactType)
		}
	}

	// normal path: hot store first
	has, err := s.hot.Has(ctx, cid)
	if err != nil {
		return has, err
	}

	if has {
		s.trackTxnRef(cid)
		return true, nil
	}

	// fall back to the cold store
	has, err = s.cold.Has(ctx, cid)
	if has {
		s.trackTxnRef(cid)
		// hot views request background promotion of cold objects into the hotstore
		if bstore.IsHotView(ctx) {
			s.reifyColdObject(cid)
		}
	}

	return has, err
}
|
|
|
|
|
// Get retrieves a block, checking the hot store first and falling back to the
// cold store on a not-found error. Hits are tracked as transactional references
// to protect them from concurrent compaction.
func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
	// identity cids decode to their inline payload; no store access needed
	if isIdentiyCid(cid) {
		data, err := decodeIdentityCid(cid)
		if err != nil {
			return nil, err
		}

		return blocks.NewBlockWithCid(data, cid)
	}

	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	// critical section: a compaction/prune is purging; consult the markset first
	if s.txnMarkSet != nil {
		has, err := s.txnMarkSet.Has(cid)
		if err != nil {
			return nil, err
		}

		if has {
			// marked live; read from the store being compacted
			return s.get(cid)
		}
		// not marked: read from the other store
		switch s.compactType {
		case hot:
			return s.cold.Get(ctx, cid)
		case cold:
			return s.hot.Get(ctx, cid)
		default:
			return nil, xerrors.Errorf("invalid compaction type %d, only hot and cold allowed for critical section", s.compactType)
		}
	}

	blk, err := s.hot.Get(ctx, cid)
	switch {
	case err == nil:
		s.trackTxnRef(cid)
		return blk, nil

	case ipld.IsNotFound(err):
		// only log misses once warmup has completed; before that misses are expected
		if s.isWarm() {
			s.debug.LogReadMiss(cid)
		}

		blk, err = s.cold.Get(ctx, cid)
		if err == nil {
			s.trackTxnRef(cid)
			// hot views request background promotion of cold objects into the hotstore
			if bstore.IsHotView(ctx) {
				s.reifyColdObject(cid)
			}

			stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
		}
		return blk, err

	default:
		return nil, err
	}
}
|
|
|
|
|
// GetSize returns the size of a block, checking the hot store first and
// falling back to the cold store on a not-found error; mirrors Get's logic.
func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
	// identity cids carry their payload inline; the size is the payload length
	if isIdentiyCid(cid) {
		data, err := decodeIdentityCid(cid)
		if err != nil {
			return 0, err
		}

		return len(data), nil
	}

	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	// critical section: a compaction/prune is purging; consult the markset first
	if s.txnMarkSet != nil {
		has, err := s.txnMarkSet.Has(cid)
		if err != nil {
			return 0, err
		}

		if has {
			// marked live; read from the store being compacted
			return s.getSize(cid)
		}
		// not marked: read from the other store
		switch s.compactType {
		case hot:
			return s.cold.GetSize(ctx, cid)
		case cold:
			return s.hot.GetSize(ctx, cid)
		default:
			return 0, xerrors.Errorf("invalid compaction type %d, only hot and cold allowed for critical section", s.compactType)
		}
	}

	size, err := s.hot.GetSize(ctx, cid)
	switch {
	case err == nil:
		s.trackTxnRef(cid)
		return size, nil

	case ipld.IsNotFound(err):
		// only log misses once warmup has completed; before that misses are expected
		if s.isWarm() {
			s.debug.LogReadMiss(cid)
		}

		size, err = s.cold.GetSize(ctx, cid)
		if err == nil {
			s.trackTxnRef(cid)
			// hot views request background promotion of cold objects into the hotstore
			if bstore.IsHotView(ctx) {
				s.reifyColdObject(cid)
			}

			stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
		}
		return size, err

	default:
		return 0, err
	}
}
|
|
|
|
|
2020-11-24 07:11:28 +00:00
|
|
|
func (s *SplitStore) Flush(ctx context.Context) error {
|
|
|
|
s.txnLk.RLock()
|
|
|
|
defer s.txnLk.RUnlock()
|
|
|
|
|
|
|
|
if err := s.cold.Flush(ctx); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := s.hot.Flush(ctx); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := s.ds.Sync(ctx, dstore.Key{}); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
// Put writes a block to the hot store and registers it for transactional
// protection so a concurrent compaction cannot purge it.
func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error {
	// identity cids need no storage; their data is embedded in the cid
	if isIdentiyCid(blk.Cid()) {
		return nil
	}

	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	err := s.hot.Put(ctx, blk)
	if err != nil {
		return err
	}

	s.debug.LogWrite(blk)

	// critical section
	if s.txnMarkSet != nil && s.compactType == hot { // puts only touch hot store
		// mark the new object live immediately so the in-flight purge keeps it
		s.markLiveRefs([]cid.Cid{blk.Cid()})
		return nil
	}
	s.trackTxnRef(blk.Cid())

	return nil
}
|
|
|
|
|
2021-12-11 21:03:00 +00:00
|
|
|
func (s *SplitStore) PutMany(ctx context.Context, blks []blocks.Block) error {
|
2021-07-09 08:31:04 +00:00
|
|
|
// filter identites
|
|
|
|
idcids := 0
|
|
|
|
for _, blk := range blks {
|
|
|
|
if isIdentiyCid(blk.Cid()) {
|
|
|
|
idcids++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if idcids > 0 {
|
|
|
|
if idcids == len(blks) {
|
|
|
|
// it's all identities
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
filtered := make([]blocks.Block, 0, len(blks)-idcids)
|
|
|
|
for _, blk := range blks {
|
|
|
|
if isIdentiyCid(blk.Cid()) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
filtered = append(filtered, blk)
|
|
|
|
}
|
|
|
|
|
|
|
|
blks = filtered
|
|
|
|
}
|
|
|
|
|
2020-11-24 14:51:00 +00:00
|
|
|
batch := make([]cid.Cid, 0, len(blks))
|
|
|
|
for _, blk := range blks {
|
|
|
|
batch = append(batch, blk.Cid())
|
|
|
|
}
|
|
|
|
|
2021-06-25 07:07:45 +00:00
|
|
|
s.txnLk.RLock()
|
2022-02-01 07:07:14 +00:00
|
|
|
defer s.txnLk.RUnlock()
|
2021-06-25 07:07:45 +00:00
|
|
|
|
2021-12-11 21:03:00 +00:00
|
|
|
err := s.hot.PutMany(ctx, blks)
|
2021-07-07 13:34:02 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-07-04 06:53:58 +00:00
|
|
|
|
2021-07-07 13:34:02 +00:00
|
|
|
s.debug.LogWriteMany(blks)
|
|
|
|
|
2022-01-30 13:33:15 +00:00
|
|
|
// critical section
|
2022-08-05 20:34:16 +00:00
|
|
|
if s.txnMarkSet != nil && s.compactType == hot { // puts only touch hot store
|
2022-02-01 07:07:14 +00:00
|
|
|
s.markLiveRefs(batch)
|
2022-01-31 15:26:08 +00:00
|
|
|
return nil
|
2022-01-30 13:33:15 +00:00
|
|
|
}
|
2021-07-09 12:10:02 +00:00
|
|
|
s.trackTxnRefMany(batch)
|
2022-08-05 20:34:16 +00:00
|
|
|
|
2021-07-07 13:34:02 +00:00
|
|
|
return nil
|
2020-11-24 14:51:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
|
|
|
|
ctx, cancel := context.WithCancel(ctx)
|
|
|
|
|
|
|
|
chHot, err := s.hot.AllKeysChan(ctx)
|
|
|
|
if err != nil {
|
2020-11-24 22:01:10 +00:00
|
|
|
cancel()
|
2020-11-24 14:51:00 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
chCold, err := s.cold.AllKeysChan(ctx)
|
|
|
|
if err != nil {
|
|
|
|
cancel()
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-07-10 13:30:27 +00:00
|
|
|
seen := cid.NewSet()
|
|
|
|
ch := make(chan cid.Cid, 8) // buffer is arbitrary, just enough to avoid context switches
|
2020-11-24 14:51:00 +00:00
|
|
|
go func() {
|
|
|
|
defer cancel()
|
2020-11-25 07:07:06 +00:00
|
|
|
defer close(ch)
|
2020-11-24 14:51:00 +00:00
|
|
|
|
|
|
|
for _, in := range []<-chan cid.Cid{chHot, chCold} {
|
2021-07-10 13:30:27 +00:00
|
|
|
for c := range in {
|
|
|
|
// ensure we only emit each key once
|
|
|
|
if !seen.Visit(c) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2020-11-24 14:51:00 +00:00
|
|
|
select {
|
2021-07-10 13:30:27 +00:00
|
|
|
case ch <- c:
|
2020-11-24 14:51:00 +00:00
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
return ch, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SplitStore) HashOnRead(enabled bool) {
|
|
|
|
s.hot.HashOnRead(enabled)
|
|
|
|
s.cold.HashOnRead(enabled)
|
|
|
|
}
|
|
|
|
|
// View invokes cb with the raw bytes of the block, checking the hot store
// first and falling back to the cold store. Note the unusual locking: the
// read lock taken here is released inside protectView when we are NOT in
// the critical section, so the lock is never held across the callback.
func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) error) error {
	// identity cids decode to their inline payload; no store access needed
	if isIdentiyCid(cid) {
		data, err := decodeIdentityCid(cid)
		if err != nil {
			return err
		}

		return cb(data)
	}

	// critical section
	s.txnLk.RLock() // the lock is released in protectView if we are not in critical section
	if s.txnMarkSet != nil {
		has, err := s.txnMarkSet.Has(cid)
		s.txnLk.RUnlock()

		if err != nil {
			return err
		}

		if has {
			// marked live; view from the store being compacted
			return s.view(cid, cb)
		}
		// not marked: view from the other store
		switch s.compactType {
		case hot:
			return s.cold.View(ctx, cid, cb)
		case cold:
			return s.hot.View(ctx, cid, cb)
		default:
			return xerrors.Errorf("invalid compaction type %d, only hot and cold allowed for critical section", s.compactType)
		}
	}

	// views are (optimistically) protected two-fold:
	// - if there is an active transaction, then the reference is protected.
	// - if there is no active transaction, active views are tracked in a
	//   wait group and compaction is inhibited from starting until they
	//   have all completed. this is necessary to ensure that a (very) long-running
	//   view can't have its data pointer deleted, which would be catastrophic.
	//   Note that we can't just RLock for the duration of the view, as this could
	//   lead to deadlock with recursive views.
	s.protectView(cid)
	defer s.viewDone()

	err := s.hot.View(ctx, cid, cb)
	if ipld.IsNotFound(err) {
		// only log misses once warmup has completed; before that misses are expected
		if s.isWarm() {
			s.debug.LogReadMiss(cid)
		}

		err = s.cold.View(ctx, cid, cb)
		if err == nil {
			// hot views request background promotion of cold objects into the hotstore
			if bstore.IsHotView(ctx) {
				s.reifyColdObject(cid)
			}

			stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
		}
		return err
	}
	return err
}
|
2020-11-24 17:15:38 +00:00
|
|
|
|
2021-07-10 13:30:27 +00:00
|
|
|
func (s *SplitStore) isWarm() bool {
|
2023-05-10 18:23:12 +00:00
|
|
|
return s.warmupEpoch.Load() > 0
|
2021-07-10 13:30:27 +00:00
|
|
|
}
|
|
|
|
|
2020-11-24 17:26:28 +00:00
|
|
|
// State tracking
|
2021-12-03 09:50:35 +00:00
|
|
|
func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error {
|
2021-03-01 17:38:02 +00:00
|
|
|
s.chain = chain
|
2021-07-07 06:55:25 +00:00
|
|
|
curTs := chain.GetHeaviestTipSet()
|
2020-11-24 17:41:07 +00:00
|
|
|
|
2021-12-03 09:50:35 +00:00
|
|
|
// precompute the upgrade boundaries
|
|
|
|
s.upgrades = make([]upgradeRange, 0, len(us))
|
|
|
|
for _, upgrade := range us {
|
|
|
|
boundary := upgrade.Height
|
|
|
|
for _, pre := range upgrade.PreMigrations {
|
|
|
|
preMigrationBoundary := upgrade.Height - pre.StartWithin
|
|
|
|
if preMigrationBoundary < boundary {
|
|
|
|
boundary = preMigrationBoundary
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
upgradeStart := boundary - upgradeBoundary
|
|
|
|
upgradeEnd := upgrade.Height + upgradeBoundary
|
|
|
|
|
|
|
|
s.upgrades = append(s.upgrades, upgradeRange{start: upgradeStart, end: upgradeEnd})
|
|
|
|
}
|
|
|
|
|
2021-07-12 11:45:58 +00:00
|
|
|
// should we warmup
|
|
|
|
warmup := false
|
|
|
|
|
2020-11-24 17:41:07 +00:00
|
|
|
// load base epoch from metadata ds
|
|
|
|
// if none, then use current epoch because it's a fresh start
|
2021-12-11 21:03:00 +00:00
|
|
|
bs, err := s.ds.Get(s.ctx, baseEpochKey)
|
2020-11-24 17:41:07 +00:00
|
|
|
switch err {
|
|
|
|
case nil:
|
2020-11-26 18:37:02 +00:00
|
|
|
s.baseEpoch = bytesToEpoch(bs)
|
2020-11-24 17:41:07 +00:00
|
|
|
|
|
|
|
case dstore.ErrNotFound:
|
2021-07-07 06:55:25 +00:00
|
|
|
if curTs == nil {
|
2020-12-01 15:56:22 +00:00
|
|
|
// this can happen in some tests
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
2021-07-07 06:55:25 +00:00
|
|
|
err = s.setBaseEpoch(curTs.Height())
|
2020-11-24 17:41:07 +00:00
|
|
|
if err != nil {
|
2021-03-01 16:41:51 +00:00
|
|
|
return xerrors.Errorf("error saving base epoch: %w", err)
|
2020-11-24 17:41:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
2021-03-01 16:41:51 +00:00
|
|
|
return xerrors.Errorf("error loading base epoch: %w", err)
|
|
|
|
}
|
|
|
|
|
2022-08-08 20:06:32 +00:00
|
|
|
// load prune epoch from metadata ds
|
|
|
|
bs, err = s.ds.Get(s.ctx, pruneEpochKey)
|
|
|
|
switch err {
|
|
|
|
case nil:
|
|
|
|
s.pruneEpoch = bytesToEpoch(bs)
|
|
|
|
case dstore.ErrNotFound:
|
|
|
|
if curTs == nil {
|
|
|
|
//this can happen in some tests
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if err := s.setPruneEpoch(curTs.Height()); err != nil {
|
|
|
|
return xerrors.Errorf("error saving prune epoch: %w", err)
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
return xerrors.Errorf("error loading prune epoch: %w", err)
|
|
|
|
}
|
|
|
|
|
2021-03-01 16:41:51 +00:00
|
|
|
// load warmup epoch from metadata ds
|
2021-12-11 21:03:00 +00:00
|
|
|
bs, err = s.ds.Get(s.ctx, warmupEpochKey)
|
2021-03-01 16:41:51 +00:00
|
|
|
switch err {
|
|
|
|
case nil:
|
2023-05-10 18:23:12 +00:00
|
|
|
s.warmupEpoch.Store(bytesToInt64(bs))
|
2021-03-01 16:41:51 +00:00
|
|
|
|
|
|
|
case dstore.ErrNotFound:
|
2021-07-12 11:45:58 +00:00
|
|
|
warmup = true
|
2021-06-16 10:58:16 +00:00
|
|
|
|
2021-03-01 16:41:51 +00:00
|
|
|
default:
|
|
|
|
return xerrors.Errorf("error loading warmup epoch: %w", err)
|
2020-11-24 17:41:07 +00:00
|
|
|
}
|
|
|
|
|
2021-07-06 12:09:04 +00:00
|
|
|
// load markSetSize from metadata ds to provide a size hint for marksets
|
2021-12-11 21:03:00 +00:00
|
|
|
bs, err = s.ds.Get(s.ctx, markSetSizeKey)
|
2021-03-05 08:00:17 +00:00
|
|
|
switch err {
|
|
|
|
case nil:
|
|
|
|
s.markSetSize = bytesToInt64(bs)
|
|
|
|
|
|
|
|
case dstore.ErrNotFound:
|
|
|
|
default:
|
|
|
|
return xerrors.Errorf("error loading mark set size: %w", err)
|
|
|
|
}
|
|
|
|
|
2021-07-12 05:39:12 +00:00
|
|
|
// load compactionIndex from metadata ds to provide a hint as to when to perform moving gc
|
2021-12-11 21:03:00 +00:00
|
|
|
bs, err = s.ds.Get(s.ctx, compactionIndexKey)
|
2021-07-12 05:39:12 +00:00
|
|
|
switch err {
|
|
|
|
case nil:
|
|
|
|
s.compactionIndex = bytesToInt64(bs)
|
|
|
|
|
|
|
|
case dstore.ErrNotFound:
|
2021-07-12 11:45:58 +00:00
|
|
|
// this is potentially an upgrade from splitstore v0; schedule a warmup as v0 has
|
|
|
|
// some issues with hot references leaking into the coldstore.
|
|
|
|
warmup = true
|
2021-07-12 05:39:12 +00:00
|
|
|
default:
|
|
|
|
return xerrors.Errorf("error loading compaction index: %w", err)
|
|
|
|
}
|
|
|
|
|
2023-05-10 18:23:12 +00:00
|
|
|
log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch.Load())
|
2021-06-17 09:32:32 +00:00
|
|
|
|
2021-07-12 11:45:58 +00:00
|
|
|
if warmup {
|
|
|
|
err = s.warmup(curTs)
|
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("error starting warmup: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-04 12:29:50 +00:00
|
|
|
// spawn the reifier
|
|
|
|
go s.reifyOrchestrator()
|
|
|
|
|
2020-11-24 17:41:07 +00:00
|
|
|
// watch the chain
|
2021-03-01 17:38:02 +00:00
|
|
|
chain.SubscribeHeadChanges(s.HeadChange)
|
2020-11-24 17:41:07 +00:00
|
|
|
|
|
|
|
return nil
|
2020-11-24 17:26:28 +00:00
|
|
|
}
|
|
|
|
|
2021-07-17 17:33:56 +00:00
|
|
|
func (s *SplitStore) AddProtector(protector func(func(cid.Cid) error) error) {
|
|
|
|
s.mx.Lock()
|
|
|
|
defer s.mx.Unlock()
|
|
|
|
|
|
|
|
s.protectors = append(s.protectors, protector)
|
|
|
|
}
|
|
|
|
|
2020-11-26 15:49:47 +00:00
|
|
|
// Close shuts down the splitstore. It is idempotent: only the first
// caller performs the shutdown; subsequent calls return nil immediately.
// If a compaction is in flight, Close unblocks any transactional/chain
// sync waiters and then blocks until the compaction finishes before
// tearing down the remaining machinery.
func (s *SplitStore) Close() error {
	// atomically claim the close; losing the race means someone else
	// is already closing, so there is nothing left to do here.
	if !atomic.CompareAndSwapInt32(&s.closing, 0, 1) {
		// already closing
		return nil
	}

	if atomic.LoadInt32(&s.compacting) == 1 {
		// wake up any compaction goroutine waiting for transaction sync,
		// so it can observe txnSync and make progress toward completion.
		s.txnSyncMx.Lock()
		s.txnSync = true
		s.txnSyncCond.Broadcast()
		s.txnSyncMx.Unlock()

		// likewise wake up anything waiting on chain sync completion.
		s.chainSyncMx.Lock()
		s.chainSyncFinished = true
		s.chainSyncCond.Broadcast()
		s.chainSyncMx.Unlock()

		log.Warn("close with ongoing compaction in progress; waiting for it to finish...")
		// poll until the compaction goroutine clears the compacting flag;
		// closing mid-compaction could leave the stores inconsistent.
		for atomic.LoadInt32(&s.compacting) == 1 {
			time.Sleep(time.Second)
		}
	}

	// wake the reify orchestrator (it checks the closing flag) and wait
	// for all reify workers to drain before cancelling the context.
	s.reifyCond.Broadcast()
	s.reifyWorkers.Wait()
	s.cancel()
	// close auxiliary components, combining any errors into one.
	return multierr.Combine(s.markSetEnv.Close(), s.debug.Close())
}
|
|
|
|
|
2021-07-13 21:35:15 +00:00
|
|
|
func (s *SplitStore) checkClosing() error {
|
2021-07-10 13:30:27 +00:00
|
|
|
if atomic.LoadInt32(&s.closing) == 1 {
|
2021-07-13 21:35:15 +00:00
|
|
|
return xerrors.Errorf("splitstore is closing")
|
2021-07-12 11:45:58 +00:00
|
|
|
}
|
|
|
|
|
2021-03-13 10:00:28 +00:00
|
|
|
return nil
|
2021-03-01 16:41:51 +00:00
|
|
|
}
|
|
|
|
|
2020-11-24 17:41:07 +00:00
|
|
|
func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error {
|
|
|
|
s.baseEpoch = epoch
|
2021-12-11 21:03:00 +00:00
|
|
|
return s.ds.Put(s.ctx, baseEpochKey, epochToBytes(epoch))
|
2020-11-26 18:37:02 +00:00
|
|
|
}
|
2022-08-08 20:06:32 +00:00
|
|
|
|
|
|
|
func (s *SplitStore) setPruneEpoch(epoch abi.ChainEpoch) error {
|
|
|
|
s.pruneEpoch = epoch
|
|
|
|
return s.ds.Put(s.ctx, pruneEpochKey, epochToBytes(epoch))
|
|
|
|
}
|