2020-11-26 14:51:16 +00:00
|
|
|
package splitstore
|
2020-11-24 14:51:00 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"errors"
|
2021-07-08 18:30:39 +00:00
|
|
|
"os"
|
2020-11-25 07:10:58 +00:00
|
|
|
"sync"
|
2020-11-29 13:10:30 +00:00
|
|
|
"sync/atomic"
|
2020-11-25 08:11:42 +00:00
|
|
|
"time"
|
2020-11-24 14:51:00 +00:00
|
|
|
|
2021-03-03 08:56:41 +00:00
|
|
|
"go.uber.org/multierr"
|
2021-02-26 13:59:36 +00:00
|
|
|
"golang.org/x/xerrors"
|
|
|
|
|
2020-11-24 14:51:00 +00:00
|
|
|
blocks "github.com/ipfs/go-block-format"
|
|
|
|
cid "github.com/ipfs/go-cid"
|
2020-11-24 17:41:07 +00:00
|
|
|
dstore "github.com/ipfs/go-datastore"
|
2020-11-26 14:51:16 +00:00
|
|
|
logging "github.com/ipfs/go-log/v2"
|
2020-11-24 14:51:00 +00:00
|
|
|
|
|
|
|
"github.com/filecoin-project/go-state-types/abi"
|
2021-03-01 07:25:52 +00:00
|
|
|
bstore "github.com/filecoin-project/lotus/blockstore"
|
2020-11-24 14:51:00 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/types"
|
2021-03-05 09:54:06 +00:00
|
|
|
"github.com/filecoin-project/lotus/metrics"
|
|
|
|
|
|
|
|
"go.opencensus.io/stats"
|
2020-11-24 14:51:00 +00:00
|
|
|
)
|
|
|
|
|
2021-03-01 16:41:51 +00:00
|
|
|
var (
	// baseEpochKey stores the base epoch (last compaction epoch) in the
	// metadata store.
	baseEpochKey = dstore.NewKey("/splitstore/baseEpoch")

	// warmupEpochKey stores whether a hot store warmup has been performed.
	// On first start, the splitstore will walk the state tree and will copy
	// all active blocks into the hotstore.
	warmupEpochKey = dstore.NewKey("/splitstore/warmupEpoch")

	// markSetSizeKey stores the current estimate for the mark set size.
	// this is first computed at warmup and updated in every compaction
	markSetSizeKey = dstore.NewKey("/splitstore/markSetSize")

	// compactionIndexKey stores the compaction index (serial number)
	compactionIndexKey = dstore.NewKey("/splitstore/compactionIndex")

	// log is the package-level logger for the splitstore subsystem.
	log = logging.Logger("splitstore")

	// set this to true if you are debugging the splitstore to enable debug logging
	// (also settable at runtime via LOTUS_SPLITSTORE_DEBUG_LOG=1; see init below)
	enableDebugLog = false
	// set this to true if you want to track origin stack traces in the write log
	// (also settable at runtime via LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES=1)
	enableDebugLogWriteTraces = false
)
|
2020-11-24 17:41:07 +00:00
|
|
|
|
2021-07-13 21:35:15 +00:00
|
|
|
func init() {
|
|
|
|
if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG") == "1" {
|
|
|
|
enableDebugLog = true
|
|
|
|
}
|
2021-03-02 16:59:00 +00:00
|
|
|
|
2021-07-13 21:35:15 +00:00
|
|
|
if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES") == "1" {
|
|
|
|
enableDebugLogWriteTraces = true
|
|
|
|
}
|
|
|
|
}
|
2021-03-01 18:30:15 +00:00
|
|
|
|
2021-02-27 13:20:14 +00:00
|
|
|
// Config specifies the configuration options for the splitstore.
type Config struct {
	// MarkSetType is the type of mark set to use.
	//
	// Only current sane value is "map", but we may add an option for a disk-backed
	// markset for memory-constrained situations.
	MarkSetType string

	// DiscardColdBlocks indicates whether to skip moving cold blocks to the coldstore.
	// If the splitstore is running with a noop coldstore then this option is set to true
	// which skips moving (as it is a noop, but still takes time to read all the cold objects)
	// and directly purges cold blocks.
	DiscardColdBlocks bool
}
|
|
|
|
|
2021-03-01 17:38:02 +00:00
|
|
|
// ChainAccessor allows the Splitstore to access the chain. It will most likely
// be a ChainStore at runtime.
type ChainAccessor interface {
	// GetTipsetByHeight returns the tipset at the given epoch, relative to the
	// given tipset. Exact semantics of the bool flag are defined by the
	// implementation (presumably ChainStore's "previous tipset on null round"
	// behavior — confirm against the implementer).
	GetTipsetByHeight(context.Context, abi.ChainEpoch, *types.TipSet, bool) (*types.TipSet, error)
	// GetHeaviestTipSet returns the current head of the chain.
	GetHeaviestTipSet() *types.TipSet
	// SubscribeHeadChanges registers a callback invoked on head changes with
	// the reverted and applied tipsets.
	SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error)
}
|
|
|
|
|
2021-07-10 13:30:27 +00:00
|
|
|
// hotstore is the interface that must be satisfied by the hot blockstore; it is an extension
// of the Blockstore interface with the traits we need for compaction.
type hotstore interface {
	bstore.Blockstore
	// BlockstoreIterator provides efficient iteration over all keys, which
	// compaction requires to walk the hot store.
	bstore.BlockstoreIterator
}
|
|
|
|
|
2020-11-24 14:51:00 +00:00
|
|
|
// SplitStore is a Blockstore that splits storage between a hot store (recent,
// actively used objects) and a cold store (everything else), periodically
// compacting hot objects into the cold store.
type SplitStore struct {
	compacting int32 // compaction/prune/warmup in progress; accessed atomically
	closing    int32 // the splitstore is closing; accessed atomically

	cfg *Config

	mx          sync.Mutex
	warmupEpoch abi.ChainEpoch // protected by mx
	baseEpoch   abi.ChainEpoch // protected by compaction lock

	// headChangeMx serializes HeadChange notifications (see HeadChange elsewhere in this file).
	headChangeMx sync.Mutex

	// coldPurgeSize is the batch size used when purging cold objects.
	coldPurgeSize int

	chain ChainAccessor
	ds    dstore.Datastore // metadata store (epochs, markset size, compaction index)
	cold  bstore.Blockstore
	hot   hotstore

	markSetEnv  MarkSetEnv
	markSetSize int64 // size hint for marksets, persisted in ds

	compactionIndex int64 // serial number of compactions performed, persisted in ds

	// ctx governs the lifetime of background operations; cancel is invoked on Close.
	ctx    context.Context
	cancel func()

	debug *debugLog // nil-safe debug logger; only populated when enableDebugLog is set

	// transactional protection for concurrent read/writes during compaction
	txnLk           sync.RWMutex
	txnViewsMx      sync.Mutex
	txnViewsCond    sync.Cond // signaled when active views drain; L is txnViewsMx (set in Open)
	txnViews        int
	txnViewsWaiting bool
	txnActive       bool
	txnProtect      MarkSet
	txnRefsMx       sync.Mutex
	txnRefs         map[cid.Cid]struct{}
	txnMissing      map[cid.Cid]struct{}
}
|
|
|
|
|
2020-11-25 07:07:48 +00:00
|
|
|
// Compile-time assertion that SplitStore satisfies the Blockstore interface.
var _ bstore.Blockstore = (*SplitStore)(nil)
|
2020-11-24 14:51:00 +00:00
|
|
|
|
2021-03-01 17:39:00 +00:00
|
|
|
// Open opens an existing splistore, or creates a new splitstore. The splitstore
|
|
|
|
// is backed by the provided hot and cold stores. The returned SplitStore MUST be
|
|
|
|
// attached to the ChainStore with Start in order to trigger compaction.
|
|
|
|
func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) {
|
2021-07-10 13:30:27 +00:00
|
|
|
// hot blockstore must support the hotstore interface
|
|
|
|
hots, ok := hot.(hotstore)
|
|
|
|
if !ok {
|
|
|
|
// be specific about what is missing
|
|
|
|
if _, ok := hot.(bstore.BlockstoreIterator); !ok {
|
|
|
|
return nil, xerrors.Errorf("hot blockstore does not support efficient iteration: %T", hot)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil, xerrors.Errorf("hot blockstore does not support the necessary traits: %T", hot)
|
2020-12-01 15:17:34 +00:00
|
|
|
}
|
|
|
|
|
2021-03-02 00:47:21 +00:00
|
|
|
// the markset env
|
2021-07-04 09:23:30 +00:00
|
|
|
markSetEnv, err := OpenMarkSetEnv(path, cfg.MarkSetType)
|
2021-02-27 16:27:58 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2020-12-01 15:17:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// and now we can make a SplitStore
|
|
|
|
ss := &SplitStore{
|
2021-07-03 13:10:37 +00:00
|
|
|
cfg: cfg,
|
|
|
|
ds: ds,
|
|
|
|
cold: cold,
|
2021-07-10 13:30:27 +00:00
|
|
|
hot: hots,
|
2021-07-03 13:10:37 +00:00
|
|
|
markSetEnv: markSetEnv,
|
2021-02-27 13:20:14 +00:00
|
|
|
|
2021-03-02 16:59:00 +00:00
|
|
|
coldPurgeSize: defaultColdPurgeSize,
|
|
|
|
}
|
|
|
|
|
2021-07-13 06:01:50 +00:00
|
|
|
ss.txnViewsCond.L = &ss.txnViewsMx
|
2021-06-17 09:32:32 +00:00
|
|
|
ss.ctx, ss.cancel = context.WithCancel(context.Background())
|
|
|
|
|
2021-06-22 07:10:24 +00:00
|
|
|
if enableDebugLog {
|
|
|
|
ss.debug, err = openDebugLog(path)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-01 15:17:34 +00:00
|
|
|
return ss, nil
|
|
|
|
}
|
|
|
|
|
2020-11-24 14:51:00 +00:00
|
|
|
// Blockstore interface
|
2021-03-01 17:39:00 +00:00
|
|
|
func (s *SplitStore) DeleteBlock(_ cid.Cid) error {
|
2020-11-24 14:51:00 +00:00
|
|
|
// afaict we don't seem to be using this method, so it's not implemented
|
2020-11-24 22:01:10 +00:00
|
|
|
return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint
|
2020-11-24 14:51:00 +00:00
|
|
|
}
|
|
|
|
|
2021-03-02 14:45:45 +00:00
|
|
|
func (s *SplitStore) DeleteMany(_ []cid.Cid) error {
|
|
|
|
// afaict we don't seem to be using this method, so it's not implemented
|
|
|
|
return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint
|
|
|
|
}
|
|
|
|
|
2021-07-07 13:34:02 +00:00
|
|
|
func (s *SplitStore) Has(cid cid.Cid) (bool, error) {
|
2021-07-09 08:31:04 +00:00
|
|
|
if isIdentiyCid(cid) {
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
2021-06-25 07:07:45 +00:00
|
|
|
s.txnLk.RLock()
|
|
|
|
defer s.txnLk.RUnlock()
|
|
|
|
|
2021-07-07 13:34:02 +00:00
|
|
|
has, err := s.hot.Has(cid)
|
2020-11-24 14:51:00 +00:00
|
|
|
|
2021-07-02 06:36:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return has, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if has {
|
2021-07-09 12:10:02 +00:00
|
|
|
s.trackTxnRef(cid)
|
2021-07-07 13:34:02 +00:00
|
|
|
return true, nil
|
2020-11-24 14:51:00 +00:00
|
|
|
}
|
|
|
|
|
2021-07-07 13:34:02 +00:00
|
|
|
return s.cold.Has(cid)
|
2020-11-24 14:51:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) {
|
2021-07-09 08:31:04 +00:00
|
|
|
if isIdentiyCid(cid) {
|
|
|
|
data, err := decodeIdentityCid(cid)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return blocks.NewBlockWithCid(data, cid)
|
|
|
|
}
|
|
|
|
|
2021-06-25 07:07:45 +00:00
|
|
|
s.txnLk.RLock()
|
|
|
|
defer s.txnLk.RUnlock()
|
|
|
|
|
2020-11-24 14:51:00 +00:00
|
|
|
blk, err := s.hot.Get(cid)
|
|
|
|
|
|
|
|
switch err {
|
|
|
|
case nil:
|
2021-07-09 12:10:02 +00:00
|
|
|
s.trackTxnRef(cid)
|
2021-07-07 13:34:02 +00:00
|
|
|
return blk, nil
|
2020-11-24 14:51:00 +00:00
|
|
|
|
|
|
|
case bstore.ErrNotFound:
|
2021-07-10 13:30:27 +00:00
|
|
|
if s.isWarm() {
|
|
|
|
s.debug.LogReadMiss(cid)
|
2021-06-22 07:56:35 +00:00
|
|
|
}
|
2021-06-21 12:17:00 +00:00
|
|
|
|
2021-03-05 09:54:06 +00:00
|
|
|
blk, err = s.cold.Get(cid)
|
2021-03-05 10:04:40 +00:00
|
|
|
if err == nil {
|
2021-07-10 13:30:27 +00:00
|
|
|
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
|
2021-06-21 12:17:00 +00:00
|
|
|
|
2021-03-05 09:54:06 +00:00
|
|
|
}
|
|
|
|
return blk, err
|
2020-11-24 14:51:00 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SplitStore) GetSize(cid cid.Cid) (int, error) {
|
2021-07-09 08:31:04 +00:00
|
|
|
if isIdentiyCid(cid) {
|
|
|
|
data, err := decodeIdentityCid(cid)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return len(data), nil
|
|
|
|
}
|
|
|
|
|
2021-06-25 07:07:45 +00:00
|
|
|
s.txnLk.RLock()
|
|
|
|
defer s.txnLk.RUnlock()
|
|
|
|
|
2020-11-24 14:51:00 +00:00
|
|
|
size, err := s.hot.GetSize(cid)
|
|
|
|
|
|
|
|
switch err {
|
|
|
|
case nil:
|
2021-07-09 12:10:02 +00:00
|
|
|
s.trackTxnRef(cid)
|
2021-07-07 13:34:02 +00:00
|
|
|
return size, nil
|
2020-11-24 14:51:00 +00:00
|
|
|
|
|
|
|
case bstore.ErrNotFound:
|
2021-07-10 13:30:27 +00:00
|
|
|
if s.isWarm() {
|
|
|
|
s.debug.LogReadMiss(cid)
|
2021-06-22 07:56:35 +00:00
|
|
|
}
|
2021-06-21 12:17:00 +00:00
|
|
|
|
2021-03-05 09:54:06 +00:00
|
|
|
size, err = s.cold.GetSize(cid)
|
2021-03-05 10:04:40 +00:00
|
|
|
if err == nil {
|
2021-07-10 13:30:27 +00:00
|
|
|
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
|
2021-03-05 09:54:06 +00:00
|
|
|
}
|
|
|
|
return size, err
|
2020-11-24 14:51:00 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SplitStore) Put(blk blocks.Block) error {
|
2021-07-09 08:31:04 +00:00
|
|
|
if isIdentiyCid(blk.Cid()) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-06-25 07:07:45 +00:00
|
|
|
s.txnLk.RLock()
|
|
|
|
defer s.txnLk.RUnlock()
|
|
|
|
|
2021-07-02 08:37:35 +00:00
|
|
|
err := s.hot.Put(blk)
|
2021-07-07 13:34:02 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-07-06 14:13:38 +00:00
|
|
|
|
2021-07-07 13:34:02 +00:00
|
|
|
s.debug.LogWrite(blk)
|
|
|
|
|
2021-07-09 12:10:02 +00:00
|
|
|
s.trackTxnRef(blk.Cid())
|
2021-07-07 13:34:02 +00:00
|
|
|
return nil
|
2020-11-24 14:51:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SplitStore) PutMany(blks []blocks.Block) error {
|
2021-07-09 08:31:04 +00:00
|
|
|
// filter identites
|
|
|
|
idcids := 0
|
|
|
|
for _, blk := range blks {
|
|
|
|
if isIdentiyCid(blk.Cid()) {
|
|
|
|
idcids++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if idcids > 0 {
|
|
|
|
if idcids == len(blks) {
|
|
|
|
// it's all identities
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
filtered := make([]blocks.Block, 0, len(blks)-idcids)
|
|
|
|
for _, blk := range blks {
|
|
|
|
if isIdentiyCid(blk.Cid()) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
filtered = append(filtered, blk)
|
|
|
|
}
|
|
|
|
|
|
|
|
blks = filtered
|
|
|
|
}
|
|
|
|
|
2020-11-24 14:51:00 +00:00
|
|
|
batch := make([]cid.Cid, 0, len(blks))
|
|
|
|
for _, blk := range blks {
|
|
|
|
batch = append(batch, blk.Cid())
|
|
|
|
}
|
|
|
|
|
2021-06-25 07:07:45 +00:00
|
|
|
s.txnLk.RLock()
|
|
|
|
defer s.txnLk.RUnlock()
|
|
|
|
|
2021-07-02 08:37:35 +00:00
|
|
|
err := s.hot.PutMany(blks)
|
2021-07-07 13:34:02 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-07-04 06:53:58 +00:00
|
|
|
|
2021-07-07 13:34:02 +00:00
|
|
|
s.debug.LogWriteMany(blks)
|
|
|
|
|
2021-07-09 12:10:02 +00:00
|
|
|
s.trackTxnRefMany(batch)
|
2021-07-07 13:34:02 +00:00
|
|
|
return nil
|
2020-11-24 14:51:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
|
|
|
|
ctx, cancel := context.WithCancel(ctx)
|
|
|
|
|
|
|
|
chHot, err := s.hot.AllKeysChan(ctx)
|
|
|
|
if err != nil {
|
2020-11-24 22:01:10 +00:00
|
|
|
cancel()
|
2020-11-24 14:51:00 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
chCold, err := s.cold.AllKeysChan(ctx)
|
|
|
|
if err != nil {
|
|
|
|
cancel()
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-07-10 13:30:27 +00:00
|
|
|
seen := cid.NewSet()
|
|
|
|
ch := make(chan cid.Cid, 8) // buffer is arbitrary, just enough to avoid context switches
|
2020-11-24 14:51:00 +00:00
|
|
|
go func() {
|
|
|
|
defer cancel()
|
2020-11-25 07:07:06 +00:00
|
|
|
defer close(ch)
|
2020-11-24 14:51:00 +00:00
|
|
|
|
|
|
|
for _, in := range []<-chan cid.Cid{chHot, chCold} {
|
2021-07-10 13:30:27 +00:00
|
|
|
for c := range in {
|
|
|
|
// ensure we only emit each key once
|
|
|
|
if !seen.Visit(c) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2020-11-24 14:51:00 +00:00
|
|
|
select {
|
2021-07-10 13:30:27 +00:00
|
|
|
case ch <- c:
|
2020-11-24 14:51:00 +00:00
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
return ch, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SplitStore) HashOnRead(enabled bool) {
|
|
|
|
s.hot.HashOnRead(enabled)
|
|
|
|
s.cold.HashOnRead(enabled)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error {
|
2021-07-09 08:31:04 +00:00
|
|
|
if isIdentiyCid(cid) {
|
|
|
|
data, err := decodeIdentityCid(cid)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return cb(data)
|
|
|
|
}
|
|
|
|
|
2021-07-09 12:41:10 +00:00
|
|
|
// views are (optimistically) protected two-fold:
|
|
|
|
// - if there is an active transaction, then the reference is protected.
|
|
|
|
// - if there is no active transaction, active views are tracked in a
|
|
|
|
// wait group and compaction is inhibited from starting until they
|
|
|
|
// have all completed. this is necessary to ensure that a (very) long-running
|
|
|
|
// view can't have its data pointer deleted, which would be catastrophic.
|
|
|
|
// Note that we can't just RLock for the duration of the view, as this could
|
|
|
|
// lead to deadlock with recursive views.
|
2021-07-13 06:01:50 +00:00
|
|
|
s.protectView(cid)
|
|
|
|
defer s.viewDone()
|
2021-06-25 07:07:45 +00:00
|
|
|
|
2021-07-09 12:41:10 +00:00
|
|
|
err := s.hot.View(cid, cb)
|
2021-07-08 18:18:59 +00:00
|
|
|
switch err {
|
2020-11-24 14:51:00 +00:00
|
|
|
case bstore.ErrNotFound:
|
2021-07-10 13:30:27 +00:00
|
|
|
if s.isWarm() {
|
|
|
|
s.debug.LogReadMiss(cid)
|
2021-06-22 07:56:35 +00:00
|
|
|
}
|
2021-06-21 12:17:00 +00:00
|
|
|
|
2021-03-16 20:05:22 +00:00
|
|
|
err = s.cold.View(cid, cb)
|
|
|
|
if err == nil {
|
2021-07-10 13:30:27 +00:00
|
|
|
stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
|
2021-03-16 20:05:22 +00:00
|
|
|
}
|
|
|
|
return err
|
2020-11-24 14:51:00 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2020-11-24 17:15:38 +00:00
|
|
|
|
2021-07-10 13:30:27 +00:00
|
|
|
func (s *SplitStore) isWarm() bool {
|
|
|
|
s.mx.Lock()
|
|
|
|
defer s.mx.Unlock()
|
|
|
|
return s.warmupEpoch > 0
|
|
|
|
}
|
|
|
|
|
2020-11-24 17:26:28 +00:00
|
|
|
// State tracking
|
2021-03-01 17:38:02 +00:00
|
|
|
// Start attaches the splitstore to the chain: it restores persisted state from
// the metadata store (base epoch, warmup epoch, markset size, compaction
// index), triggers a warmup if one is needed, and subscribes to head changes
// to drive compaction.
func (s *SplitStore) Start(chain ChainAccessor) error {
	s.chain = chain
	curTs := chain.GetHeaviestTipSet()

	// should we warmup
	warmup := false

	// load base epoch from metadata ds
	// if none, then use current epoch because it's a fresh start
	bs, err := s.ds.Get(baseEpochKey)
	switch err {
	case nil:
		s.baseEpoch = bytesToEpoch(bs)

	case dstore.ErrNotFound:
		if curTs == nil {
			// this can happen in some tests
			break
		}

		err = s.setBaseEpoch(curTs.Height())
		if err != nil {
			return xerrors.Errorf("error saving base epoch: %w", err)
		}

	default:
		return xerrors.Errorf("error loading base epoch: %w", err)
	}

	// load warmup epoch from metadata ds; absence means warmup has never run
	bs, err = s.ds.Get(warmupEpochKey)
	switch err {
	case nil:
		s.warmupEpoch = bytesToEpoch(bs)

	case dstore.ErrNotFound:
		warmup = true

	default:
		return xerrors.Errorf("error loading warmup epoch: %w", err)
	}

	// load markSetSize from metadata ds to provide a size hint for marksets
	bs, err = s.ds.Get(markSetSizeKey)
	switch err {
	case nil:
		s.markSetSize = bytesToInt64(bs)

	case dstore.ErrNotFound:
		// no hint persisted yet; the zero value is acceptable
	default:
		return xerrors.Errorf("error loading mark set size: %w", err)
	}

	// load compactionIndex from metadata ds to provide a hint as to when to perform moving gc
	bs, err = s.ds.Get(compactionIndexKey)
	switch err {
	case nil:
		s.compactionIndex = bytesToInt64(bs)

	case dstore.ErrNotFound:
		// this is potentially an upgrade from splitstore v0; schedule a warmup as v0 has
		// some issues with hot references leaking into the coldstore.
		warmup = true
	default:
		return xerrors.Errorf("error loading compaction index: %w", err)
	}

	log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch)

	if warmup {
		err = s.warmup(curTs)
		if err != nil {
			return xerrors.Errorf("error starting warmup: %w", err)
		}
	}

	// watch the chain
	chain.SubscribeHeadChanges(s.HeadChange)

	return nil
}
|
|
|
|
|
2020-11-26 15:49:47 +00:00
|
|
|
func (s *SplitStore) Close() error {
|
2021-07-10 13:30:27 +00:00
|
|
|
if !atomic.CompareAndSwapInt32(&s.closing, 0, 1) {
|
|
|
|
// already closing
|
|
|
|
return nil
|
|
|
|
}
|
2021-03-05 08:11:54 +00:00
|
|
|
|
2021-07-10 13:30:27 +00:00
|
|
|
if atomic.LoadInt32(&s.compacting) == 1 {
|
|
|
|
log.Warn("close with ongoing compaction in progress; waiting for it to finish...")
|
|
|
|
for atomic.LoadInt32(&s.compacting) == 1 {
|
2020-11-26 15:49:47 +00:00
|
|
|
time.Sleep(time.Second)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-17 09:32:32 +00:00
|
|
|
s.cancel()
|
2021-07-04 06:53:58 +00:00
|
|
|
return multierr.Combine(s.markSetEnv.Close(), s.debug.Close())
|
2020-11-26 15:49:47 +00:00
|
|
|
}
|
|
|
|
|
2021-07-13 21:35:15 +00:00
|
|
|
func (s *SplitStore) checkClosing() error {
|
2021-07-10 13:30:27 +00:00
|
|
|
if atomic.LoadInt32(&s.closing) == 1 {
|
2021-07-13 21:35:15 +00:00
|
|
|
return xerrors.Errorf("splitstore is closing")
|
2021-07-12 11:45:58 +00:00
|
|
|
}
|
|
|
|
|
2021-03-13 10:00:28 +00:00
|
|
|
return nil
|
2021-03-01 16:41:51 +00:00
|
|
|
}
|
|
|
|
|
2020-11-24 17:41:07 +00:00
|
|
|
// setBaseEpoch updates the in-memory base epoch and persists it to the
// metadata store so it survives restarts. Caller must hold the compaction
// lock (baseEpoch is protected by it — see the field comment).
func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error {
	s.baseEpoch = epoch
	return s.ds.Put(baseEpochKey, epochToBytes(epoch))
}
|