From dd998d6b2452e1d63eb5e64e52d2093c0aa45fe5 Mon Sep 17 00:00:00 2001 From: zenground0 Date: Fri, 3 Mar 2023 08:53:23 -0500 Subject: [PATCH 01/19] Begin account for size during walks --- blockstore/splitstore/splitstore_compact.go | 113 ++++++++++++-------- blockstore/splitstore/splitstore_reify.go | 2 +- 2 files changed, 71 insertions(+), 44 deletions(-) diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 59bdd515d..79afe126c 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -201,7 +201,7 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { count := new(int32) visitor := newConcurrentVisitor() - walkObject := func(c cid.Cid) error { + walkObject := func(c cid.Cid) (int, error) { return s.walkObjectIncomplete(c, visitor, func(c cid.Cid) error { if isUnitaryObject(c) { @@ -228,7 +228,7 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { // optimize the common case of single put if len(cids) == 1 { - if err := walkObject(cids[0]); err != nil { + if _, err := walkObject(cids[0]); err != nil { log.Errorf("error marking tipset refs: %s", err) } log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count) @@ -243,7 +243,7 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { worker := func() error { for c := range workch { - if err := walkObject(c); err != nil { + if _, err := walkObject(c); err != nil { return err } } @@ -361,6 +361,7 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { log.Infow("protecting transactional references", "refs", len(txnRefs)) count := 0 + sz := new(int64) workch := make(chan cid.Cid, len(txnRefs)) startProtect := time.Now() @@ -393,10 +394,11 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { worker := func() error { for c := range workch { - err := s.doTxnProtect(c, markSet) + szTxn, err := s.doTxnProtect(c, markSet) if err != nil { return xerrors.Errorf("error protecting transactional references to %s: %w", c, err) } + atomic.AddInt64(sz, int64(szTxn)) } return nil } @@ -410,15 +412,15 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { return err } - log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count) + log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count, "protected size", sz) } } // transactionally protect a reference by walking the object and marking. // concurrent markings are short circuited by checking the markset. 
-func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error { +func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) (int, error) { if err := s.checkClosing(); err != nil { - return err + return 0, err } // Note: cold objects are deleted heaviest first, so the consituents of an object @@ -907,6 +909,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp copy(toWalk, ts.Cids()) walkCnt := new(int64) scanCnt := new(int64) + szWalk := new(int64) tsRef := func(blkCids []cid.Cid) (cid.Cid, error) { return types.NewTipSetKey(blkCids...).Cid() @@ -942,8 +945,10 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if err != nil { return xerrors.Errorf("error computing cid reference to parent tipset") } - if err := s.walkObjectIncomplete(pRef, visitor, fHot, stopWalk); err != nil { + if sz, err := s.walkObjectIncomplete(pRef, visitor, fHot, stopWalk); err != nil { return xerrors.Errorf("error walking parent tipset cid reference") + } else { + atomic.AddInt64(szWalk, int64(sz)) } // message are retained if within the inclMsgs boundary @@ -951,38 +956,52 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if inclMsgs < inclState { // we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we // synced from snapshot and have a long HotStoreMessageRetentionPolicy. - if err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk); err != nil { + if sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk); err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) + } else { + atomic.AddInt64(szWalk, int64(sz)) } - if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk); err != nil { + if sz, err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk); err != nil { return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) + } else { + atomic.AddInt64(szWalk, int64(sz)) } } else { - if err := s.walkObject(hdr.Messages, visitor, fHot); err != nil { + if sz, err := s.walkObject(hdr.Messages, visitor, fHot); err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) + } else { + atomic.AddInt64(szWalk, int64(sz)) } - if err := s.walkObject(hdr.ParentMessageReceipts, visitor, fHot); err != nil { + if sz, err := s.walkObject(hdr.ParentMessageReceipts, visitor, fHot); err != nil { return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) + } else { + atomic.AddInt64(szWalk, int64(sz)) } } } // messages and receipts outside of inclMsgs are included in the cold store if hdr.Height < inclMsgs && hdr.Height > 0 { - if err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk); err != nil { + if sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk); err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) + } else { + atomic.AddInt64(szWalk, int64(sz)) } - if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk); err != nil { + if sz, err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk); err != nil { return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) + } else { + atomic.AddInt64(szWalk, int64(sz)) } } // state is only retained if within the inclState boundary, 
with the exception of genesis if hdr.Height >= inclState || hdr.Height == 0 { - if err := s.walkObject(hdr.ParentStateRoot, visitor, fHot); err != nil { + if sz, err := s.walkObject(hdr.ParentStateRoot, visitor, fHot); err != nil { return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err) + } else { + atomic.AddInt64(szWalk, int64(sz)) } atomic.AddInt64(scanCnt, 1) } @@ -1001,8 +1020,10 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if err != nil { return xerrors.Errorf("error computing cid reference to parent tipset") } - if err := s.walkObjectIncomplete(hRef, visitor, fHot, stopWalk); err != nil { + if sz, err := s.walkObjectIncomplete(hRef, visitor, fHot, stopWalk); err != nil { return xerrors.Errorf("error walking parent tipset cid reference") + } else { + atomic.AddInt64(szWalk, int64(sz)) } for len(toWalk) > 0 { @@ -1047,123 +1068,129 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp } } - log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt) + log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt, "walk size", szWalk) return nil } -func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) error { +func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) (int, error) { + var sz int visit, err := visitor.Visit(c) if err != nil { - return xerrors.Errorf("error visiting object: %w", err) + return 0, xerrors.Errorf("error visiting object: %w", err) } if !visit { - return nil + return sz, nil } if err := f(c); err != nil { if err == errStopWalk { - return nil + return sz, nil } - return err + return 0, err } if c.Prefix().Codec != cid.DagCBOR { - return nil + return sz, nil } // check this before recursing if err := s.checkClosing(); err != nil { - return err + return 0, err } var links []cid.Cid err = s.view(c, func(data []byte) error { + sz += len(data) return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { links = append(links, c) }) }) if err != nil { - return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) + return 0, xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) } for _, c := range links { - err := s.walkObject(c, visitor, f) + szLink, err := s.walkObject(c, visitor, f) if err != nil { - return xerrors.Errorf("error walking link (cid: %s): %w", c, err) + return 0, xerrors.Errorf("error walking link (cid: %s): %w", c, err) } + sz += szLink } - return nil + return sz, nil } // like walkObject, but the object may be potentially incomplete (references missing) -func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) error { +func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) (int, error) { + sz := 0 visit, err := visitor.Visit(c) if err != nil { - return xerrors.Errorf("error visiting object: %w", err) + return 0, xerrors.Errorf("error visiting object: %w", err) } if !visit { - return nil + return sz, nil } // occurs check -- only for DAGs if c.Prefix().Codec == cid.DagCBOR { has, err := s.has(c) if err != nil { - return xerrors.Errorf("error occur checking %s: %w", c, err) + return 0, xerrors.Errorf("error occur checking %s: %w", c, err) } if !has { err = missing(c) if err == errStopWalk { - return nil + return sz, nil } - return err + return 0, err } } if err := f(c); err != nil { if err == errStopWalk { - return nil + return sz, nil 
} - return err + return 0, err } if c.Prefix().Codec != cid.DagCBOR { - return nil + return sz, nil } // check this before recursing if err := s.checkClosing(); err != nil { - return err + return sz, err } var links []cid.Cid err = s.view(c, func(data []byte) error { + sz += len(data) return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { links = append(links, c) }) }) if err != nil { - return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) + return 0, xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) } for _, c := range links { - err := s.walkObjectIncomplete(c, visitor, f, missing) + szLink, err := s.walkObjectIncomplete(c, visitor, f, missing) if err != nil { - return xerrors.Errorf("error walking link (cid: %s): %w", c, err) + return 0, xerrors.Errorf("error walking link (cid: %s): %w", c, err) } + sz += szLink } - return nil + return sz, nil } // internal version used during compaction and related operations @@ -1528,7 +1555,7 @@ func (s *SplitStore) waitForMissingRefs(markSet MarkSet) { missing = make(map[cid.Cid]struct{}) for c := range towalk { - err := s.walkObjectIncomplete(c, visitor, + _, err := s.walkObjectIncomplete(c, visitor, func(c cid.Cid) error { if isUnitaryObject(c) { return errStopWalk diff --git a/blockstore/splitstore/splitstore_reify.go b/blockstore/splitstore/splitstore_reify.go index aa14f090a..07efedead 100644 --- a/blockstore/splitstore/splitstore_reify.go +++ b/blockstore/splitstore/splitstore_reify.go @@ -101,7 +101,7 @@ func (s *SplitStore) doReify(c cid.Cid) { defer s.txnLk.RUnlock() count := 0 - err := s.walkObjectIncomplete(c, newTmpVisitor(), + _, err := s.walkObjectIncomplete(c, newTmpVisitor(), func(c cid.Cid) error { if isUnitaryObject(c) { return errStopWalk From 5d9739a863b41bfd1d1563082509f9700e3bfeda Mon Sep 17 00:00:00 2001 From: zenground0 Date: Sat, 4 Mar 2023 09:38:18 -0700 Subject: [PATCH 02/19] Track size of dags relevant to compaction --- blockstore/splitstore/splitstore.go | 11 +++++++ blockstore/splitstore/splitstore_check.go | 4 +-- blockstore/splitstore/splitstore_compact.go | 33 ++++++++++++++++----- 3 files changed, 38 insertions(+), 10 deletions(-) diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index 0afb15c11..43d6d9349 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -195,6 +195,17 @@ type SplitStore struct { // registered protectors protectors []func(func(cid.Cid) error) error + + // dag sizes measured during latest compaction + // logged and used for GC strategy + + // protected by compaction lock + szWalk int64 + szProtectedTxns int64 + szToPurge int64 // expected purges before critical section protections and live marking + + // protected by txnLk + szMarkedLiveRefs int64 } var _ bstore.Blockstore = (*SplitStore)(nil) diff --git a/blockstore/splitstore/splitstore_check.go b/blockstore/splitstore/splitstore_check.go index 336515980..f57703866 100644 --- a/blockstore/splitstore/splitstore_check.go +++ b/blockstore/splitstore/splitstore_check.go @@ -95,7 +95,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { } defer visitor.Close() //nolint - err = s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor, + size, err := s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor, func(c cid.Cid) error { if isUnitaryObject(c) { return errStopWalk @@ -133,7 +133,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { return err } - log.Infow("check done", "cold", *coldCnt, "missing", 
*missingCnt) + log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt, "walk size", size) write("--") write("cold: %d missing: %d", *coldCnt, *missingCnt) write("DONE") diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 79afe126c..bddb9e1df 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -199,6 +199,8 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { log.Debugf("marking %d live refs", len(cids)) startMark := time.Now() + szMarked := new(int64) + count := new(int32) visitor := newConcurrentVisitor() walkObject := func(c cid.Cid) (int, error) { @@ -228,10 +230,12 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { // optimize the common case of single put if len(cids) == 1 { - if _, err := walkObject(cids[0]); err != nil { + sz, err := walkObject(cids[0]) + if err != nil { log.Errorf("error marking tipset refs: %s", err) } log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count) + atomic.AddInt64(szMarked, int64(sz)) return } @@ -243,9 +247,11 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { worker := func() error { for c := range workch { - if _, err := walkObject(c); err != nil { + sz, err := walkObject(c) + if err != nil { return err } + atomic.AddInt64(szMarked, int64(sz)) } return nil @@ -268,7 +274,8 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { log.Errorf("error marking tipset refs: %s", err) } - log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count) + log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count, "size marked", *szMarked) + s.szMarkedLiveRefs += *szMarked } // transactionally protect a view @@ -600,7 +607,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { } err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{}, fHot, fCold) - if err != nil { return xerrors.Errorf("error marking: %w", err) } @@ -640,7 +646,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { defer purgew.Close() //nolint:errcheck // some stats for logging - var hotCnt, coldCnt, purgeCnt int + var hotCnt, coldCnt, purgeCnt, szPurge int err = s.hot.ForEachKey(func(c cid.Cid) error { // was it marked? 
mark, err := markSet.Has(c) @@ -652,6 +658,16 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { hotCnt++ return nil } + if sz, err := s.hot.GetSize(s.ctx, c); err != nil { + if ipld.IsNotFound(err) { + log.Warnf("hotstore missing expected block %s", c) + return nil + } + + return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err) + } else { + szPurge += sz + } // it needs to be removed from hot store, mark it as candidate for purge if err := purgew.Write(c); err != nil { @@ -691,7 +707,8 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { log.Infow("cold collection done", "took", time.Since(startCollect)) - log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "purge", purgeCnt) + log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "purge", purgeCnt, "purge size", szPurge) + s.szToPurge = int64(szPurge) stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt))) stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt))) @@ -775,8 +792,8 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { return xerrors.Errorf("error purging cold objects: %w", err) } log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge)) - s.endCriticalSection() + log.Infow("total protected size", s.szProtectedTxns, "total marked live size", s.szMarkedLiveRefs) if err := checkpoint.Close(); err != nil { log.Warnf("error closing checkpoint: %s", err) @@ -1069,7 +1086,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp } log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt, "walk size", szWalk) - + s.szWalk = *szWalk return nil } From 0fe91846cd924c3f90907acd3ddcbe14856bee7a Mon Sep 17 00:00:00 2001 From: zenground0 Date: Sat, 4 Mar 2023 09:46:38 -0700 Subject: [PATCH 03/19] Plan out moving GC limiting --- blockstore/splitstore/splitstore_gc.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go index 3b53b8042..c70440cd4 100644 --- a/blockstore/splitstore/splitstore_gc.go +++ b/blockstore/splitstore/splitstore_gc.go @@ -8,6 +8,13 @@ import ( ) func (s *SplitStore) gcHotAfterCompaction() { + // TODO size aware GC + // 1. Add a config value to specify targetted max number of bytes M + // 2. Use measurement of marked hotstore size H (we now have this), actual hostore size T (need to compute this), total move size H + T, approximate purged size P + // 3. Trigger moving GC whenever H + T is within 50 GB of M + // 4. if H + T > M use aggressive online threshold + // 5. Use threshold that covers 3 std devs of vlogs when doing aggresive online. Mean == (H + P) / T, assume normal distribution + // 6. 
Use threshold that covers 1 or 2 std devs of vlogs when doing regular online GC var opts []bstore.BlockstoreGCOption if s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0 { opts = append(opts, bstore.WithFullGC(true)) From a994153e272f061b423ea420e3ca21967b14867f Mon Sep 17 00:00:00 2001 From: zenground0 Date: Tue, 7 Mar 2023 07:38:27 -0700 Subject: [PATCH 04/19] GC respects target for max hotstore space --- blockstore/splitstore/splitstore.go | 10 ++++ blockstore/splitstore/splitstore_check.go | 2 +- blockstore/splitstore/splitstore_compact.go | 18 +++++-- blockstore/splitstore/splitstore_gc.go | 55 ++++++++++++++++++--- documentation/en/default-lotus-config.toml | 11 +++++ node/config/doc_gen.go | 11 +++++ node/config/types.go | 7 +++ node/modules/blockstore.go | 1 + 8 files changed, 103 insertions(+), 12 deletions(-) diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index 43d6d9349..cefb86ebd 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -115,6 +115,14 @@ type Config struct { // A positive value is the number of compactions before a full GC is performed; // a value of 1 will perform full GC in every compaction. HotStoreFullGCFrequency uint64 + + // HotstoreMaxSpaceTarget suggests the max allowed space the hotstore can take. + // This is not a hard limit, it is possible for the hotstore to exceed the target + // for example if state grows massively between compactions. The splitstore + // will make a best effort to avoid overflowing the target and in practice should + // never overflow. This field is used when doing GC at the end of a compaction to + // adaptively choose moving GC + HotstoreMaxSpaceTarget uint64 } // ChainAccessor allows the Splitstore to access the chain. It will most likely @@ -165,6 +173,7 @@ type SplitStore struct { compactionIndex int64 pruneIndex int64 + onlineGCCnt int64 ctx context.Context cancel func() @@ -203,6 +212,7 @@ type SplitStore struct { szWalk int64 szProtectedTxns int64 szToPurge int64 // expected purges before critical section protections and live marking + szKeys int64 // approximate, not counting keys protected when entering critical section // protected by txnLk szMarkedLiveRefs int64 diff --git a/blockstore/splitstore/splitstore_check.go b/blockstore/splitstore/splitstore_check.go index f57703866..2645c78c5 100644 --- a/blockstore/splitstore/splitstore_check.go +++ b/blockstore/splitstore/splitstore_check.go @@ -95,7 +95,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { } defer visitor.Close() //nolint - size, err := s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor, + size := s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor, func(c cid.Cid) error { if isUnitaryObject(c) { return errStopWalk diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index bddb9e1df..60003bbd2 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -66,7 +66,8 @@ var ( ) const ( - batchSize = 16384 + batchSize = 16384 + cidKeySize = 32 ) func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { @@ -518,6 +519,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { // might be potentially inconsistent; abort compaction and notify the user to intervene. 
return xerrors.Errorf("checkpoint exists; aborting compaction") } + s.clearSizeMeasurements() currentEpoch := curTs.Height() boundaryEpoch := currentEpoch - CompactionBoundary @@ -709,6 +711,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "purge", purgeCnt, "purge size", szPurge) s.szToPurge = int64(szPurge) + s.szKeys = int64(hotCnt) * cidKeySize stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt))) stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt))) @@ -1473,8 +1476,9 @@ func (s *SplitStore) completeCompaction() error { } s.compactType = none - // Note: at this point we can start the splitstore; a compaction should run on - // the first head change, which will trigger gc on the hotstore. + // Note: at this point we can start the splitstore; base epoch is not + // incremented here so a compaction should run on the first head + // change, which will trigger gc on the hotstore. // We don't mind the second (back-to-back) compaction as the head will // have advanced during marking and coldset accumulation. return nil @@ -1532,6 +1536,14 @@ func (s *SplitStore) completePurge(coldr *ColdSetReader, checkpoint *Checkpoint, return nil } +func (s *SplitStore) clearSizeMeasurements() { + s.szKeys = 0 + s.szMarkedLiveRefs = 0 + s.szProtectedTxns = 0 + s.szToPurge = 0 + s.szWalk = 0 +} + // I really don't like having this code, but we seem to have some occasional DAG references with // missing constituents. During testing in mainnet *some* of these references *sometimes* appeared // after a little bit. diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go index c70440cd4..70ad9fb75 100644 --- a/blockstore/splitstore/splitstore_gc.go +++ b/blockstore/splitstore/splitstore_gc.go @@ -7,17 +7,56 @@ import ( bstore "github.com/filecoin-project/lotus/blockstore" ) +const ( + // When < 150 GB of space would remain during moving GC, trigger moving GC + targetThreshold = 150_000_000_000 + // Don't attempt moving GC with 50 GB or less would remain during moving GC + targetBuffer = 50_000_000_000 + // Fraction of garbage in badger vlog for online GC traversal to collect garbage + aggressiveOnlineGCThreshold = 0.0001 +) + func (s *SplitStore) gcHotAfterCompaction() { - // TODO size aware GC - // 1. Add a config value to specify targetted max number of bytes M - // 2. Use measurement of marked hotstore size H (we now have this), actual hostore size T (need to compute this), total move size H + T, approximate purged size P - // 3. Trigger moving GC whenever H + T is within 50 GB of M - // 4. if H + T > M use aggressive online threshold - // 5. Use threshold that covers 3 std devs of vlogs when doing aggresive online. Mean == (H + P) / T, assume normal distribution - // 6. Use threshold that covers 1 or 2 std devs of vlogs when doing regular online GC + // Measure hotstore size, determine if we should do full GC, determine if we can do full GC. 
+ // We should do full GC if + // FullGCFrequency is specified and compaction index matches frequency + // OR HotstoreMaxSpaceTarget is specified and total moving space is within 150 GB of target + // We can do full if + // HotstoreMaxSpaceTarget is not specified + // OR total moving space would not exceed 50 GB below target + // + // a) If we should not do full GC => online GC + // b) If we should do full GC and can => moving GC + // c) If we should do full GC and can't => aggressive online GC + var hotSize int64 + var err error + sizer, ok := s.hot.(bstore.BlockstoreSize) + if ok { + hotSize, err = sizer.Size() + if err != nil { + log.Warnf("error getting hotstore size: %s, estimating empty hot store for targeting", err) + hotSize = 0 + } + } else { + hotSize = 0 + } + + copySizeApprox := s.szKeys + s.szMarkedLiveRefs + s.szProtectedTxns + s.szWalk + shouldTarget := s.cfg.HotstoreMaxSpaceTarget > 0 && hotSize+copySizeApprox > int64(s.cfg.HotstoreMaxSpaceTarget)-targetThreshold + shouldFreq := s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0 + shouldDoFull := shouldTarget || shouldFreq + canDoFull := s.cfg.HotstoreMaxSpaceTarget == 0 || hotSize+copySizeApprox < int64(s.cfg.HotstoreMaxSpaceTarget)-targetBuffer + log.Infof("measured hot store size: %d, approximate new size: %d, should do full %t, can do full %t", hotSize, copySizeApprox, shouldDoFull, canDoFull) + var opts []bstore.BlockstoreGCOption - if s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0 { + if shouldDoFull && canDoFull { opts = append(opts, bstore.WithFullGC(true)) + } else if shouldDoFull && !canDoFull { + log.Warnf("Attention! Estimated moving GC size %d is not within safety buffer %d of target max %d, performing aggressive online GC to attempt to bring hotstore size down safely", copySizeApprox, targetBuffer, s.cfg.HotstoreMaxSpaceTarget) + log.Warn("If problem continues you can 1) temporarily allocate more disk space to hotstore and 2) reflect in HotstoreMaxSpaceTarget OR trigger manual move with `lotus chain prune hot-moving`") + log.Warn("If problem continues and you do not have any more disk space you can run continue to manually trigger online GC at agressive thresholds (< 0.01) with `lotus chain prune hot`") + + opts = append(opts, bstore.WithThreshold(aggressiveOnlineGCThreshold)) } if err := s.gcBlockstore(s.hot, opts); err != nil { diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 41d7e6aca..a447350ff 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -230,6 +230,17 @@ # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREFULLGCFREQUENCY #HotStoreFullGCFrequency = 20 + # HotStoreMaxSpaceTarget sets a target max disk size for the hotstore. Splitstore GC + # will run moving GC if disk utilization gets within a threshold (150 GB) of the target. + # Splitstore GC will NOT run moving GC if the total size of the move would get + # within 50 GB of the target, and instead will run a more aggressive online GC. + # If both HotStoreFullGCFrequency and HotStoreMaxSpaceTarget are set then splitstore + # GC will trigger moving GC if either configuration condition is met. + # + # type: uint64 + # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMAXSPACETARGET + #HotStoreMaxSpaceTarget = 0 + [Cluster] # EXPERIMENTAL. 
config to enabled node cluster with raft consensus diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 8b79bed4f..53c305be6 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -1286,6 +1286,17 @@ the compaction boundary; default is 0.`, A value of 0 disables, while a value 1 will do full GC in every compaction. Default is 20 (about once a week).`, }, + { + Name: "HotStoreMaxSpaceTarget", + Type: "uint64", + + Comment: `HotStoreMaxSpaceTarget sets a target max disk size for the hotstore. Splitstore GC +will run moving GC if disk utilization gets within a threshold (150 GB) of the target. +Splitstore GC will NOT run moving GC if the total size of the move would get +within 50 GB of the target, and instead will run a more aggressive online GC. +If both HotStoreFullGCFrequency and HotStoreMaxSpaceTarget are set then splitstore +GC will trigger moving GC if either configuration condition is met.`, + }, }, "StorageMiner": []DocField{ { diff --git a/node/config/types.go b/node/config/types.go index 690e8caee..123714794 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -601,6 +601,13 @@ type Splitstore struct { // A value of 0 disables, while a value 1 will do full GC in every compaction. // Default is 20 (about once a week). HotStoreFullGCFrequency uint64 + // HotStoreMaxSpaceTarget sets a target max disk size for the hotstore. Splitstore GC + // will run moving GC if disk utilization gets within a threshold (150 GB) of the target. + // Splitstore GC will NOT run moving GC if the total size of the move would get + // within 50 GB of the target, and instead will run a more aggressive online GC. + // If both HotStoreFullGCFrequency and HotStoreMaxSpaceTarget are set then splitstore + // GC will trigger moving GC if either configuration condition is met. 
+ HotStoreMaxSpaceTarget uint64 } // // Full Node diff --git a/node/modules/blockstore.go b/node/modules/blockstore.go index 90b7b6183..f4211bbe2 100644 --- a/node/modules/blockstore.go +++ b/node/modules/blockstore.go @@ -87,6 +87,7 @@ func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.Locked UniversalColdBlocks: cfg.Splitstore.ColdStoreType == "universal", HotStoreMessageRetention: cfg.Splitstore.HotStoreMessageRetention, HotStoreFullGCFrequency: cfg.Splitstore.HotStoreFullGCFrequency, + HotstoreMaxSpaceTarget: cfg.Splitstore.HotStoreMaxSpaceTarget, } ss, err := splitstore.Open(path, ds, hot, cold, cfg) if err != nil { From bf29d4199366b36e2e7f7940127a6d2d6e7606db Mon Sep 17 00:00:00 2001 From: zenground0 Date: Tue, 7 Mar 2023 08:21:15 -0700 Subject: [PATCH 05/19] lint --- blockstore/splitstore/splitstore_compact.go | 60 ++++++++++----------- blockstore/splitstore/splitstore_gc.go | 2 +- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 60003bbd2..d8a7c2c1d 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -660,16 +660,16 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { hotCnt++ return nil } - if sz, err := s.hot.GetSize(s.ctx, c); err != nil { + sz, err := s.hot.GetSize(s.ctx, c) + if err != nil { if ipld.IsNotFound(err) { log.Warnf("hotstore missing expected block %s", c) return nil } return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err) - } else { - szPurge += sz } + szPurge += sz // it needs to be removed from hot store, mark it as candidate for purge if err := purgew.Write(c); err != nil { @@ -965,64 +965,64 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if err != nil { return xerrors.Errorf("error computing cid reference to parent tipset") } - if sz, err := s.walkObjectIncomplete(pRef, visitor, fHot, stopWalk); err != nil { + sz, err := s.walkObjectIncomplete(pRef, visitor, fHot, stopWalk) + if err != nil { return xerrors.Errorf("error walking parent tipset cid reference") - } else { - atomic.AddInt64(szWalk, int64(sz)) } + atomic.AddInt64(szWalk, int64(sz)) // message are retained if within the inclMsgs boundary if hdr.Height >= inclMsgs && hdr.Height > 0 { if inclMsgs < inclState { // we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we // synced from snapshot and have a long HotStoreMessageRetentionPolicy. 
- if sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk); err != nil { + sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk) + if err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) - } else { - atomic.AddInt64(szWalk, int64(sz)) } + atomic.AddInt64(szWalk, int64(sz)) - if sz, err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk); err != nil { + sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk) + if err != nil { return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) - } else { - atomic.AddInt64(szWalk, int64(sz)) } + atomic.AddInt64(szWalk, int64(sz)) } else { - if sz, err := s.walkObject(hdr.Messages, visitor, fHot); err != nil { + sz, err = s.walkObject(hdr.Messages, visitor, fHot) + if err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) - } else { - atomic.AddInt64(szWalk, int64(sz)) } + atomic.AddInt64(szWalk, int64(sz)) - if sz, err := s.walkObject(hdr.ParentMessageReceipts, visitor, fHot); err != nil { + sz, err := s.walkObject(hdr.ParentMessageReceipts, visitor, fHot) + if err != nil { return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) - } else { - atomic.AddInt64(szWalk, int64(sz)) } + atomic.AddInt64(szWalk, int64(sz)) } } // messages and receipts outside of inclMsgs are included in the cold store if hdr.Height < inclMsgs && hdr.Height > 0 { - if sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk); err != nil { + sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk) + if err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) - } else { - atomic.AddInt64(szWalk, int64(sz)) } - if sz, err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk); err != nil { + atomic.AddInt64(szWalk, int64(sz)) + sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk) + if err != nil { return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) - } else { - atomic.AddInt64(szWalk, int64(sz)) } + atomic.AddInt64(szWalk, int64(sz)) } // state is only retained if within the inclState boundary, with the exception of genesis if hdr.Height >= inclState || hdr.Height == 0 { - if sz, err := s.walkObject(hdr.ParentStateRoot, visitor, fHot); err != nil { + sz, err := s.walkObject(hdr.ParentStateRoot, visitor, fHot) + if err != nil { return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err) - } else { - atomic.AddInt64(szWalk, int64(sz)) } + atomic.AddInt64(szWalk, int64(sz)) atomic.AddInt64(scanCnt, 1) } @@ -1040,11 +1040,11 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if err != nil { return xerrors.Errorf("error computing cid reference to parent tipset") } - if sz, err := s.walkObjectIncomplete(hRef, visitor, fHot, stopWalk); err != nil { + sz, err := s.walkObjectIncomplete(hRef, visitor, fHot, stopWalk) + if err != nil { return xerrors.Errorf("error walking parent tipset cid reference") - } else { - atomic.AddInt64(szWalk, int64(sz)) } + atomic.AddInt64(szWalk, int64(sz)) for len(toWalk) > 0 { // walking can take a while, so check this with every opportunity diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go index 70ad9fb75..cdd866b16 100644 
--- a/blockstore/splitstore/splitstore_gc.go +++ b/blockstore/splitstore/splitstore_gc.go @@ -54,7 +54,7 @@ func (s *SplitStore) gcHotAfterCompaction() { } else if shouldDoFull && !canDoFull { log.Warnf("Attention! Estimated moving GC size %d is not within safety buffer %d of target max %d, performing aggressive online GC to attempt to bring hotstore size down safely", copySizeApprox, targetBuffer, s.cfg.HotstoreMaxSpaceTarget) log.Warn("If problem continues you can 1) temporarily allocate more disk space to hotstore and 2) reflect in HotstoreMaxSpaceTarget OR trigger manual move with `lotus chain prune hot-moving`") - log.Warn("If problem continues and you do not have any more disk space you can run continue to manually trigger online GC at agressive thresholds (< 0.01) with `lotus chain prune hot`") + log.Warn("If problem continues and you do not have any more disk space you can run continue to manually trigger online GC at aggressive thresholds (< 0.01) with `lotus chain prune hot`") opts = append(opts, bstore.WithThreshold(aggressiveOnlineGCThreshold)) } From e17ec37d10ebdb84f83954cd913b59cd64aad13d Mon Sep 17 00:00:00 2001 From: zenground0 Date: Wed, 8 Mar 2023 09:13:00 -0700 Subject: [PATCH 06/19] fix logging --- blockstore/splitstore/splitstore_compact.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index d8a7c2c1d..841211524 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -796,7 +796,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { } log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge)) s.endCriticalSection() - log.Infow("total protected size", s.szProtectedTxns, "total marked live size", s.szMarkedLiveRefs) + log.Infow("critical section done", "total protected size", s.szProtectedTxns, "total marked live size", s.szMarkedLiveRefs) if err := checkpoint.Close(); err != nil { log.Warnf("error closing checkpoint: %s", err) From efbc0ff12e4cdf508d8cab01afc5740c00da08e5 Mon Sep 17 00:00:00 2001 From: zenground0 Date: Wed, 8 Mar 2023 09:43:37 -0700 Subject: [PATCH 07/19] Fix up approximation and logging --- blockstore/splitstore/splitstore_compact.go | 2 +- blockstore/splitstore/splitstore_gc.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 841211524..bd73aa570 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -419,7 +419,7 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { if err := g.Wait(); err != nil { return err } - + s.szProtectedTxns += *sz log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count, "protected size", sz) } } diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go index cdd866b16..2e23b993a 100644 --- a/blockstore/splitstore/splitstore_gc.go +++ b/blockstore/splitstore/splitstore_gc.go @@ -46,6 +46,7 @@ func (s *SplitStore) gcHotAfterCompaction() { shouldFreq := s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0 shouldDoFull := shouldTarget || shouldFreq canDoFull := s.cfg.HotstoreMaxSpaceTarget == 0 || hotSize+copySizeApprox < int64(s.cfg.HotstoreMaxSpaceTarget)-targetBuffer + log.Debugw("approximating new hot store size", "key size", 
s.szKeys, "marked live refs", s.szMarkedLiveRefs, "protected txns", s.szProtectedTxns, "walked DAG", s.szWalk) log.Infof("measured hot store size: %d, approximate new size: %d, should do full %t, can do full %t", hotSize, copySizeApprox, shouldDoFull, canDoFull) var opts []bstore.BlockstoreGCOption From e5553554d17caf050daba140b14f0f73f08da281 Mon Sep 17 00:00:00 2001 From: zenground0 Date: Wed, 8 Mar 2023 17:30:42 -0700 Subject: [PATCH 08/19] cid key size --- blockstore/splitstore/splitstore_compact.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index bd73aa570..7ac00cadf 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -67,7 +67,7 @@ var ( const ( batchSize = 16384 - cidKeySize = 32 + cidKeySize = 128 ) func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { From ee9ff563d20a30ea071f72e07dee888202e1b636 Mon Sep 17 00:00:00 2001 From: zenground0 Date: Wed, 8 Mar 2023 17:51:59 -0700 Subject: [PATCH 09/19] computing szPurge leads to deadlock and unneeded, remove --- blockstore/splitstore/splitstore.go | 1 - blockstore/splitstore/splitstore_compact.go | 18 +++--------------- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index cefb86ebd..ad19f5dff 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -211,7 +211,6 @@ type SplitStore struct { // protected by compaction lock szWalk int64 szProtectedTxns int64 - szToPurge int64 // expected purges before critical section protections and live marking szKeys int64 // approximate, not counting keys protected when entering critical section // protected by txnLk diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 7ac00cadf..db1b35176 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -648,7 +648,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { defer purgew.Close() //nolint:errcheck // some stats for logging - var hotCnt, coldCnt, purgeCnt, szPurge int + var hotCnt, coldCnt, purgeCnt int64 err = s.hot.ForEachKey(func(c cid.Cid) error { // was it marked? 
mark, err := markSet.Has(c) @@ -660,16 +660,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { hotCnt++ return nil } - sz, err := s.hot.GetSize(s.ctx, c) - if err != nil { - if ipld.IsNotFound(err) { - log.Warnf("hotstore missing expected block %s", c) - return nil - } - - return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err) - } - szPurge += sz // it needs to be removed from hot store, mark it as candidate for purge if err := purgew.Write(c); err != nil { @@ -709,9 +699,8 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { log.Infow("cold collection done", "took", time.Since(startCollect)) - log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "purge", purgeCnt, "purge size", szPurge) - s.szToPurge = int64(szPurge) - s.szKeys = int64(hotCnt) * cidKeySize + log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "purge", purgeCnt) + s.szKeys = hotCnt * cidKeySize stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt))) stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt))) @@ -1540,7 +1529,6 @@ func (s *SplitStore) clearSizeMeasurements() { s.szKeys = 0 s.szMarkedLiveRefs = 0 s.szProtectedTxns = 0 - s.szToPurge = 0 s.szWalk = 0 } From efbb63582e9ab1e5da679726045b9a5b661c4bb8 Mon Sep 17 00:00:00 2001 From: zenground0 Date: Wed, 8 Mar 2023 18:11:39 -0700 Subject: [PATCH 10/19] Review Response --- blockstore/splitstore/splitstore_compact.go | 16 +++++++------- blockstore/splitstore/splitstore_gc.go | 24 ++++++++++++--------- node/config/doc_gen.go | 5 ++++- node/config/types.go | 3 +++ 4 files changed, 29 insertions(+), 19 deletions(-) diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index db1b35176..68845a598 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -204,7 +204,7 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { count := new(int32) visitor := newConcurrentVisitor() - walkObject := func(c cid.Cid) (int, error) { + walkObject := func(c cid.Cid) (int64, error) { return s.walkObjectIncomplete(c, visitor, func(c cid.Cid) error { if isUnitaryObject(c) { @@ -426,7 +426,7 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { // transactionally protect a reference by walking the object and marking. // concurrent markings are short circuited by checking the markset. 
-func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) (int, error) { +func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) (int64, error) { if err := s.checkClosing(); err != nil { return 0, err } @@ -1082,8 +1082,8 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp return nil } -func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) (int, error) { - var sz int +func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) (int64, error) { + var sz int64 visit, err := visitor.Visit(c) if err != nil { return 0, xerrors.Errorf("error visiting object: %w", err) @@ -1112,7 +1112,7 @@ func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid var links []cid.Cid err = s.view(c, func(data []byte) error { - sz += len(data) + sz += int64(len(data)) return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { links = append(links, c) }) @@ -1134,8 +1134,8 @@ func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid } // like walkObject, but the object may be potentially incomplete (references missing) -func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) (int, error) { - sz := 0 +func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) (int64, error) { + sz := int64(0) visit, err := visitor.Visit(c) if err != nil { return 0, xerrors.Errorf("error visiting object: %w", err) @@ -1181,7 +1181,7 @@ func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, m var links []cid.Cid err = s.view(c, func(data []byte) error { - sz += len(data) + sz += int64(len(data)) return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { links = append(links, c) }) diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go index 2e23b993a..c415b74dc 100644 --- a/blockstore/splitstore/splitstore_gc.go +++ b/blockstore/splitstore/splitstore_gc.go @@ -28,18 +28,21 @@ func (s *SplitStore) gcHotAfterCompaction() { // a) If we should not do full GC => online GC // b) If we should do full GC and can => moving GC // c) If we should do full GC and can't => aggressive online GC - var hotSize int64 - var err error - sizer, ok := s.hot.(bstore.BlockstoreSize) - if ok { - hotSize, err = sizer.Size() - if err != nil { - log.Warnf("error getting hotstore size: %s, estimating empty hot store for targeting", err) - hotSize = 0 + getSize := func() int64 { + sizer, ok := s.hot.(bstore.BlockstoreSize) + if ok { + size, err := sizer.Size() + if err != nil { + log.Warnf("error getting hotstore size: %s, estimating empty hot store for targeting", err) + return 0 + } + return size + } else { + log.Errorf("Could not measure hotstore size, assuming it is 0 bytes, which it is not") + return 0 } - } else { - hotSize = 0 } + hotSize := getSize() copySizeApprox := s.szKeys + s.szMarkedLiveRefs + s.szProtectedTxns + s.szWalk shouldTarget := s.cfg.HotstoreMaxSpaceTarget > 0 && hotSize+copySizeApprox > int64(s.cfg.HotstoreMaxSpaceTarget)-targetThreshold @@ -63,6 +66,7 @@ func (s *SplitStore) gcHotAfterCompaction() { if err := s.gcBlockstore(s.hot, opts); err != nil { log.Warnf("error garbage collecting hostore: %s", err) } + log.Infof("measured hot store size after GC: %d", getSize()) } func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreGCOption) error { diff --git a/node/config/doc_gen.go 
b/node/config/doc_gen.go index 53c305be6..f5a89293c 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -1295,7 +1295,10 @@ will run moving GC if disk utilization gets within a threshold (150 GB) of the t Splitstore GC will NOT run moving GC if the total size of the move would get within 50 GB of the target, and instead will run a more aggressive online GC. If both HotStoreFullGCFrequency and HotStoreMaxSpaceTarget are set then splitstore -GC will trigger moving GC if either configuration condition is met.`, +GC will trigger moving GC if either configuration condition is met. +A reasonable minimum is 2x fully GCed hotstore size + 50 G buffer. +At this minimum size moving GC happens every time, any smaller and moving GC won't +be able to run. In spring 2023 this minimum is ~550 GB.`, }, }, "StorageMiner": []DocField{ diff --git a/node/config/types.go b/node/config/types.go index 123714794..68cd35123 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -607,6 +607,9 @@ type Splitstore struct { // within 50 GB of the target, and instead will run a more aggressive online GC. // If both HotStoreFullGCFrequency and HotStoreMaxSpaceTarget are set then splitstore // GC will trigger moving GC if either configuration condition is met. + // A reasonable minimum is 2x fully GCed hotstore size + 50 G buffer. + // At this minimum size moving GC happens every time, any smaller and moving GC won't + // be able to run. In spring 2023 this minimum is ~550 GB. HotStoreMaxSpaceTarget uint64 } From 793141473555c66b16aec169f3f87c971040a52c Mon Sep 17 00:00:00 2001 From: zenground0 Date: Wed, 8 Mar 2023 18:20:27 -0700 Subject: [PATCH 11/19] Lint --- blockstore/splitstore/splitstore_compact.go | 6 +++--- blockstore/splitstore/splitstore_gc.go | 5 ++--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 68845a598..c113ec72f 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -236,7 +236,7 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { log.Errorf("error marking tipset refs: %s", err) } log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count) - atomic.AddInt64(szMarked, int64(sz)) + atomic.AddInt64(szMarked, sz) return } @@ -252,7 +252,7 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { if err != nil { return err } - atomic.AddInt64(szMarked, int64(sz)) + atomic.AddInt64(szMarked, sz) } return nil @@ -969,7 +969,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) } - atomic.AddInt64(szWalk, int64(sz)) + atomic.AddInt64(szWalk, sz) sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk) if err != nil { diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go index c415b74dc..43832a585 100644 --- a/blockstore/splitstore/splitstore_gc.go +++ b/blockstore/splitstore/splitstore_gc.go @@ -37,10 +37,9 @@ func (s *SplitStore) gcHotAfterCompaction() { return 0 } return size - } else { - log.Errorf("Could not measure hotstore size, assuming it is 0 bytes, which it is not") - return 0 } + log.Errorf("Could not measure hotstore size, assuming it is 0 bytes, which it is not") + return 0 } hotSize := getSize() From 0b0913f2f3b8a38a6c4e94d83aaade48eb0dbc0a Mon Sep 17 00:00:00 2001 From: zenground0 Date: Wed, 
8 Mar 2023 18:40:23 -0700 Subject: [PATCH 12/19] lint --- blockstore/splitstore/splitstore_compact.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index c113ec72f..b3873f659 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -997,7 +997,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) } - atomic.AddInt64(szWalk, int64(sz)) + atomic.AddInt64(szWalk, sz) sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk) if err != nil { return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) @@ -1011,7 +1011,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if err != nil { return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err) } - atomic.AddInt64(szWalk, int64(sz)) + atomic.AddInt64(szWalk, sz) atomic.AddInt64(scanCnt, 1) } From 14af4b27cd36d4cee4dd77c0c8818c5007c2a62a Mon Sep 17 00:00:00 2001 From: zenground0 Date: Wed, 8 Mar 2023 18:41:31 -0700 Subject: [PATCH 13/19] docsgen-cli --- documentation/en/default-lotus-config.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index a447350ff..f772e316c 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -236,6 +236,9 @@ # within 50 GB of the target, and instead will run a more aggressive online GC. # If both HotStoreFullGCFrequency and HotStoreMaxSpaceTarget are set then splitstore # GC will trigger moving GC if either configuration condition is met. + # A reasonable minimum is 2x fully GCed hotstore size + 50 G buffer. + # At this minimum size moving GC happens every time, any smaller and moving GC won't + # be able to run. In spring 2023 this minimum is ~550 GB. 
#
# type: uint64
# env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMAXSPACETARGET

From 140f2099bd8423f0a68a6eb6bd49aa55a49bee46 Mon Sep 17 00:00:00 2001
From: zenground0
Date: Wed, 8 Mar 2023 22:49:30 -0700
Subject: [PATCH 14/19] lint

---
 blockstore/splitstore/splitstore_compact.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go
index b3873f659..96542fcf4 100644
--- a/blockstore/splitstore/splitstore_compact.go
+++ b/blockstore/splitstore/splitstore_compact.go
@@ -701,8 +701,8 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 	log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "purge", purgeCnt)
 	s.szKeys = hotCnt * cidKeySize
-	stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt)))
-	stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt)))
+	stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(hotCnt))
+	stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(coldCnt))
 
 	if err := s.checkClosing(); err != nil {
 		return err
 	}
@@ -975,7 +975,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 			if err != nil {
 				return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
 			}
-			atomic.AddInt64(szWalk, int64(sz))
+			atomic.AddInt64(szWalk, sz)
 		} else {
 			sz, err = s.walkObject(hdr.Messages, visitor, fHot)
 			if err != nil {

From fb7eb086246f572323244721f4a2d31723034ff0 Mon Sep 17 00:00:00 2001
From: zenground0
Date: Thu, 9 Mar 2023 06:36:35 -0700
Subject: [PATCH 15/19] lint

---
 blockstore/splitstore/splitstore_compact.go | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go
index 96542fcf4..e01ce684c 100644
--- a/blockstore/splitstore/splitstore_compact.go
+++ b/blockstore/splitstore/splitstore_compact.go
@@ -406,7 +406,7 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
 			if err != nil {
 				return xerrors.Errorf("error protecting transactional references to %s: %w", c, err)
 			}
-			atomic.AddInt64(sz, int64(szTxn))
+			atomic.AddInt64(sz, szTxn)
 		}
 		return nil
 	}
@@ -958,7 +958,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 		if err != nil {
 			return xerrors.Errorf("error walking parent tipset cid reference")
 		}
-		atomic.AddInt64(szWalk, int64(sz))
+		atomic.AddInt64(szWalk, sz)
 
 		// message are retained if within the inclMsgs boundary
 		if hdr.Height >= inclMsgs && hdr.Height > 0 {
@@ -981,13 +981,13 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 			if err != nil {
 				return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
 			}
-			atomic.AddInt64(szWalk, int64(sz))
+			atomic.AddInt64(szWalk, sz)
 
 			sz, err := s.walkObject(hdr.ParentMessageReceipts, visitor, fHot)
 			if err != nil {
 				return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
 			}
-			atomic.AddInt64(szWalk, int64(sz))
+			atomic.AddInt64(szWalk, sz)
 		}
 	}
 
@@ -1002,7 +1002,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 			if err != nil {
 				return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
 			}
-			atomic.AddInt64(szWalk, int64(sz))
+			atomic.AddInt64(szWalk, sz)
 		}
 
 		// state is only retained if within the inclState boundary, with the exception of genesis
@@ -1033,7 +1033,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 	if err != nil {
 		return xerrors.Errorf("error walking parent tipset cid reference")
 	}
-	atomic.AddInt64(szWalk, int64(sz))
+	atomic.AddInt64(szWalk, sz)
 
 	for len(toWalk) > 0 {
 		// walking can take a while, so check this with every opportunity
@@ -1135,7 +1135,7 @@ func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid
 
 // like walkObject, but the object may be potentially incomplete (references missing)
 func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) (int64, error) {
-	sz := int64(0)
+	var sz int64
 	visit, err := visitor.Visit(c)
 	if err != nil {
 		return 0, xerrors.Errorf("error visiting object: %w", err)

From aac30cd840ddf409bf794a093ffe2d4dd5327245 Mon Sep 17 00:00:00 2001
From: zenground0
Date: Thu, 9 Mar 2023 07:57:35 -0700
Subject: [PATCH 16/19] Review Response

---
 blockstore/splitstore/splitstore_compact.go | 6 +++---
 blockstore/splitstore/splitstore_gc.go      | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go
index e01ce684c..12d0ff899 100644
--- a/blockstore/splitstore/splitstore_compact.go
+++ b/blockstore/splitstore/splitstore_compact.go
@@ -276,7 +276,7 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
 	}
 
 	log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count, "size marked", *szMarked)
-	s.szMarkedLiveRefs += *szMarked
+	s.szMarkedLiveRefs += atomic.LoadInt64(szMarked)
 }
 
 // transactionally protect a view
@@ -419,7 +419,7 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
 		if err := g.Wait(); err != nil {
 			return err
 		}
-		s.szProtectedTxns += *sz
+		s.szProtectedTxns += atomic.LoadInt64(sz)
 		log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count, "protected size", sz)
 	}
 }
@@ -1078,7 +1078,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 	}
 
 	log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt, "walk size", szWalk)
-	s.szWalk = *szWalk
+	s.szWalk = atomic.LoadInt64(szWalk)
 
 	return nil
 }
diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go
index 43832a585..1a5fbda68 100644
--- a/blockstore/splitstore/splitstore_gc.go
+++ b/blockstore/splitstore/splitstore_gc.go
@@ -13,7 +13,7 @@ const (
 	// Don't attempt moving GC with 50 GB or less would remain during moving GC
 	targetBuffer = 50_000_000_000
 	// Fraction of garbage in badger vlog for online GC traversal to collect garbage
-	aggressiveOnlineGCThreshold = 0.0001
+	AggressiveOnlineGCThreshold = 0.0001
 )
 
 func (s *SplitStore) gcHotAfterCompaction() {
@@ -59,7 +59,7 @@ func (s *SplitStore) gcHotAfterCompaction() {
 		log.Warn("If problem continues you can 1) temporarily allocate more disk space to hotstore and 2) reflect in HotstoreMaxSpaceTarget OR trigger manual move with `lotus chain prune hot-moving`")
 		log.Warn("If problem continues and you do not have any more disk space you can run continue to manually trigger online GC at aggressive thresholds (< 0.01) with `lotus chain prune hot`")
 
-		opts = append(opts, bstore.WithThreshold(aggressiveOnlineGCThreshold))
+		opts = append(opts, bstore.WithThreshold(AggressiveOnlineGCThreshold))
 	}
 
 	if err := s.gcBlockstore(s.hot, opts); err != nil {

From bd0c010be6f1d0b85f5ba3c214a5f3911674f9f6 Mon Sep 17 00:00:00 2001
From: zenground0
Date: Thu, 9 Mar 2023 08:40:14 -0700
Subject: [PATCH 17/19] Configure buffer and threshold

---
 blockstore/splitstore/splitstore.go        |  9 +++++++++
 blockstore/splitstore/splitstore_gc.go     |  4 ++--
 documentation/en/default-lotus-config.toml |  6 ++++++
 node/config/def.go                         |  4 +++-
 node/config/types.go                       |  9 +++++++++
 node/modules/blockstore.go                 | 14 ++++++++------
 6 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go
index ad19f5dff..410cc50df 100644
--- a/blockstore/splitstore/splitstore.go
+++ b/blockstore/splitstore/splitstore.go
@@ -123,6 +123,15 @@ type Config struct {
 	// never overflow. This field is used when doing GC at the end of a compaction to
 	// adaptively choose moving GC
 	HotstoreMaxSpaceTarget uint64
+
+	// Moving GC will be triggered when total moving size exceeds
+	// HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold
+	HotstoreMaxSpaceThreshold uint64
+
+	// Safety buffer to prevent moving GC from overflowing disk.
+	// Moving GC will not occur when total moving size exceeds
+	// HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer
+	HotstoreMaxSpaceSafetyBuffer uint64
 }
 
 // ChainAccessor allows the Splitstore to access the chain. It will most likely
diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go
index 1a5fbda68..f6087205d 100644
--- a/blockstore/splitstore/splitstore_gc.go
+++ b/blockstore/splitstore/splitstore_gc.go
@@ -44,10 +44,10 @@ func (s *SplitStore) gcHotAfterCompaction() {
 	hotSize := getSize()
 
 	copySizeApprox := s.szKeys + s.szMarkedLiveRefs + s.szProtectedTxns + s.szWalk
-	shouldTarget := s.cfg.HotstoreMaxSpaceTarget > 0 && hotSize+copySizeApprox > int64(s.cfg.HotstoreMaxSpaceTarget)-targetThreshold
+	shouldTarget := s.cfg.HotstoreMaxSpaceTarget > 0 && hotSize+copySizeApprox > int64(s.cfg.HotstoreMaxSpaceTarget)-int64(s.cfg.HotstoreMaxSpaceThreshold)
 	shouldFreq := s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0
 	shouldDoFull := shouldTarget || shouldFreq
-	canDoFull := s.cfg.HotstoreMaxSpaceTarget == 0 || hotSize+copySizeApprox < int64(s.cfg.HotstoreMaxSpaceTarget)-targetBuffer
+	canDoFull := s.cfg.HotstoreMaxSpaceTarget == 0 || hotSize+copySizeApprox < int64(s.cfg.HotstoreMaxSpaceTarget)-int64(s.cfg.HotstoreMaxSpaceSafetyBuffer)
 
 	log.Debugw("approximating new hot store size", "key size", s.szKeys, "marked live refs", s.szMarkedLiveRefs, "protected txns", s.szProtectedTxns, "walked DAG", s.szWalk)
 	log.Infof("measured hot store size: %d, approximate new size: %d, should do full %t, can do full %t", hotSize, copySizeApprox, shouldDoFull, canDoFull)
diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml
index f772e316c..e17448a6c 100644
--- a/documentation/en/default-lotus-config.toml
+++ b/documentation/en/default-lotus-config.toml
@@ -244,6 +244,12 @@
   # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMAXSPACETARGET
   #HotStoreMaxSpaceTarget = 0
 
+  # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMAXSPACETHRESHOLD
+  #HotStoreMaxSpaceThreshold = 150000000000
+
+  # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMAXSPACESAFETYBUFFER
+  #HotstoreMaxSpaceSafetyBuffer = 50000000000
+
 [Cluster]
   # EXPERIMENTAL. config to enabled node cluster with raft consensus
 
diff --git a/node/config/def.go b/node/config/def.go
index f50e9575a..a5c532d14 100644
--- a/node/config/def.go
+++ b/node/config/def.go
@@ -95,7 +95,9 @@ func DefaultFullNode() *FullNode {
 				HotStoreType: "badger",
 				MarkSetType:  "badger",
 
-				HotStoreFullGCFrequency: 20,
+				HotStoreFullGCFrequency:      20,
+				HotStoreMaxSpaceThreshold:    150_000_000_000,
+				HotstoreMaxSpaceSafetyBuffer: 50_000_000_000,
 			},
 		},
 		Cluster: *DefaultUserRaftConfig(),
diff --git a/node/config/types.go b/node/config/types.go
index 68cd35123..5b952d35e 100644
--- a/node/config/types.go
+++ b/node/config/types.go
@@ -611,6 +611,15 @@ type Splitstore struct {
 	// At this minimum size moving GC happens every time, any smaller and moving GC won't
 	// be able to run. In spring 2023 this minimum is ~550 GB.
 	HotStoreMaxSpaceTarget uint64
+
+	// When HotStoreMaxSpaceTarget is set Moving GC will be triggered when total moving size
+	// exceeds HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold
+	HotStoreMaxSpaceThreshold uint64
+
+	// Safety buffer to prevent moving GC from overflowing disk when HotStoreMaxSpaceTarget
+	// is set. Moving GC will not occur when total moving size exceeds
+	// HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer
+	HotstoreMaxSpaceSafetyBuffer uint64
 }
 
 // // Full Node
diff --git a/node/modules/blockstore.go b/node/modules/blockstore.go
index f4211bbe2..f96fd0db4 100644
--- a/node/modules/blockstore.go
+++ b/node/modules/blockstore.go
@@ -82,12 +82,14 @@ func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.Locked
 		}
 
 		cfg := &splitstore.Config{
-			MarkSetType:              cfg.Splitstore.MarkSetType,
-			DiscardColdBlocks:        cfg.Splitstore.ColdStoreType == "discard",
-			UniversalColdBlocks:      cfg.Splitstore.ColdStoreType == "universal",
-			HotStoreMessageRetention: cfg.Splitstore.HotStoreMessageRetention,
-			HotStoreFullGCFrequency:  cfg.Splitstore.HotStoreFullGCFrequency,
-			HotstoreMaxSpaceTarget:   cfg.Splitstore.HotStoreMaxSpaceTarget,
+			MarkSetType:                  cfg.Splitstore.MarkSetType,
+			DiscardColdBlocks:            cfg.Splitstore.ColdStoreType == "discard",
+			UniversalColdBlocks:          cfg.Splitstore.ColdStoreType == "universal",
+			HotStoreMessageRetention:     cfg.Splitstore.HotStoreMessageRetention,
+			HotStoreFullGCFrequency:      cfg.Splitstore.HotStoreFullGCFrequency,
+			HotstoreMaxSpaceTarget:       cfg.Splitstore.HotStoreMaxSpaceTarget,
+			HotstoreMaxSpaceThreshold:    cfg.Splitstore.HotStoreMaxSpaceThreshold,
+			HotstoreMaxSpaceSafetyBuffer: cfg.Splitstore.HotstoreMaxSpaceSafetyBuffer,
 		}
 		ss, err := splitstore.Open(path, ds, hot, cold, cfg)
 		if err != nil {

From 87d5a3723ffc7a3c584cbd14a889a5c9128dafa8 Mon Sep 17 00:00:00 2001
From: zenground0
Date: Thu, 9 Mar 2023 08:57:14 -0700
Subject: [PATCH 18/19] lint

---
 blockstore/splitstore/splitstore_gc.go |  6 +-----
 node/config/doc_gen.go                 | 15 +++++++++++++++
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go
index f6087205d..2ddb7d404 100644
--- a/blockstore/splitstore/splitstore_gc.go
+++ b/blockstore/splitstore/splitstore_gc.go
@@ -8,10 +8,6 @@ import (
 )
 
 const (
-	// When < 150 GB of space would remain during moving GC, trigger moving GC
-	targetThreshold = 150_000_000_000
-	// Don't attempt moving GC with 50 GB or less would remain during moving GC
-	targetBuffer = 50_000_000_000
 	// Fraction of garbage in badger vlog for online GC traversal to collect garbage
 	AggressiveOnlineGCThreshold = 0.0001
 )
@@ -55,7 +51,7 @@ func (s *SplitStore) gcHotAfterCompaction() {
 
 	if shouldDoFull && canDoFull {
 		opts = append(opts, bstore.WithFullGC(true))
 	} else if shouldDoFull && !canDoFull {
-		log.Warnf("Attention! Estimated moving GC size %d is not within safety buffer %d of target max %d, performing aggressive online GC to attempt to bring hotstore size down safely", copySizeApprox, targetBuffer, s.cfg.HotstoreMaxSpaceTarget)
+		log.Warnf("Attention! Estimated moving GC size %d is not within safety buffer %d of target max %d, performing aggressive online GC to attempt to bring hotstore size down safely", copySizeApprox, s.cfg.HotstoreMaxSpaceSafetyBuffer, s.cfg.HotstoreMaxSpaceTarget)
 		log.Warn("If problem continues you can 1) temporarily allocate more disk space to hotstore and 2) reflect in HotstoreMaxSpaceTarget OR trigger manual move with `lotus chain prune hot-moving`")
 		log.Warn("If problem continues and you do not have any more disk space you can run continue to manually trigger online GC at aggressive thresholds (< 0.01) with `lotus chain prune hot`")
diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go
index f5a89293c..c62084708 100644
--- a/node/config/doc_gen.go
+++ b/node/config/doc_gen.go
@@ -1300,6 +1300,21 @@ A reasonable minimum is 2x fully GCed hotstore size + 50 G buffer.
 At this minimum size moving GC happens every time, any smaller and moving GC won't
 be able to run. In spring 2023 this minimum is ~550 GB.`,
 		},
+		{
+			Name: "HotStoreMaxSpaceThreshold",
+			Type: "uint64",
+
+			Comment: `When HotStoreMaxSpaceTarget is set Moving GC will be triggered when total moving size
+exceeds HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold`,
+		},
+		{
+			Name: "HotstoreMaxSpaceSafetyBuffer",
+			Type: "uint64",
+
+			Comment: `Safety buffer to prevent moving GC from overflowing disk when HotStoreMaxSpaceTarget
+is set. Moving GC will not occur when total moving size exceeds
+HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer`,
+		},
 	},
 	"StorageMiner": []DocField{
 		{

From 366ebe3155a28ee08597886ae943366cc0f86d8e Mon Sep 17 00:00:00 2001
From: Łukasz Magiera
Date: Thu, 9 Mar 2023 17:18:04 +0100
Subject: [PATCH 19/19] make gen

---
 documentation/en/default-lotus-config.toml | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml
index e17448a6c..8e757d2c3 100644
--- a/documentation/en/default-lotus-config.toml
+++ b/documentation/en/default-lotus-config.toml
@@ -244,9 +244,18 @@
   # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMAXSPACETARGET
   #HotStoreMaxSpaceTarget = 0
 
+  # When HotStoreMaxSpaceTarget is set Moving GC will be triggered when total moving size
+  # exceeds HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold
+  #
+  # type: uint64
   # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMAXSPACETHRESHOLD
   #HotStoreMaxSpaceThreshold = 150000000000
 
+  # Safety buffer to prevent moving GC from overflowing disk when HotStoreMaxSpaceTarget
+  # is set. Moving GC will not occur when total moving size exceeds
+  # HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer
+  #
+  # type: uint64
   # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMAXSPACESAFETYBUFFER
   #HotstoreMaxSpaceSafetyBuffer = 50000000000
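
For reference, the moving-GC decision that patches 14-18 converge on can be restated as a small standalone Go sketch. This is illustrative only, not the patched code: the periodic HotStoreFullGCFrequency trigger is omitted, and the splitstore fields are passed in as plain integers.

package main

import "fmt"

// decideGC restates the post-series gcHotAfterCompaction decision in isolation.
// shouldTarget mirrors the comparison against target - threshold, and canDoFull
// the comparison against target - safetyBuffer. The frequency-based trigger
// (HotStoreFullGCFrequency) is left out to keep the sketch short.
func decideGC(hotSize, copySizeApprox int64, target, threshold, safetyBuffer uint64) string {
	shouldTarget := target > 0 && hotSize+copySizeApprox > int64(target)-int64(threshold)
	canDoFull := target == 0 || hotSize+copySizeApprox < int64(target)-int64(safetyBuffer)

	switch {
	case shouldTarget && canDoFull:
		return "moving (full) GC"
	case shouldTarget && !canDoFull:
		return "aggressive online GC" // i.e. WithThreshold(AggressiveOnlineGCThreshold)
	default:
		return "regular online GC"
	}
}

func main() {
	// Hypothetical numbers: 400 GB measured hotstore, ~120 GB projected copy,
	// 650 GB space target with the default 150 GB threshold and 50 GB buffer.
	fmt.Println(decideGC(400_000_000_000, 120_000_000_000,
		650_000_000_000, 150_000_000_000, 50_000_000_000))
}

With those hypothetical inputs the projected size (520 GB) crosses target - threshold (500 GB) but stays under target - safety buffer (600 GB), so the sketch prints "moving (full) GC"; push the projection past 600 GB and it degrades to the aggressive online GC path instead.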
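Patch 16 ("Review Response") also switches the final reads of the shared size counters from pointer dereferences (*szMarked, *sz, *szWalk) to atomic.LoadInt64, pairing them with the atomic.AddInt64 writes the walker goroutines perform. A minimal sketch of that accumulator pattern follows, using a hypothetical worker pool rather than the splitstore walkers; note that after wg.Wait() a plain read would already be ordered, so the atomic load is about keeping the access pattern uniform (and race/lint tooling quiet) rather than fixing a data race.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	sz := new(int64) // shared accumulator, same shape as the sz/szWalk counters in the series

	var wg sync.WaitGroup
	for i := int64(1); i <= 4; i++ {
		wg.Add(1)
		go func(n int64) {
			defer wg.Done()
			atomic.AddInt64(sz, n) // writers always go through the atomic API
		}(i)
	}
	wg.Wait()

	// Read through the same API instead of dereferencing *sz directly.
	fmt.Println("accumulated size:", atomic.LoadInt64(sz)) // prints 10
}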
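Finally, the new knobs are operator-facing. A hypothetical configuration for a node with roughly a 600 GB disk budget might look like the snippet below; the section path is inferred from the LOTUS_CHAINSTORE_SPLITSTORE_* env vars, only HotStoreMaxSpaceTarget has to be set explicitly, and the other two values shown are just the defaults from def.go restated.

[Chainstore.Splitstore]
  # Cap the hotstore around a 600 GB disk budget (0 disables the size-based trigger).
  HotStoreMaxSpaceTarget = 600000000000
  # Moving GC kicks in once the projected size crosses target - threshold (450 GB here).
  HotStoreMaxSpaceThreshold = 150000000000
  # Moving GC is skipped (aggressive online GC instead) within 50 GB of the target.
  HotstoreMaxSpaceSafetyBuffer = 50000000000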