more robust handling of sync gap walks

vyzo 2021-06-14 20:49:11 +03:00
parent 7cf75e667d
commit bdb97d6186


@@ -543,7 +543,7 @@ func (s *SplitStore) warmup(curTs *types.TipSet) error {
 	count := int64(0)
 	xcount := int64(0)
 	missing := int64(0)
-	err := s.walk(curTs, epoch,
+	err := s.walk(curTs, epoch, false,
 		func(cid cid.Cid) error {
 			count++
@@ -655,7 +655,7 @@ func (s *SplitStore) estimateMarkSetSize(curTs *types.TipSet) error {
 	epoch := curTs.Height()
 	var count int64
-	err := s.walk(curTs, epoch,
+	err := s.walk(curTs, epoch, false,
 		func(cid cid.Cid) error {
 			count++
 			return nil
@@ -686,6 +686,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet, syncGapEpoch abi.ChainEpoch)
 	log.Infow("marking reachable blocks", "currentEpoch", currentEpoch, "boundaryEpoch", boundaryEpoch)
 	startMark := time.Now()
 
+	var inclMsgs bool
 	var markTs *types.TipSet
 	if syncGapEpoch > boundaryEpoch {
 		// There is a sync gap that may have caused writes that are logically after the boundary
@@ -695,6 +696,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet, syncGapEpoch abi.ChainEpoch)
 		// In this case we perform a full walk to avoid pathologies with pushing actually hot
 		// objects into the coldstore.
 		markTs = curTs
+		inclMsgs = true
 		log.Infof("sync gap detected at epoch %d; marking from current epoch to boundary epoch", syncGapEpoch)
 	} else {
 		// There is no pathological sync gap, so we can use the much faster single tipset walk at
@@ -707,7 +709,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet, syncGapEpoch abi.ChainEpoch)
 	}
 
 	var count int64
-	err = s.walk(markTs, boundaryEpoch,
+	err = s.walk(markTs, boundaryEpoch, inclMsgs,
 		func(cid cid.Cid) error {
 			count++
 			return markSet.Mark(cid)
@@ -829,7 +831,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet, syncGapEpoch abi.ChainEpoch)
 	return nil
 }
 
-func (s *SplitStore) walk(ts *types.TipSet, boundary abi.ChainEpoch, f func(cid.Cid) error) error {
+func (s *SplitStore) walk(ts *types.TipSet, boundary abi.ChainEpoch, inclMsgs bool, f func(cid.Cid) error) error {
 	walked := cid.NewSet()
 	toWalk := ts.Cids()
@@ -857,9 +859,11 @@ func (s *SplitStore) walk(ts *types.TipSet, boundary abi.ChainEpoch, f func(cid.
 			return err
 		}
 
+		if inclMsgs {
 		if err := s.walkLinks(hdr.Messages, walked, f); err != nil {
 			return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
 		}
+		}
 
 		if err := s.walkLinks(hdr.ParentStateRoot, walked, f); err != nil {
 			return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)