Merge branch 'releases' into jen/v15

jennijuju 2022-02-25 21:01:31 -05:00
commit c6361b391c
16 changed files with 118 additions and 59 deletions

View File

@@ -173,6 +173,35 @@ Contributors
| Rob Quist | 1 | +2/-2 | 1 |
| shotcollin | 1 | +1/-1 | 1 |
# 1.14.2 / 2022-02-24
This is an **optional** release of lotus that includes a couple more improvements to the Snap Deals experience for storage providers, in preparation for the [upcoming OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550).
Note that the network is STILL scheduled to upgrade to v15 at 2022-03-01T15:00:00Z (March 1st). All node operators, including storage providers, must upgrade to at least Lotus v1.14.0 before that time. Storage providers must update their daemons, miners, and workers.
Wanna know how to Snap your deal? Check [this](https://github.com/filecoin-project/lotus/discussions/8141) out!
## Bug Fixes
- fix lotus-bench for sealing jobs (#8173)
- fix: sealing: really-do-it flag for abort upgrade (#8181)
- fix: proving: post check sector handles snap deals replica faults (#8177)
- fix: sealing: missing file type (#8180)
## Others
- Retract force-pushed v1.14.0 to work around stale gomod caches (#8159): We originally tagged v1.14.0 off the wrong
commit and fixed that with a force push, which is bad practice since it breaks the Go module cache. We have therefore
retracted v1.14.0; users should upgrade to v1.14.1 or later.
## Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| @zenground0 | 2 | +73/-58 | 12 |
| @eben.xie | 1 | +7/-0 | 1 |
| @jennijuju | 1 | +4/-0 | 1 |
| @jennijuju | 1 | +2/-1 | 1 |
| @ribasushi | 1 | +2/-0 | 1 |
# 1.14.1 / 2022-02-18
This is an **optional** release of lotus that fixes an incorrect *comment* about the network v15 OhSnap upgrade **date**. Note that the actual upgrade epoch set in [v1.14.0](https://github.com/filecoin-project/lotus/releases/tag/v1.14.0) was correct.
@@ -195,7 +224,7 @@ It is recommended that storage providers download the new params before updating
- run `./lotus-shed fetch-params` with the appropriate `proving-params` flag
- Upgrade the Lotus daemon and miner **when the previous step is complete**
All node operators, including storage providers, should be aware that a pre-migration will begin at 2022-03-01T13:30:00Z (150 minutes before the real upgrade). The pre-migration will take between 20 and 50 minutes, depending on hardware specs. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries.
All node operators, including storage providers, should be aware that a pre-migration will begin at 2022-03-01T13:30:00Z (90 minutes before the real upgrade). The pre-migration will take between 20 and 50 minutes, depending on hardware specs. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries.
## New Features and Changes
- Integrate actor v7-rc1:

View File

@@ -256,7 +256,7 @@ type StorageMiner interface {
// the path specified when calling CreateBackup is within the base path
CreateBackup(ctx context.Context, fpath string) error //perm:admin
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
}
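The new `update` parameter must be the same length as `sectors`; each entry marks whether that sector has been snap-upgraded, so the check knows whether to verify the update replica or the sealed copy. A minimal caller sketch against this API, assuming an `api.StorageMiner` client and on-chain sector info with the v7 `SectorKeyCID` field; the helper and variable names below are illustrative, not from this commit:

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// checkWithUpdateFlags builds the parallel sectors/update slices and calls the
// new CheckProvable signature. minerAPI and infos are assumed to come from
// elsewhere (e.g. an RPC client and a prior state query); this is a sketch,
// not code from this commit.
func checkWithUpdateFlags(ctx context.Context, minerAPI api.StorageMiner, mid abi.ActorID,
	pp abi.RegisteredPoStProof, infos []*miner.SectorOnChainInfo) (map[abi.SectorNumber]string, error) {

	tocheck := make([]storage.SectorRef, 0, len(infos))
	update := make([]bool, 0, len(infos))
	for _, info := range infos {
		tocheck = append(tocheck, storage.SectorRef{
			ProofType: info.SealProof,
			ID:        abi.SectorID{Miner: mid, Number: info.SectorNumber},
		})
		// A non-nil SectorKeyCID means the sector was snap-upgraded, so its
		// update replica (not the sealed copy) is what must be provable.
		update = append(update, info.SectorKeyCID != nil)
	}

	// update must stay index-aligned and equal in length to tocheck.
	return minerAPI.CheckProvable(ctx, pp, tocheck, update, false)
}
```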

View File

@@ -625,7 +625,7 @@ type StorageMinerStruct struct {
ActorSectorSize func(p0 context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"`
CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"`
@@ -3747,14 +3747,14 @@ func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Addres
return *new(abi.SectorSize), ErrNotSupported
}
func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) {
if s.Internal.CheckProvable == nil {
return *new(map[abi.SectorNumber]string), ErrNotSupported
}
return s.Internal.CheckProvable(p0, p1, p2, p3)
return s.Internal.CheckProvable(p0, p1, p2, p3, p4)
}
func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) {
return *new(map[abi.SectorNumber]string), ErrNotSupported
}
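Both the `StorageMinerStruct` wrapper and the `StorageMinerStub` change here because lotus's generated API proxies use a func-field pattern: the RPC client fills in each `Internal` function pointer and the exported method forwards to it, falling back to `ErrNotSupported`. A stripped-down sketch of that pattern with a hypothetical method, just to show why a signature change must touch both:

```go
package example

import (
	"context"
	"errors"
)

// ErrNotSupported mirrors the sentinel the generated structs return when an
// RPC method has not been wired up.
var ErrNotSupported = errors.New("method not supported")

// DemoStruct mirrors the generated proxy shape: the RPC client fills in the
// Internal function pointers at dial time, and exported methods forward to them.
type DemoStruct struct {
	Internal struct {
		Ping func(ctx context.Context, msg string) (string, error)
	}
}

func (s *DemoStruct) Ping(ctx context.Context, msg string) (string, error) {
	if s.Internal.Ping == nil {
		return "", ErrNotSupported
	}
	return s.Internal.Ping(ctx, msg)
}

// DemoStub is the always-unsupported fallback, like StorageMinerStub above.
type DemoStub struct{}

func (s *DemoStub) Ping(ctx context.Context, msg string) (string, error) {
	return "", ErrNotSupported
}
```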

View File

@@ -57,7 +57,7 @@ var (
FullAPIVersion0 = newVer(1, 5, 0)
FullAPIVersion1 = newVer(2, 2, 0)
MinerAPIVersion0 = newVer(1, 3, 0)
MinerAPIVersion0 = newVer(1, 4, 0)
WorkerAPIVersion0 = newVer(1, 5, 0)
)
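The `MinerAPIVersion0` bump from 1.3.0 to 1.4.0 advertises the breaking `CheckProvable` change to anything that version-checks the miner API at connection time. As a rough sketch of how such a packed major/minor/patch version can be represented and compared; the encoding and the comparison helper below are assumptions for illustration, not taken from this file:

```go
package example

import "fmt"

// Version packs major.minor.patch into a uint32, one field per byte. This is
// a sketch of a packed API-version scheme; treat the encoding and the
// EqMajorMinor helper as assumptions, not code from this diff.
type Version uint32

func newVer(major, minor, patch uint8) Version {
	return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch))
}

// EqMajorMinor treats two versions as compatible when major and minor match,
// which is why a breaking method change bumps the minor (1.3.0 -> 1.4.0).
func (v Version) EqMajorMinor(other Version) bool {
	return v>>8 == other>>8
}

func compatDemo() {
	old, cur := newVer(1, 3, 0), newVer(1, 4, 0)
	fmt.Printf("compatible: %v\n", old.EqMajorMinor(cur)) // prints: compatible: false
}
```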

Binary file not shown.

View File

@@ -276,6 +276,13 @@ var sealBenchCmd = &cli.Command{
if err != nil {
return xerrors.Errorf("failed to run seals: %w", err)
}
for _, s := range extendedSealedSectors {
sealedSectors = append(sealedSectors, proof.SectorInfo{
SealedCID: s.SealedCID,
SectorNumber: s.SectorNumber,
SealProof: s.SealProof,
})
}
} else {
// TODO: implement sbfs.List() and use that for all cases (preexisting sectorbuilder or not)

View File

@@ -437,6 +437,7 @@ var provingCheckProvableCmd = &cli.Command{
}
var tocheck []storage.SectorRef
var update []bool
for _, info := range sectorInfos {
si := abi.SectorID{
Miner: abi.ActorID(mid),
@@ -454,9 +455,10 @@ var provingCheckProvableCmd = &cli.Command{
ProofType: info.SealProof,
ID: si,
})
update = append(update, info.SectorKeyCID != nil)
}
bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow"))
bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, update, cctx.Bool("slow"))
if err != nil {
return err
}

View File

@@ -1530,6 +1530,12 @@ var sectorsSnapAbortCmd = &cli.Command{
return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number"))
}
really := cctx.Bool("really-do-it")
if !really {
//nolint:golint
return fmt.Errorf("--really-do-it must be specified for this action to have an effect; you have been warned")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err

View File

@@ -338,6 +338,9 @@ Inputs:
"ProofType": 8
}
],
[
true
],
true
]
```

View File

@@ -19,11 +19,11 @@ import (
// FaultTracker TODO: Track things more actively
type FaultTracker interface {
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error)
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error)
}
// CheckProvable returns unprovable sectors
func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) {
var bad = make(map[abi.SectorID]string)
ssize, err := pp.SectorSize()
@@ -32,11 +32,31 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
}
// TODO: More better checks
for _, sector := range sectors {
for i, sector := range sectors {
err := func() error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
var fReplica string
var fCache string
if update[i] {
lockedUpdate, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone)
if err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
if !lockedUpdate {
log.Warnw("CheckProvable Sector FAULT: can't acquire read lock on update replica", "sector", sector)
bad[sector.ID] = fmt.Sprint("can't acquire read lock")
return nil
}
lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
if err != nil {
log.Warnw("CheckProvable Sector FAULT: acquire sector update replica in checkProvable", "sector", sector, "error", err)
bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err)
return nil
}
fReplica, fCache = lp.Update, lp.UpdateCache
} else {
locked, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTNone)
if err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
@@ -54,50 +74,34 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err)
return nil
}
fReplica, fCache = lp.Sealed, lp.Cache
// temporary hack to make the check work with snapdeals
// will go away in https://github.com/filecoin-project/lotus/pull/7971
if lp.Sealed == "" || lp.Cache == "" {
// maybe it's update
lockedUpdate, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone)
if err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
if lockedUpdate {
lp, _, err = m.localStore.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
if err != nil {
log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err)
return nil
}
lp.Sealed, lp.Cache = lp.Update, lp.UpdateCache
}
}
if lp.Sealed == "" || lp.Cache == "" {
log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache)
bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", lp.Cache, lp.Sealed)
if fReplica == "" || fCache == "" {
log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", fReplica, "cache", fCache)
bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", fCache, fReplica)
return nil
}
toCheck := map[string]int64{
lp.Sealed: 1,
filepath.Join(lp.Cache, "p_aux"): 0,
fReplica: 1,
filepath.Join(fCache, "p_aux"): 0,
}
addCachePathsForSectorSize(toCheck, lp.Cache, ssize)
addCachePathsForSectorSize(toCheck, fCache, ssize)
for p, sz := range toCheck {
st, err := os.Stat(p)
if err != nil {
log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "err", err)
log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", fReplica, "cache", fCache, "file", p, "err", err)
bad[sector.ID] = fmt.Sprintf("%s", err)
return nil
}
if sz != 0 {
if st.Size() != int64(ssize)*sz {
log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz)
log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", fReplica, "cache", fCache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz)
bad[sector.ID] = fmt.Sprintf("%s is wrong size (got %d, expect %d)", p, st.Size(), int64(ssize)*sz)
return nil
}
@@ -118,14 +122,14 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
sector.ID.Number,
})
if err != nil {
log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err)
log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err)
bad[sector.ID] = fmt.Sprintf("generating fallback challenges: %s", err)
return nil
}
commr, err := rg(ctx, sector.ID)
if err != nil {
log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err)
log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err)
bad[sector.ID] = fmt.Sprintf("getting commR: %s", err)
return nil
}
@@ -136,12 +140,12 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
SectorNumber: sector.ID.Number,
SealedCID: commr,
},
CacheDirPath: lp.Cache,
CacheDirPath: fCache,
PoStProofType: wpp,
SealedSectorPath: lp.Sealed,
SealedSectorPath: fReplica,
}, ch.Challenges[sector.ID.Number])
if err != nil {
log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err)
log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err)
bad[sector.ID] = fmt.Sprintf("generating vanilla proof: %s", err)
return nil
}
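The net effect of this hunk, condensed: the per-sector `update` flag now decides up front whether the check verifies the update replica and update cache or the sealed replica and cache, replacing the removed "maybe it's update" fallback. A stripped-down sketch of just that selection step; the `acquire*` helpers are hypothetical stand-ins, not lotus functions:

```go
package example

import (
	"context"

	"github.com/filecoin-project/specs-storage/storage"
)

// provablePaths condenses the selection logic above: the update flag picks
// which file set CheckProvable should lock, acquire, and then size-check and
// vanilla-prove. acquireUpdate and acquireSealed stand in for the
// StorageTryLock + AcquireSector plumbing shown in the hunk.
func provablePaths(ctx context.Context, sector storage.SectorRef, isUpdate bool,
	acquireUpdate, acquireSealed func(context.Context, storage.SectorRef) (replica, cache string, err error),
) (string, string, error) {
	if isUpdate {
		// Snap-upgraded sector: FTUpdate / FTUpdateCache must be locked and present.
		return acquireUpdate(ctx, sector)
	}
	// Regular sector: FTSealed / FTCache must be locked and present.
	return acquireSealed(ctx, sector)
}
```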

View File

@@ -505,7 +505,7 @@ func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) erro
return nil
}
func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) {
bad := map[abi.SectorID]string{}
for _, sid := range ids {

View File

@@ -294,6 +294,10 @@ func ftFromString(t string) (storiface.SectorFileType, error) {
return storiface.FTSealed, nil
case storiface.FTCache.String():
return storiface.FTCache, nil
case storiface.FTUpdate.String():
return storiface.FTUpdate, nil
case storiface.FTUpdateCache.String():
return storiface.FTUpdateCache, nil
default:
return 0, xerrors.Errorf("unknown sector file type: '%s'", t)
}
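One way to exercise the new cases is a round-trip check: each `SectorFileType` handled here should parse back from its `String()` form. A hedged sketch of such a table-driven test, assuming it sits in the same package as `ftFromString` and uses the lotus `storiface` package referenced above:

```go
package main // assumption: placed alongside ftFromString in the same command package

import (
	"testing"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

// TestFileTypeRoundTrip verifies that the string form of each file type handled
// by ftFromString, including the newly added FTUpdate and FTUpdateCache,
// parses back to the same value.
func TestFileTypeRoundTrip(t *testing.T) {
	for _, ft := range []storiface.SectorFileType{
		storiface.FTSealed,
		storiface.FTCache,
		storiface.FTUpdate,
		storiface.FTUpdateCache,
	} {
		got, err := ftFromString(ft.String())
		if err != nil {
			t.Fatalf("parsing %q: %s", ft.String(), err)
		}
		if got != ft {
			t.Fatalf("round-trip mismatch: got %v, want %v", got, ft)
		}
	}
}
```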

go.mod
View File

@@ -2,6 +2,8 @@ module github.com/filecoin-project/lotus
go 1.16
retract v1.14.0 // Accidentally force-pushed tag, use v1.14.1+ instead.
require (
contrib.go.opencensus.io/exporter/prometheus v0.4.0
github.com/BurntSushi/toml v0.4.1

View File

@@ -1127,7 +1127,7 @@ func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error
return backup(ctx, sm.DS, fpath)
}
func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) {
func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) {
var rg storiface.RGetter
if expensive {
rg = func(ctx context.Context, id abi.SectorID) (cid.Cid, error) {
@@ -1143,7 +1143,7 @@ func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredP
}
}
bad, err := sm.StorageMgr.CheckProvable(ctx, pp, sectors, rg)
bad, err := sm.StorageMgr.CheckProvable(ctx, pp, sectors, update, rg)
if err != nil {
return nil, err
}

View File

@@ -206,6 +206,7 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B
sectors := make(map[abi.SectorNumber]struct{})
var tocheck []storage.SectorRef
var update []bool
for _, info := range sectorInfos {
sectors[info.SectorNumber] = struct{}{}
tocheck = append(tocheck, storage.SectorRef{
@@ -215,9 +216,10 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B
Number: info.SectorNumber,
},
})
update = append(update, info.SectorKeyCID != nil)
}
bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck, nil)
bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck, update, nil)
if err != nil {
return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err)
}

View File

@@ -168,7 +168,7 @@ func (m mockVerif) GenerateWinningPoStSectorChallenge(context.Context, abi.Regis
type mockFaultTracker struct {
}
func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) {
// Returns "bad" sectors so just return empty map meaning all sectors are good
return map[abi.SectorID]string{}, nil
}