Merge remote-tracking branch 'origin/master' into feat/post-worker

This commit is contained in:
Łukasz Magiera 2022-01-18 10:49:42 +01:00
commit 0c062f9c08
107 changed files with 2696 additions and 441 deletions

View File

@ -1084,7 +1084,7 @@ type CirculatingSupply struct {
type MiningBaseInfo struct {
MinerPower types.BigInt
NetworkPower types.BigInt
Sectors []builtin.SectorInfo
Sectors []builtin.ExtendedSectorInfo
WorkerKey address.Address
SectorSize abi.SectorSize
PrevBeaconEntry types.BeaconEntry

View File

@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/go-address"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-state-types/abi"
abinetwork "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
"github.com/filecoin-project/specs-storage/storage"
@ -99,8 +100,8 @@ type StorageMiner interface {
// Returns null if message wasn't sent
SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin
// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin
// SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
// Returns null if message wasn't sent
SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin
@ -111,6 +112,7 @@ type StorageMiner interface {
SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin
// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin
// WorkerConnect tells the node to connect to workers RPC
WorkerConnect(context.Context, string) error //perm:admin retry:true
@ -253,7 +255,7 @@ type StorageMiner interface {
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) //perm:read
ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
}
var _ storiface.WorkerReturn = *new(StorageMiner)

View File

@ -17,6 +17,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
abinetwork "github.com/filecoin-project/go-state-types/network"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -621,7 +622,7 @@ type StorageMinerStruct struct {
CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
ComputeProof func(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) `perm:"read"`
ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"`
CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`
@ -759,7 +760,9 @@ type StorageMinerStruct struct {
SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"`
SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber, p2 bool) error `perm:"admin"`
SectorMatchPendingPiecesToOpenSectors func(p0 context.Context) error `perm:"admin"`
SectorPreCommitFlush func(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) `perm:"admin"`
@ -3715,14 +3718,14 @@ func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPo
return *new(map[abi.SectorNumber]string), ErrNotSupported
}
func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) {
func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) {
if s.Internal.ComputeProof == nil {
return *new([]builtin.PoStProof), ErrNotSupported
}
return s.Internal.ComputeProof(p0, p1, p2)
return s.Internal.ComputeProof(p0, p1, p2, p3, p4)
}
func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) {
func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) {
return *new([]builtin.PoStProof), ErrNotSupported
}
@ -4474,14 +4477,25 @@ func (s *StorageMinerStub) SectorGetSealDelay(p0 context.Context) (time.Duration
return *new(time.Duration), ErrNotSupported
}
func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber, p2 bool) error {
if s.Internal.SectorMarkForUpgrade == nil {
return ErrNotSupported
}
return s.Internal.SectorMarkForUpgrade(p0, p1)
return s.Internal.SectorMarkForUpgrade(p0, p1, p2)
}
func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber, p2 bool) error {
return ErrNotSupported
}
func (s *StorageMinerStruct) SectorMatchPendingPiecesToOpenSectors(p0 context.Context) error {
if s.Internal.SectorMatchPendingPiecesToOpenSectors == nil {
return ErrNotSupported
}
return s.Internal.SectorMatchPendingPiecesToOpenSectors(p0)
}
func (s *StorageMinerStub) SectorMatchPendingPiecesToOpenSectors(p0 context.Context) error {
return ErrNotSupported
}

View File

@ -8,7 +8,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
abinetwork "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -57,7 +57,7 @@ type Gateway interface {
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error)
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
StateNetworkVersion(context.Context, types.TipSetKey) (abinetwork.Version, error)
StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)

View File

@ -13,7 +13,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
abinetwork "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -451,7 +451,7 @@ type GatewayStruct struct {
StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (network.Version, error) ``
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) ``
StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) ``
@ -2703,15 +2703,15 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) {
func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) {
if s.Internal.StateNetworkVersion == nil {
return *new(network.Version), ErrNotSupported
return *new(abinetwork.Version), ErrNotSupported
}
return s.Internal.StateNetworkVersion(p0, p1)
}
func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) {
return *new(network.Version), ErrNotSupported
func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) {
return *new(abinetwork.Version), ErrNotSupported
}
func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) {

View File

@ -54,10 +54,10 @@ func VersionForType(nodeType NodeType) (Version, error) {
// semver versions of the rpc api exposed
var (
FullAPIVersion0 = newVer(1, 4, 0)
FullAPIVersion1 = newVer(2, 1, 0)
FullAPIVersion0 = newVer(1, 5, 0)
FullAPIVersion1 = newVer(2, 2, 0)
MinerAPIVersion0 = newVer(1, 2, 0)
MinerAPIVersion0 = newVer(1, 3, 0)
WorkerAPIVersion0 = newVer(1, 5, 0)
)
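These bumps are why the generated API docs later in this diff change their reported APIVersion from 131328 to 131584. A quick sanity check, assuming newVer packs major/minor/patch into a uint32 as (major<<16)|(minor<<8)|patch, as in lotus's api/version.go:

```go
package main

import "fmt"

// newVer mirrors the semver packing assumed above:
// major<<16 | minor<<8 | patch, in a single uint32.
func newVer(major, minor, patch uint8) uint32 {
	return uint32(major)<<16 | uint32(minor)<<8 | uint32(patch)
}

func main() {
	fmt.Println(newVer(2, 1, 0)) // 131328: the old FullAPIVersion1 (v2.1.0)
	fmt.Println(newVer(2, 2, 0)) // 131584: the new FullAPIVersion1 (v2.2.0)
	fmt.Println(newVer(1, 3, 0)) // 66304:  the new MinerAPIVersion0 (v1.3.0)
}
```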

blockstore/autobatch.go (new file, 262 lines)
View File

@ -0,0 +1,262 @@
package blockstore
import (
"context"
"sync"
"time"
"golang.org/x/xerrors"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
)
// autolog is a logger for the autobatching blockstore. It is subscoped from the
// blockstore logger.
var autolog = log.Named("auto")
// contains the same set of blocks twice, once as an ordered list for flushing, and as a map for fast access
type blockBatch struct {
blockList []block.Block
blockMap map[cid.Cid]block.Block
}
type AutobatchBlockstore struct {
// TODO: drop if memory consumption is too high
addedCids map[cid.Cid]struct{}
stateLock sync.Mutex
bufferedBatch blockBatch
flushingBatch blockBatch
flushErr error
flushCh chan struct{}
doFlushLock sync.Mutex
flushRetryDelay time.Duration
doneCh chan struct{}
shutdown context.CancelFunc
backingBs Blockstore
bufferCapacity int
bufferSize int
}
func NewAutobatch(ctx context.Context, backingBs Blockstore, bufferCapacity int) *AutobatchBlockstore {
ctx, cancel := context.WithCancel(ctx)
bs := &AutobatchBlockstore{
addedCids: make(map[cid.Cid]struct{}),
backingBs: backingBs,
bufferCapacity: bufferCapacity,
flushCh: make(chan struct{}, 1),
doneCh: make(chan struct{}),
// could be made configurable
flushRetryDelay: time.Millisecond * 100,
shutdown: cancel,
}
bs.bufferedBatch.blockMap = make(map[cid.Cid]block.Block)
go bs.flushWorker(ctx)
return bs
}
func (bs *AutobatchBlockstore) Put(ctx context.Context, blk block.Block) error {
bs.stateLock.Lock()
defer bs.stateLock.Unlock()
_, ok := bs.addedCids[blk.Cid()]
if !ok {
bs.addedCids[blk.Cid()] = struct{}{}
bs.bufferedBatch.blockList = append(bs.bufferedBatch.blockList, blk)
bs.bufferedBatch.blockMap[blk.Cid()] = blk
bs.bufferSize += len(blk.RawData())
if bs.bufferSize >= bs.bufferCapacity {
// signal that a flush is appropriate, may be ignored
select {
case bs.flushCh <- struct{}{}:
default:
// do nothing
}
}
}
return nil
}
func (bs *AutobatchBlockstore) flushWorker(ctx context.Context) {
defer close(bs.doneCh)
for {
select {
case <-bs.flushCh:
// TODO: check if we _should_ actually flush. We could get a spurious wakeup
// here.
putErr := bs.doFlush(ctx, false)
for putErr != nil {
select {
case <-ctx.Done():
return
case <-time.After(bs.flushRetryDelay):
autolog.Errorf("FLUSH ERRORED: %s, retrying after %v", putErr, bs.flushRetryDelay)
putErr = bs.doFlush(ctx, true)
}
}
case <-ctx.Done():
// Do one last flush.
_ = bs.doFlush(ctx, false)
return
}
}
}
// caller must NOT hold stateLock
// set retryOnly to true to only retry a failed flush and not flush anything new.
func (bs *AutobatchBlockstore) doFlush(ctx context.Context, retryOnly bool) error {
bs.doFlushLock.Lock()
defer bs.doFlushLock.Unlock()
// If we failed to flush last time, try flushing again.
if bs.flushErr != nil {
bs.flushErr = bs.backingBs.PutMany(ctx, bs.flushingBatch.blockList)
}
// If we failed, or we're _only_ retrying, bail.
if retryOnly || bs.flushErr != nil {
return bs.flushErr
}
// Then take the current batch...
bs.stateLock.Lock()
// We do NOT clear addedCids here, because its purpose is to expedite Puts
bs.flushingBatch = bs.bufferedBatch
bs.bufferedBatch.blockList = make([]block.Block, 0, len(bs.flushingBatch.blockList))
bs.bufferedBatch.blockMap = make(map[cid.Cid]block.Block, len(bs.flushingBatch.blockMap))
bs.stateLock.Unlock()
// And try to flush it.
bs.flushErr = bs.backingBs.PutMany(ctx, bs.flushingBatch.blockList)
// If we succeeded, reset the batch. Otherwise, we'll try again next time.
if bs.flushErr == nil {
bs.stateLock.Lock()
bs.flushingBatch = blockBatch{}
bs.stateLock.Unlock()
}
return bs.flushErr
}
// caller must NOT hold stateLock
func (bs *AutobatchBlockstore) Flush(ctx context.Context) error {
return bs.doFlush(ctx, false)
}
func (bs *AutobatchBlockstore) Shutdown(ctx context.Context) error {
// TODO: Prevent puts after we call this to avoid losing data.
bs.shutdown()
select {
case <-bs.doneCh:
case <-ctx.Done():
return ctx.Err()
}
bs.doFlushLock.Lock()
defer bs.doFlushLock.Unlock()
return bs.flushErr
}
func (bs *AutobatchBlockstore) Get(ctx context.Context, c cid.Cid) (block.Block, error) {
// may seem backward to check the backingBs first, but that is the likeliest case
blk, err := bs.backingBs.Get(ctx, c)
if err == nil {
return blk, nil
}
if err != ErrNotFound {
return blk, err
}
bs.stateLock.Lock()
defer bs.stateLock.Unlock()
v, ok := bs.flushingBatch.blockMap[c]
if ok {
return v, nil
}
v, ok = bs.bufferedBatch.blockMap[c]
if ok {
return v, nil
}
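// Not found anywhere; retry from the top, in case a concurrent flush moved the
// block out of a batch and into the backing store between the checks above.
// (A CID that was never Put will keep recursing here.)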
return bs.Get(ctx, c)
}
func (bs *AutobatchBlockstore) DeleteBlock(context.Context, cid.Cid) error {
// if we wanted to support this, we would have to:
// - flush
// - delete from the backingBs (if present)
// - remove from addedCids (if present)
// - if present in addedCids, also walk the ordered lists and remove if present
return xerrors.New("deletion is unsupported")
}
func (bs *AutobatchBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
// see note in DeleteBlock()
return xerrors.New("deletion is unsupported")
}
func (bs *AutobatchBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) {
_, err := bs.Get(ctx, c)
if err == nil {
return true, nil
}
if err == ErrNotFound {
return false, nil
}
return false, err
}
func (bs *AutobatchBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
blk, err := bs.Get(ctx, c)
if err != nil {
return 0, err
}
return len(blk.RawData()), nil
}
func (bs *AutobatchBlockstore) PutMany(ctx context.Context, blks []block.Block) error {
for _, blk := range blks {
if err := bs.Put(ctx, blk); err != nil {
return err
}
}
return nil
}
func (bs *AutobatchBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
if err := bs.Flush(ctx); err != nil {
return nil, err
}
return bs.backingBs.AllKeysChan(ctx)
}
func (bs *AutobatchBlockstore) HashOnRead(enabled bool) {
bs.backingBs.HashOnRead(enabled)
}
func (bs *AutobatchBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
blk, err := bs.Get(ctx, cid)
if err != nil {
return err
}
return callback(blk.RawData())
}

View File

@ -0,0 +1,34 @@
package blockstore
import (
"context"
"testing"
"github.com/stretchr/testify/require"
)
func TestAutobatchBlockstore(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
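// The buffer capacity is one byte short of b0+b1, so the second Put crosses
// the threshold and signals an automatic background flush.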
ab := NewAutobatch(ctx, NewMemory(), len(b0.RawData())+len(b1.RawData())-1)
require.NoError(t, ab.Put(ctx, b0))
require.NoError(t, ab.Put(ctx, b1))
require.NoError(t, ab.Put(ctx, b2))
v0, err := ab.Get(ctx, b0.Cid())
require.NoError(t, err)
require.Equal(t, b0.RawData(), v0.RawData())
v1, err := ab.Get(ctx, b1.Cid())
require.NoError(t, err)
require.Equal(t, b1.RawData(), v1.RawData())
v2, err := ab.Get(ctx, b2.Cid())
require.NoError(t, err)
require.Equal(t, b2.RawData(), v2.RawData())
require.NoError(t, ab.Flush(ctx))
require.NoError(t, ab.Shutdown(ctx))
}

View File

@ -1,2 +1,2 @@
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBzv5sf4eTyo8cjJGfGnpxo6QkEPkRShG9GqjE2A5QaW5
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBo9TSD4XXRFtu6snv6QNYvXgRaSaVb116YiYEsDWgKtq
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBdRCBLUeKvoy22u5DcXs61adFn31v8WWCZgmBjDCjbsC
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWDUQJBA18njjXnG9RtLxoN3muvdU7PEy55QorUEsdAqdy

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -17,7 +17,7 @@ import (
const BootstrappersFile = ""
const GenesisFile = ""
const GenesisNetworkVersion = network.Version14
const GenesisNetworkVersion = network.Version15
var UpgradeBreezeHeight = abi.ChainEpoch(-1)
@ -47,7 +47,7 @@ var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
var UpgradeChocolateHeight = abi.ChainEpoch(-17)
var UpgradeSnapDealsHeight = abi.ChainEpoch(-18)
var UpgradeOhSnapHeight = abi.ChainEpoch(-18)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,

View File

@ -16,7 +16,7 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}
const GenesisNetworkVersion = network.Version13
const GenesisNetworkVersion = network.Version14
const BootstrappersFile = "butterflynet.pi"
const GenesisFile = "butterflynet.car"
@ -40,13 +40,17 @@ const UpgradeTrustHeight = -13
const UpgradeNorwegianHeight = -14
const UpgradeTurboHeight = -15
const UpgradeHyperdriveHeight = -16
const UpgradeChocolateHeight = 6360
const UpgradeSnapDealsHeight = 99999999
const UpgradeChocolateHeight = -17
// 2022-01-17T19:00:00Z
const UpgradeOhSnapHeight = 30262
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
)
SetAddressNetwork(address.Testnet)

View File

@ -54,7 +54,7 @@ const UpgradeHyperdriveHeight = 420
const UpgradeChocolateHeight = 312746
const UpgradeSnapDealsHeight = 99999999
const UpgradeOhSnapHeight = 99999999
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))

View File

@ -47,7 +47,7 @@ var UpgradeTurboHeight = abi.ChainEpoch(-15)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
var UpgradeChocolateHeight = abi.ChainEpoch(-17)
var UpgradeSnapDealsHeight = abi.ChainEpoch(-18)
var UpgradeOhSnapHeight = abi.ChainEpoch(-18)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,

View File

@ -67,7 +67,7 @@ const UpgradeHyperdriveHeight = 892800
// 2021-10-26T13:30:00Z
const UpgradeChocolateHeight = 1231620
var UpgradeSnapDealsHeight = abi.ChainEpoch(999999999999)
var UpgradeOhSnapHeight = abi.ChainEpoch(999999999999)
func init() {
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
@ -75,7 +75,7 @@ func init() {
}
if os.Getenv("LOTUS_DISABLE_SNAPDEALS") == "1" {
UpgradeSnapDealsHeight = math.MaxInt64
UpgradeOhSnapHeight = math.MaxInt64
}
Devnet = false

View File

@ -99,7 +99,7 @@ var (
UpgradeTurboHeight abi.ChainEpoch = -14
UpgradeHyperdriveHeight abi.ChainEpoch = -15
UpgradeChocolateHeight abi.ChainEpoch = -16
UpgradeSnapDealsHeight abi.ChainEpoch = -17
UpgradeOhSnapHeight abi.ChainEpoch = -17
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
@ -107,8 +107,8 @@ var (
GenesisNetworkVersion = network.Version0
NewestNetworkVersion = network.Version14
ActorUpgradeNetworkVersion = network.Version4
NewestNetworkVersion = network.Version15
ActorUpgradeNetworkVersion = network.Version15
Devnet = true
ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")

View File

@ -37,7 +37,7 @@ func BuildTypeString() string {
}
// BuildVersion is the local build version
const BuildVersion = "1.13.3-dev"
const BuildVersion = "1.15.0-dev"
func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

View File

@ -61,6 +61,7 @@ const (
// These are all just type aliases across actor versions. In the future, that might change
// and we might need to do something fancier.
type SectorInfo = proof7.SectorInfo
type ExtendedSectorInfo = proof7.ExtendedSectorInfo
type PoStProof = proof7.PoStProof
type FilterEstimate = smoothing0.FilterEstimate

View File

@ -45,6 +45,7 @@ const (
// These are all just type aliases across actor versions. In the future, that might change
// and we might need to do something fancier.
type SectorInfo = proof{{.latestVersion}}.SectorInfo
type ExtendedSectorInfo = proof{{.latestVersion}}.ExtendedSectorInfo
type PoStProof = proof{{.latestVersion}}.PoStProof
type FilterEstimate = smoothing0.FilterEstimate

View File

@ -23,6 +23,7 @@ import (
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
{{range .versions}}
builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
{{end}}
@ -193,6 +194,7 @@ type SectorPreCommitOnChainInfo struct {
type PoStPartition = miner0.PoStPartition
type RecoveryDeclaration = miner0.RecoveryDeclaration
type FaultDeclaration = miner0.FaultDeclaration
type ReplicaUpdate = miner7.ReplicaUpdate
// Params
type DeclareFaultsParams = miner0.DeclareFaultsParams
@ -201,6 +203,7 @@ type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
type ProveReplicaUpdatesParams = miner7.ProveReplicaUpdatesParams
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old

View File

@ -23,6 +23,7 @@ import (
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@ -282,6 +283,7 @@ type SectorPreCommitOnChainInfo struct {
type PoStPartition = miner0.PoStPartition
type RecoveryDeclaration = miner0.RecoveryDeclaration
type FaultDeclaration = miner0.FaultDeclaration
type ReplicaUpdate = miner7.ReplicaUpdate
// Params
type DeclareFaultsParams = miner0.DeclareFaultsParams
@ -290,6 +292,7 @@ type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
type ProveReplicaUpdatesParams = miner7.ProveReplicaUpdatesParams
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old

View File

@ -39,7 +39,11 @@ func (m message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount)
func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych{{.v}}.UpdateChannelStateParams{
{{if (ge .v 7)}}
Sv: toV{{.v}}SignedVoucher(*sv),
{{else}}
Sv: *sv,
{{end}}
Secret: secret,
})
if aerr != nil {

View File

@ -39,7 +39,9 @@ func (m message0) Create(to address.Address, initialAmount abi.TokenAmount) (*ty
func (m message0) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych0.UpdateChannelStateParams{
Sv: *sv,
Sv: *sv,
Secret: secret,
})
if aerr != nil {

View File

@ -39,7 +39,9 @@ func (m message2) Create(to address.Address, initialAmount abi.TokenAmount) (*ty
func (m message2) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych2.UpdateChannelStateParams{
Sv: *sv,
Sv: *sv,
Secret: secret,
})
if aerr != nil {

View File

@ -39,7 +39,9 @@ func (m message3) Create(to address.Address, initialAmount abi.TokenAmount) (*ty
func (m message3) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych3.UpdateChannelStateParams{
Sv: *sv,
Sv: *sv,
Secret: secret,
})
if aerr != nil {

View File

@ -39,7 +39,9 @@ func (m message4) Create(to address.Address, initialAmount abi.TokenAmount) (*ty
func (m message4) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych4.UpdateChannelStateParams{
Sv: *sv,
Sv: *sv,
Secret: secret,
})
if aerr != nil {

View File

@ -39,7 +39,9 @@ func (m message5) Create(to address.Address, initialAmount abi.TokenAmount) (*ty
func (m message5) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych5.UpdateChannelStateParams{
Sv: *sv,
Sv: *sv,
Secret: secret,
})
if aerr != nil {

View File

@ -39,7 +39,9 @@ func (m message6) Create(to address.Address, initialAmount abi.TokenAmount) (*ty
func (m message6) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych6.UpdateChannelStateParams{
Sv: *sv,
Sv: *sv,
Secret: secret,
})
if aerr != nil {

View File

@ -39,7 +39,9 @@ func (m message7) Create(to address.Address, initialAmount abi.TokenAmount) (*ty
func (m message7) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych7.UpdateChannelStateParams{
Sv: *sv,
Sv: toV7SignedVoucher(*sv),
Secret: secret,
})
if aerr != nil {

View File

@ -112,3 +112,21 @@ func (ls *laneState{{.v}}) Redeemed() (big.Int, error) {
func (ls *laneState{{.v}}) Nonce() (uint64, error) {
return ls.LaneState.Nonce, nil
}
{{if (ge .v 7)}}
func toV{{.v}}SignedVoucher(sv SignedVoucher) paych{{.v}}.SignedVoucher {
return paych{{.v}}.SignedVoucher{
ChannelAddr: sv.ChannelAddr,
TimeLockMin: sv.TimeLockMin,
TimeLockMax: sv.TimeLockMax,
SecretHash: sv.SecretPreimage,
Extra: sv.Extra,
Lane: sv.Lane,
Nonce: sv.Nonce,
Amount: sv.Amount,
MinSettleHeight: sv.MinSettleHeight,
Merges: sv.Merges,
Signature: sv.Signature,
}
}
{{end}}

View File

@ -112,3 +112,19 @@ func (ls *laneState7) Redeemed() (big.Int, error) {
func (ls *laneState7) Nonce() (uint64, error) {
return ls.LaneState.Nonce, nil
}
func toV7SignedVoucher(sv SignedVoucher) paych7.SignedVoucher {
return paych7.SignedVoucher{
ChannelAddr: sv.ChannelAddr,
TimeLockMin: sv.TimeLockMin,
TimeLockMax: sv.TimeLockMax,
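// paych v7 renamed the voucher's SecretPreimage field to SecretHash; the value carried over is unchanged.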
SecretHash: sv.SecretPreimage,
Extra: sv.Extra,
Lane: sv.Lane,
Nonce: sv.Nonce,
Amount: sv.Amount,
MinSettleHeight: sv.MinSettleHeight,
Merges: sv.Merges,
Signature: sv.Signature,
}
}

View File

@ -26,7 +26,7 @@ import (
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
@ -400,12 +400,21 @@ func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network.
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
}
sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand)
xsectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand)
if err != nil {
return xerrors.Errorf("getting winning post sector set: %w", err)
}
ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{
sectors := make([]proof.SectorInfo, len(xsectors))
for i, xsi := range xsectors {
sectors[i] = proof.SectorInfo{
SealProof: xsi.SealProof,
SectorNumber: xsi.SectorNumber,
SealedCID: xsi.SealedCID,
}
}
ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof.WinningPoStVerifyInfo{
Randomness: rand,
Proofs: h.WinPoStProof,
ChallengedSectors: sectors,
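(Note: the ExtendedSectorInfos returned by GetSectorsForWinningPoSt carry the optional SectorKey for snap-upgraded sectors, but WinningPoStVerifyInfo still takes plain proof.SectorInfo, hence the narrowing loop above.)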

View File

@ -5,6 +5,8 @@ import (
"runtime"
"time"
"github.com/docker/go-units"
"github.com/filecoin-project/specs-actors/v6/actors/migration/nv14"
"github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"
@ -158,7 +160,7 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
}},
Expensive: true,
}, {
Height: build.UpgradeSnapDealsHeight,
Height: build.UpgradeOhSnapHeight,
Network: network.Version15,
Migration: UpgradeActorsV7,
PreMigrations: []stmgr.PreMigration{{
@ -1245,8 +1247,15 @@ func PreUpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr
workerCount /= 2
}
config := nv15.Config{MaxWorkers: uint(workerCount)}
_, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config)
lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
if err != nil {
return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
}
config := nv15.Config{MaxWorkers: uint(workerCount),
ProgressLogPeriod: time.Minute * 5}
_, err = upgradeActorsV7Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
return err
}
@ -1255,9 +1264,10 @@ func upgradeActorsV7Common(
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv15.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB)
// TODO: pretty sure we'd achieve nothing by doing this, confirm in review
//buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), writeStore)
store := store.ActorStore(ctx, writeStore)
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
@ -1287,15 +1297,13 @@ func upgradeActorsV7Common(
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persist the new tree.
// Persists the new tree and shuts down the flush worker
if err := writeStore.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err)
}
{
from := buf
to := buf.Read()
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
}
if err := writeStore.Shutdown(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err)
}
return newRoot, nil

View File

@ -461,7 +461,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet,
if et != nil {
// TODO: maybe think about passing in more real parameters to this?
wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil)
wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil, round, network.Version0)
if err != nil {
return nil, err
}
@ -620,7 +620,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr
type WinningPoStProver interface {
GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error)
ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error)
ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof5.PoStProof, error)
}
type wppProvider struct{}
@ -629,7 +629,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom
return []uint64{0}, nil
}
func (wpp *wppProvider) ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) {
func (wpp *wppProvider) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof5.PoStProof, error) {
return ValidWpostForTesting, nil
}
@ -692,11 +692,11 @@ func (m genFakeVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (b
panic("not supported")
}
func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) {
panic("not supported")
}
func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) (bool, error) {
panic("not supported")
}

View File

@ -117,7 +117,7 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres
return mas.GetSector(sid)
}
func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.ExtendedSectorInfo, error) {
act, err := sm.LoadActorRaw(ctx, maddr, st)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor: %w", err)
@ -203,12 +203,13 @@ func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwra
return nil, xerrors.Errorf("loading proving sectors: %w", err)
}
out := make([]builtin.SectorInfo, len(sectors))
out := make([]builtin.ExtendedSectorInfo, len(sectors))
for i, sinfo := range sectors {
out[i] = builtin.SectorInfo{
out[i] = builtin.ExtendedSectorInfo{
SealProof: sinfo.SealProof,
SectorNumber: sinfo.SectorNumber,
SealedCID: sinfo.SealedCID,
SectorKey: sinfo.SectorKeyCID,
}
}

View File

@ -8,6 +8,8 @@ import (
"sync"
"time"
"github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
@ -15,8 +17,6 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
@ -211,7 +211,7 @@ func (sm *StateManager) hasExpensiveFork(height abi.ChainEpoch) bool {
return ok
}
func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv10.MemMigrationCache, ts *types.TipSet) {
func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv15.MemMigrationCache, ts *types.TipSet) {
height := ts.Height()
parent := ts.ParentState()

View File

@ -4,6 +4,8 @@ import (
"context"
"sync"
"github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"
"github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/beacon"
@ -18,10 +20,6 @@ import (
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
// Used for genesis.
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
@ -30,6 +28,9 @@ import (
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
// Used for genesis.
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
)
const LookbackNoLimit = api.LookbackNoLimit
@ -53,7 +54,7 @@ type versionSpec struct {
type migration struct {
upgrade MigrationFunc
preMigrations []PreMigration
cache *nv10.MemMigrationCache
cache *nv15.MemMigrationCache
}
type Executor interface {
@ -121,7 +122,7 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
migration := &migration{
upgrade: upgrade.Migration,
preMigrations: upgrade.PreMigrations,
cache: nv10.NewMemMigrationCache(),
cache: nv15.NewMemMigrationCache(),
}
stateMigrations[upgrade.Height] = migration
}

View File

@ -22,6 +22,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
@ -543,7 +544,7 @@ func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64
return []uint64{1}, nil
}
func (wpp badWpp) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) {
func (wpp badWpp) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof2.PoStProof, error) {
return []proof2.PoStProof{
{
PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,

View File

@ -245,8 +245,8 @@ func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Addre
return ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker)
}
func (ss *syscallShim) VerifyPoSt(proof proof5.WindowPoStVerifyInfo) error {
ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof)
func (ss *syscallShim) VerifyPoSt(info proof5.WindowPoStVerifyInfo) error {
ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), info)
if err != nil {
return err
}

View File

@ -86,9 +86,10 @@ func (cv *cachingVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) {
}, &svi)
}
func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) {
func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) {
return cv.backend.VerifyWinningPoSt(ctx, info)
}
func (cv *cachingVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) {
return cv.withCache(func() (bool, error) {
return cv.backend.VerifyWindowPoSt(ctx, info)

View File

@ -12,6 +12,8 @@ import (
"time"
saproof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
saproof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/docker/go-units"
logging "github.com/ipfs/go-log/v2"
@ -260,7 +262,8 @@ var sealBenchCmd = &cli.Command{
sectorNumber := c.Int("num-sectors")
var sealTimings []SealingResult
var sealedSectors []saproof2.SectorInfo
var extendedSealedSectors []saproof7.ExtendedSectorInfo
var sealedSectors []saproof7.SectorInfo
if robench == "" {
var err error
@ -269,7 +272,7 @@ var sealBenchCmd = &cli.Command{
PreCommit2: 1,
Commit: 1,
}
sealTimings, sealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal"))
sealTimings, extendedSealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal"))
if err != nil {
return xerrors.Errorf("failed to run seals: %w", err)
}
@ -296,7 +299,13 @@ var sealBenchCmd = &cli.Command{
}
for _, s := range genm.Sectors {
sealedSectors = append(sealedSectors, saproof2.SectorInfo{
extendedSealedSectors = append(extendedSealedSectors, saproof7.ExtendedSectorInfo{
SealedCID: s.CommR,
SectorNumber: s.SectorID,
SealProof: s.ProofType,
SectorKey: nil,
})
sealedSectors = append(sealedSectors, proof.SectorInfo{
SealedCID: s.CommR,
SectorNumber: s.SectorID,
SealProof: s.ProofType,
@ -325,20 +334,20 @@ var sealBenchCmd = &cli.Command{
return err
}
fcandidates, err := ffiwrapper.ProofVerifier.GenerateWinningPoStSectorChallenge(context.TODO(), wipt, mid, challenge[:], uint64(len(sealedSectors)))
fcandidates, err := ffiwrapper.ProofVerifier.GenerateWinningPoStSectorChallenge(context.TODO(), wipt, mid, challenge[:], uint64(len(extendedSealedSectors)))
if err != nil {
return err
}
candidates := make([]saproof2.SectorInfo, len(fcandidates))
xcandidates := make([]saproof7.ExtendedSectorInfo, len(fcandidates))
for i, fcandidate := range fcandidates {
candidates[i] = sealedSectors[fcandidate]
xcandidates[i] = extendedSealedSectors[fcandidate]
}
gencandidates := time.Now()
log.Info("computing winning post snark (cold)")
proof1, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:])
proof1, err := sb.GenerateWinningPoSt(context.TODO(), mid, xcandidates, challenge[:])
if err != nil {
return err
}
@ -346,14 +355,23 @@ var sealBenchCmd = &cli.Command{
winningpost1 := time.Now()
log.Info("computing winning post snark (hot)")
proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:])
proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, xcandidates, challenge[:])
if err != nil {
return err
}
candidates := make([]saproof7.SectorInfo, len(xcandidates))
for i, xsi := range xcandidates {
candidates[i] = saproof7.SectorInfo{
SealedCID: xsi.SealedCID,
SectorNumber: xsi.SectorNumber,
SealProof: xsi.SealProof,
}
}
winnningpost2 := time.Now()
pvi1 := saproof2.WinningPoStVerifyInfo{
pvi1 := saproof7.WinningPoStVerifyInfo{
Randomness: abi.PoStRandomness(challenge[:]),
Proofs: proof1,
ChallengedSectors: candidates,
@ -369,7 +387,7 @@ var sealBenchCmd = &cli.Command{
verifyWinningPost1 := time.Now()
pvi2 := saproof2.WinningPoStVerifyInfo{
pvi2 := saproof7.WinningPoStVerifyInfo{
Randomness: abi.PoStRandomness(challenge[:]),
Proofs: proof2,
ChallengedSectors: candidates,
@ -386,7 +404,7 @@ var sealBenchCmd = &cli.Command{
verifyWinningPost2 := time.Now()
log.Info("computing window post snark (cold)")
wproof1, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, sealedSectors, challenge[:])
wproof1, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, extendedSealedSectors, challenge[:])
if err != nil {
return err
}
@ -394,7 +412,7 @@ var sealBenchCmd = &cli.Command{
windowpost1 := time.Now()
log.Info("computing window post snark (hot)")
wproof2, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, sealedSectors, challenge[:])
wproof2, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, extendedSealedSectors, challenge[:])
if err != nil {
return err
}
@ -502,10 +520,10 @@ type ParCfg struct {
Commit int
}
func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof2.SectorInfo, error) {
func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof7.ExtendedSectorInfo, error) {
var pieces []abi.PieceInfo
sealTimings := make([]SealingResult, numSectors)
sealedSectors := make([]saproof2.SectorInfo, numSectors)
sealedSectors := make([]saproof7.ExtendedSectorInfo, numSectors)
preCommit2Sema := make(chan struct{}, par.PreCommit2)
commitSema := make(chan struct{}, par.Commit)
@ -579,10 +597,11 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
precommit2 := time.Now()
<-preCommit2Sema
sealedSectors[i] = saproof2.SectorInfo{
sealedSectors[i] = saproof7.ExtendedSectorInfo{
SealProof: sid.ProofType,
SectorNumber: i,
SealedCID: cids.Sealed,
SectorKey: nil,
}
seed := lapi.SealSeed{

View File

@ -470,6 +470,8 @@ var stateList = []stateMeta{
{col: color.FgBlue, state: sealing.Empty},
{col: color.FgBlue, state: sealing.WaitDeals},
{col: color.FgBlue, state: sealing.AddPiece},
{col: color.FgBlue, state: sealing.SnapDealsWaitDeals},
{col: color.FgBlue, state: sealing.SnapDealsAddPiece},
{col: color.FgRed, state: sealing.UndefinedSectorState},
{col: color.FgYellow, state: sealing.Packing},
@ -488,6 +490,12 @@ var stateList = []stateMeta{
{col: color.FgYellow, state: sealing.SubmitCommitAggregate},
{col: color.FgYellow, state: sealing.CommitAggregateWait},
{col: color.FgYellow, state: sealing.FinalizeSector},
{col: color.FgYellow, state: sealing.SnapDealsPacking},
{col: color.FgYellow, state: sealing.UpdateReplica},
{col: color.FgYellow, state: sealing.ProveReplicaUpdate},
{col: color.FgYellow, state: sealing.SubmitReplicaUpdate},
{col: color.FgYellow, state: sealing.ReplicaUpdateWait},
{col: color.FgYellow, state: sealing.FinalizeReplicaUpdate},
{col: color.FgCyan, state: sealing.Terminating},
{col: color.FgCyan, state: sealing.TerminateWait},
@ -495,6 +503,7 @@ var stateList = []stateMeta{
{col: color.FgCyan, state: sealing.TerminateFailed},
{col: color.FgCyan, state: sealing.Removing},
{col: color.FgCyan, state: sealing.Removed},
{col: color.FgCyan, state: sealing.AbortUpgrade},
{col: color.FgRed, state: sealing.FailedUnrecoverable},
{col: color.FgRed, state: sealing.AddPieceFailed},
@ -512,6 +521,9 @@ var stateList = []stateMeta{
{col: color.FgRed, state: sealing.RemoveFailed},
{col: color.FgRed, state: sealing.DealsExpired},
{col: color.FgRed, state: sealing.RecoverDealIDs},
{col: color.FgRed, state: sealing.SnapDealsAddPieceFailed},
{col: color.FgRed, state: sealing.SnapDealsDealsExpired},
{col: color.FgRed, state: sealing.ReplicaUpdateFailed},
}
func init() {

View File

@ -11,6 +11,9 @@ import (
"strings"
"time"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/docker/go-units"
"github.com/fatih/color"
cbor "github.com/ipfs/go-ipld-cbor"
@ -20,6 +23,7 @@ import (
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
"github.com/filecoin-project/lotus/api"
@ -50,11 +54,13 @@ var sectorsCmd = &cli.Command{
sectorsExtendCmd,
sectorsTerminateCmd,
sectorsRemoveCmd,
sectorsSnapUpCmd,
sectorsMarkForUpgradeCmd,
sectorsStartSealCmd,
sectorsSealDelayCmd,
sectorsCapacityCollateralCmd,
sectorsBatching,
sectorsRefreshPieceMatchingCmd,
},
}
@ -1476,6 +1482,44 @@ var sectorsRemoveCmd = &cli.Command{
},
}
var sectorsSnapUpCmd = &cli.Command{
Name: "snap-up",
Usage: "Mark a committed capacity sector to be filled with deals",
ArgsUsage: "<sectorNum>",
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 1 {
return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number"))
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, nCloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer nCloser()
ctx := lcli.ReqContext(cctx)
nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to get network version: %w", err)
}
if nv < network.Version15 {
return xerrors.Errorf("snap deals upgrades enabled in network v15")
}
id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
if err != nil {
return xerrors.Errorf("could not parse sector number: %w", err)
}
return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), true)
},
}
var sectorsMarkForUpgradeCmd = &cli.Command{
Name: "mark-for-upgrade",
Usage: "Mark a committed capacity sector for replacement by a sector with deals",
@ -1490,14 +1534,40 @@ var sectorsMarkForUpgradeCmd = &cli.Command{
return err
}
defer closer()
api, nCloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer nCloser()
ctx := lcli.ReqContext(cctx)
nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to get network version: %w", err)
}
if nv >= network.Version15 {
return xerrors.Errorf("classic cc upgrades disabled v15 and beyond, use `snap-up`")
}
// disable mark for upgrade two days before the ntwk v15 upgrade
// TODO: remove the following block in v1.15.1
head, err := api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("failed to get chain head: %w", err)
}
twoDays := abi.ChainEpoch(2 * builtin.EpochsInDay)
if head.Height() > (build.UpgradeOhSnapHeight - twoDays) {
return xerrors.Errorf("OhSnap is coming soon, " +
"please use `snap-up` to upgrade your cc sectors after the network v15 upgrade!")
}
id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
if err != nil {
return xerrors.Errorf("could not parse sector number: %w", err)
}
return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id))
return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), false)
},
}
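(For scale: builtin.EpochsInDay is 2880 at 30-second epochs, so twoDays is 5,760 epochs; on butterflynet, where this diff sets UpgradeOhSnapHeight to 30262, the guard would kick in once the head passes epoch 24502.)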
@ -2000,6 +2070,25 @@ var sectorsBatchingPendingPreCommit = &cli.Command{
},
}
var sectorsRefreshPieceMatchingCmd = &cli.Command{
Name: "match-pending-pieces",
Usage: "force a refreshed match of pending pieces to open sectors without manually waiting for more deals",
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if err := nodeApi.SectorMatchPendingPiecesToOpenSectors(ctx); err != nil {
return err
}
return nil
},
}
func yesno(b bool) string {
if b {
return color.GreenString("YES")

View File

@ -160,6 +160,16 @@ var runCmd = &cli.Command{
Usage: "enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap)",
Value: true,
},
&cli.BoolFlag{
Name: "replica-update",
Usage: "enable replica update",
Value: true,
},
&cli.BoolFlag{
Name: "prove-replica-update2",
Usage: "enable prove replica update 2",
Value: true,
},
&cli.BoolFlag{
Name: "windowpost",
Usage: "enable window post",
@ -270,7 +280,7 @@ var runCmd = &cli.Command{
}
if !exclusiveSet {
taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize)
taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize)
}
if cctx.Bool("addpiece") {
@ -288,6 +298,12 @@ var runCmd = &cli.Command{
if cctx.Bool("commit") {
taskTypes = append(taskTypes, sealtasks.TTCommit2)
}
if cctx.Bool("replicaupdate") {
taskTypes = append(taskTypes, sealtasks.TTReplicaUpdate)
}
if cctx.Bool("prove-replica-update2") {
taskTypes = append(taskTypes, sealtasks.TTProveReplicaUpdate2)
}
if len(taskTypes) == 0 {
return xerrors.Errorf("no task types specified")

View File

@ -67,6 +67,7 @@ func main() {
balancerCmd,
sendCsvCmd,
terminationsCmd,
migrationsCmd,
}
app := &cli.App{

View File

@ -0,0 +1,127 @@
package main
import (
"context"
"fmt"
"io"
"time"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/node/repo"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
)
var migrationsCmd = &cli.Command{
Name: "migrate-nv15",
Description: "Run the specified migration",
ArgsUsage: "[block to look back from]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "repo",
Value: "~/.lotus",
},
},
Action: func(cctx *cli.Context) error {
ctx := context.TODO()
if cctx.NArg() != 1 {
return fmt.Errorf("must pass block cid")
}
blkCid, err := cid.Decode(cctx.Args().First())
if err != nil {
return fmt.Errorf("failed to parse input: %w", err)
}
fsrepo, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return err
}
lkrepo, err := fsrepo.Lock(repo.FullNode)
if err != nil {
return err
}
defer lkrepo.Close() //nolint:errcheck
bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
}
defer func() {
if c, ok := bs.(io.Closer); ok {
if err := c.Close(); err != nil {
log.Warnf("failed to close blockstore: %s", err)
}
}
}()
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil)
if err != nil {
return err
}
cache := nv15.NewMemMigrationCache()
blk, err := cs.GetBlock(ctx, blkCid)
if err != nil {
return err
}
migrationTs, err := cs.LoadTipSet(ctx, types.NewTipSetKey(blk.Parents...))
if err != nil {
return err
}
ts1, err := cs.GetTipsetByHeight(ctx, blk.Height-240, migrationTs, false)
if err != nil {
return err
}
startTime := time.Now()
err = filcns.PreUpgradeActorsV7(ctx, sm, cache, ts1.ParentState(), ts1.Height()-1, ts1)
if err != nil {
return err
}
fmt.Println("completed round 1, took ", time.Since(startTime))
startTime = time.Now()
newCid1, err := filcns.UpgradeActorsV7(ctx, sm, cache, nil, blk.ParentStateRoot, blk.Height-1, migrationTs)
if err != nil {
return err
}
fmt.Println("completed round actual (with cache), took ", time.Since(startTime))
fmt.Println("new cid", newCid1)
startTime = time.Now()
newCid2, err := filcns.UpgradeActorsV7(ctx, sm, nv15.NewMemMigrationCache(), nil, blk.ParentStateRoot, blk.Height-1, migrationTs)
if err != nil {
return err
}
fmt.Println("completed round actual (without cache), took ", time.Since(startTime))
fmt.Println("new cid", newCid2)
return nil
},
}
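For reference, given the lotus-shed wiring above, a run would look roughly like `lotus-shed migrate-nv15 --repo ~/.lotus <blockCid>`: the tool pre-migrates the state from 240 epochs before the given block, then runs the real migration against the block's parent state twice, once with the warmed cache and once cold, printing both resulting state roots and timings.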

View File

@ -78,7 +78,7 @@ func (mockVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool,
return false, nil
}
func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) {
panic("should not be called")
}
func (mockVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {

View File

@ -119,6 +119,7 @@
* [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration)
* [SectorGetSealDelay](#SectorGetSealDelay)
* [SectorMarkForUpgrade](#SectorMarkForUpgrade)
* [SectorMatchPendingPiecesToOpenSectors](#SectorMatchPendingPiecesToOpenSectors)
* [SectorPreCommitFlush](#SectorPreCommitFlush)
* [SectorPreCommitPending](#SectorPreCommitPending)
* [SectorRemove](#SectorRemove)
@ -215,7 +216,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131584,
"BlockDelay": 42
}
```
@ -358,12 +359,15 @@ Inputs:
{
"SealProof": 8,
"SectorNumber": 9,
"SectorKey": null,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
}
],
"Bw=="
"Bw==",
10101,
15
]
```
@ -2474,12 +2478,22 @@ Perms: admin
Inputs:
```json
[
9
9,
true
]
```
Response: `{}`
### SectorMatchPendingPiecesToOpenSectors
Perms: admin
Inputs: `null`
Response: `{}`
### SectorPreCommitFlush
SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
Returns null if message wasn't sent

View File

@ -743,7 +743,7 @@ Perms: admin
Inputs: `null`
Response: `131328`
Response: `131584`
## Add

View File

@ -283,7 +283,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131584,
"BlockDelay": 42
}
```
@ -2579,6 +2579,7 @@ Response:
{
"SealProof": 8,
"SectorNumber": 9,
"SectorKey": null,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}

View File

@ -289,7 +289,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131584,
"BlockDelay": 42
}
```
@ -2591,6 +2591,7 @@ Response:
{
"SealProof": 8,
"SectorNumber": 9,
"SectorKey": null,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}

View File

@ -7,7 +7,7 @@ USAGE:
lotus-miner [global options] command [command options] [arguments...]
VERSION:
1.13.3-dev
1.15.0-dev
COMMANDS:
init Initialize a lotus miner repo
@ -1514,23 +1514,25 @@ USAGE:
lotus-miner sectors command [command options] [arguments...]
COMMANDS:
status Get the seal status of a sector by its number
list List sectors
refs List References to sectors
update-state ADVANCED: manually update the state of a sector, this may aid in error recovery
pledge store random data in a sector
check-expire Inspect expiring sectors
expired Get or cleanup expired sectors
renew Renew expiring sectors while not exceeding each sector's max life
extend Extend sector expiration
terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)
remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty))
mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals
seal Manually start sealing a sector (filling any unused space with junk)
set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts
get-cc-collateral Get the collateral required to pledge a committed capacity sector
batching manage batch sector operations
help, h Shows a list of commands or help for one command
status Get the seal status of a sector by its number
list List sectors
refs List References to sectors
update-state ADVANCED: manually update the state of a sector, this may aid in error recovery
pledge store random data in a sector
check-expire Inspect expiring sectors
expired Get or cleanup expired sectors
renew Renew expiring sectors while not exceeding each sector's max life
extend Extend sector expiration
terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)
remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty))
snap-up Mark a committed capacity sector to be filled with deals
mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals
seal Manually start sealing a sector (filling any unused space with junk)
set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts
get-cc-collateral Get the collateral required to pledge a committed capacity sector
batching manage batch sector operations
match-pending-pieces force a refreshed match of pending pieces to open sectors without manually waiting for more deals
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help (default: false)
@ -1746,6 +1748,19 @@ OPTIONS:
```
### lotus-miner sectors snap-up
```
NAME:
lotus-miner sectors snap-up - Mark a committed capacity sector to be filled with deals
USAGE:
lotus-miner sectors snap-up [command options] <sectorNum>
OPTIONS:
--help, -h show help (default: false)
```
### lotus-miner sectors mark-for-upgrade
```
NAME:
@ -1846,6 +1861,19 @@ OPTIONS:
```
### lotus-miner sectors match-pending-pieces
```
NAME:
lotus-miner sectors match-pending-pieces - force a refreshed match of pending pieces to open sectors without manually waiting for more deals
USAGE:
lotus-miner sectors match-pending-pieces [command options] [arguments...]
OPTIONS:
--help, -h show help (default: false)
```
## lotus-miner proving
```
NAME:

View File

@ -7,7 +7,7 @@ USAGE:
lotus-worker [global options] command [command options] [arguments...]
VERSION:
1.13.3-dev
1.15.0-dev
COMMANDS:
run Start lotus worker
@ -44,8 +44,8 @@ OPTIONS:
--unseal enable unsealing (32G sectors: 1 core, 128GiB Memory) (default: true)
--precommit2 enable precommit2 (32G sectors: all cores, 96GiB Memory) (default: true)
--commit enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap) (default: true)
--windowpost enable window post (default: false)
--winningpost enable winning post (default: false)
--replica-update enable replica update (default: true)
--prove-replica-update2 enable prove replica update 2 (default: true)
--parallel-fetch-limit value maximum fetch operations to run in parallel (default: 5)
--timeout value used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m")
--help, -h show help (default: false)

View File

@ -7,7 +7,7 @@ USAGE:
lotus [global options] command [command options] [arguments...]
VERSION:
1.13.3-dev
1.15.0-dev
COMMANDS:
daemon Start a lotus daemon process

View File

@ -424,6 +424,12 @@
# env var: LOTUS_STORAGE_ALLOWUNSEAL
#AllowUnseal = true
# env var: LOTUS_STORAGE_ALLOWREPLICAUPDATE
#AllowReplicaUpdate = true
# env var: LOTUS_STORAGE_ALLOWPROVEREPLICAUPDATE2
#AllowProveReplicaUpdate2 = true
# env var: LOTUS_STORAGE_RESOURCEFILTERING
#ResourceFiltering = "hardware"
@ -544,6 +550,14 @@
# env var: LOTUS_DAGSTORE_MAXCONCURRENTREADYFETCHES
#MaxConcurrentReadyFetches = 0
# The maximum number of unseals that can be processed simultaneously
# from the storage subsystem. 0 means unlimited.
# Default value: 5.
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENTUNSEALS
#MaxConcurrentUnseals = 5
# The maximum number of simultaneous inflight API calls to the storage
# subsystem.
# Default value: 100.

extern/filecoin-ffi vendored

@ -1 +1 @@
Subproject commit 8e377f906ae40239d71979cd2dd20f3c89952e3c
Subproject commit e660df5616e397b2d8ac316f45ddfa7a44637971

View File

@ -715,7 +715,6 @@ func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, p
if err != nil {
return empty, xerrors.Errorf("failed to update replica %d with new deal data: %w", sector.ID.Number, err)
}
return storage.ReplicaUpdateOut{NewSealed: sealed, NewUnsealed: unsealed}, nil
}
@ -855,6 +854,14 @@ func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef,
return xerrors.Errorf("not supported at this layer")
}
func (sb *Sealer) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error {
return xerrors.Errorf("not supported at this layer")
}
func (sb *Sealer) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error {
return xerrors.Errorf("not supported at this layer")
}
func (sb *Sealer) Remove(ctx context.Context, sector storage.SectorRef) error {
return xerrors.Errorf("not supported at this layer") // happens in localworker
}

View File

@ -19,6 +19,7 @@ import (
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/ipfs/go-cid"
@ -180,16 +181,16 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage
func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7}
sis := make([]proof2.SectorInfo, len(seals))
xsis := make([]proof7.ExtendedSectorInfo, len(seals))
for i, s := range seals {
sis[i] = proof2.SectorInfo{
xsis[i] = proof7.ExtendedSectorInfo{
SealProof: s.ref.ProofType,
SectorNumber: s.ref.ID.Number,
SealedCID: s.cids.Sealed,
}
}
proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, sis, randomness)
proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, xsis, randomness)
if len(skipped) > 0 {
require.Error(t, err)
require.EqualValues(t, skipped, skp)
@ -200,7 +201,16 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
t.Fatalf("%+v", err)
}
ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), proof2.WindowPoStVerifyInfo{
sis := make([]proof7.SectorInfo, len(seals))
for i, xsi := range xsis {
sis[i] = proof7.SectorInfo{
SealProof: xsi.SealProof,
SectorNumber: xsi.SectorNumber,
SealedCID: xsi.SealedCID,
}
}
ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), proof7.WindowPoStVerifyInfo{
Randomness: randomness,
Proofs: proofs,
ChallengedSectors: sis,

View File

@ -4,9 +4,7 @@ import (
"context"
"io"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/ipfs/go-cid"
@ -36,11 +34,11 @@ type Storage interface {
}
type Verifier interface {
VerifySeal(proof5.SealVerifyInfo) (bool, error)
VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error)
VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error)
VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error)
VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error)
VerifySeal(proof.SealVerifyInfo) (bool, error)
VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error)
VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error)
VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error)
VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error)
GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error)
}
@ -49,7 +47,7 @@ type Verifier interface {
type Prover interface {
// TODO: move GenerateWinningPoStSectorChallenge from the Verifier interface to here
AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error)
AggregateSealProofs(aggregateInfo proof.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error)
}
type SectorProvider interface {

View File

@ -11,16 +11,16 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-state-types/abi"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
ffiproof "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) {
func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
randomness[31] &= 0x3f
privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS?
privsectors, skipped, done, err := sb.pubExtendedSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS?
if err != nil {
return nil, err
}
@ -32,12 +32,13 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID,
return ffi.GenerateWinningPoSt(minerID, privsectors, randomness)
}
func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) {
func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) {
randomness[31] &= 0x3f
privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof)
privsectors, skipped, done, err := sb.pubExtendedSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof)
if err != nil {
return nil, nil, xerrors.Errorf("gathering sector info: %w", err)
}
defer done()
if len(skipped) > 0 {
@ -53,11 +54,10 @@ func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, s
Number: f,
})
}
return proof, faultyIDs, err
}
func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof5.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) {
func (sb *Sealer) pubExtendedSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) {
fmap := map[abi.SectorNumber]struct{}{}
for _, fault := range faults {
fmap[fault] = struct{}{}
@ -81,14 +81,32 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
ID: abi.SectorID{Miner: mid, Number: s.SectorNumber},
ProofType: s.SealProof,
}
paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage)
if err != nil {
log.Warnw("failed to acquire sector, skipping", "sector", sid.ID, "error", err)
skipped = append(skipped, sid.ID)
continue
proveUpdate := s.SectorKey != nil
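// A non-nil SectorKey marks a snap-upgraded sector: the PoSt must then be
// generated over the update replica (FTUpdate/FTUpdateCache) rather than the
// original sealed files (FTSealed/FTCache).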
var cache string
var sealed string
if proveUpdate {
log.Debugf("Posting over updated sector for sector id: %d", s.SectorNumber)
paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTUpdateCache|storiface.FTUpdate, 0, storiface.PathStorage)
if err != nil {
log.Warnw("failed to acquire FTUpdateCache and FTUpdate of sector, skipping", "sector", sid.ID, "error", err)
skipped = append(skipped, sid.ID)
continue
}
doneFuncs = append(doneFuncs, d)
cache = paths.UpdateCache
sealed = paths.Update
} else {
log.Debugf("Posting over sector key sector for sector id: %d", s.SectorNumber)
paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage)
if err != nil {
log.Warnw("failed to acquire FTCache and FTSealed of sector, skipping", "sector", sid.ID, "error", err)
skipped = append(skipped, sid.ID)
continue
}
doneFuncs = append(doneFuncs, d)
cache = paths.Cache
sealed = paths.Sealed
}
doneFuncs = append(doneFuncs, d)
postProofType, err := rpt(s.SealProof)
if err != nil {
@ -96,11 +114,16 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
return ffi.SortedPrivateSectorInfo{}, nil, nil, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err)
}
ffiInfo := ffiproof.SectorInfo{
SealProof: s.SealProof,
SectorNumber: s.SectorNumber,
SealedCID: s.SealedCID,
}
out = append(out, ffi.PrivateSectorInfo{
CacheDirPath: paths.Cache,
CacheDirPath: cache,
PoStProofType: postProofType,
SealedSectorPath: paths.Sealed,
SectorInfo: s,
SealedSectorPath: sealed,
SectorInfo: ffiInfo,
})
}
@ -113,19 +136,19 @@ type proofVerifier struct{}
var ProofVerifier = proofVerifier{}
func (proofVerifier) VerifySeal(info proof5.SealVerifyInfo) (bool, error) {
func (proofVerifier) VerifySeal(info proof.SealVerifyInfo) (bool, error) {
return ffi.VerifySeal(info)
}
func (proofVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
func (proofVerifier) VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) {
return ffi.VerifyAggregateSeals(aggregate)
}
func (proofVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) {
func (proofVerifier) VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) {
return ffi.SectorUpdate.VerifyUpdateProof(update)
}
func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
_, span := trace.StartSpan(ctx, "VerifyWinningPoSt")
defer span.End()
@ -133,7 +156,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningP
return ffi.VerifyWinningPoSt(info)
}
func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
_, span := trace.StartSpan(ctx, "VerifyWindowPoSt")
defer span.End()

View File

@ -102,11 +102,13 @@ type SealerConfig struct {
ParallelFetchLimit int
// Local worker config
AllowAddPiece bool
AllowPreCommit1 bool
AllowPreCommit2 bool
AllowCommit bool
AllowUnseal bool
AllowAddPiece bool
AllowPreCommit1 bool
AllowPreCommit2 bool
AllowCommit bool
AllowUnseal bool
AllowReplicaUpdate bool
AllowProveReplicaUpdate2 bool
// ResourceFiltering instructs the system which resource filtering strategy
// to use when evaluating tasks against this worker. An empty value defaults
@ -150,7 +152,7 @@ func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls store
go m.sched.runSched()
localTasks := []sealtasks.TaskType{
sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch,
sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFetch,
}
if sc.AllowAddPiece {
localTasks = append(localTasks, sealtasks.TTAddPiece)
@ -167,6 +169,12 @@ func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls store
if sc.AllowUnseal {
localTasks = append(localTasks, sealtasks.TTUnseal)
}
if sc.AllowReplicaUpdate {
localTasks = append(localTasks, sealtasks.TTReplicaUpdate)
}
if sc.AllowProveReplicaUpdate2 {
localTasks = append(localTasks, sealtasks.TTProveReplicaUpdate2)
}
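// Replica-update tasks are gated like the sealing tasks above: a worker only
// advertises TTReplicaUpdate / TTProveReplicaUpdate2 when the corresponding
// Allow* flag is set in SealerConfig.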
wcfg := WorkerConfig{
IgnoreResourceFiltering: sc.ResourceFiltering == ResourceFilteringDisabled,
@ -615,6 +623,23 @@ func (m *Manager) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef
return m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true, nil)
}
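// ReleaseReplicaUpgrade drops the update replica and its cache for a sector
// whose snap upgrade is being abandoned, leaving the original sealed files intact.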
func (m *Manager) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTUpdateCache|storiface.FTUpdate); err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
if err := m.storage.Remove(ctx, sector.ID, storiface.FTUpdateCache, true, nil); err != nil {
return xerrors.Errorf("removing update cache: %w", err)
}
if err := m.storage.Remove(ctx, sector.ID, storiface.FTUpdate, true, nil); err != nil {
return xerrors.Errorf("removing update: %w", err)
}
return nil
}
func (m *Manager) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error {
ctx, cancel := context.WithCancel(ctx)
@ -697,7 +722,7 @@ func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error {
func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.ReplicaUpdateOut, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
log.Errorf("manager is doing replica update")
wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTReplicaUpdate, sector, pieces)
if err != nil {
return storage.ReplicaUpdateOut{}, xerrors.Errorf("getWork: %w", err)
@ -708,7 +733,7 @@ func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, p
waitRes := func() {
p, werr := m.waitWork(ctx, wk)
if werr != nil {
waitErr = werr
waitErr = xerrors.Errorf("waitWork: %w", werr)
return
}
if p != nil {
@ -728,17 +753,17 @@ func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, p
selector := newAllocSelector(m.index, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing)
err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error {
log.Errorf("scheduled work for replica update")
err := m.startWork(ctx, w, wk)(w.ReplicaUpdate(ctx, sector, pieces))
if err != nil {
return err
return xerrors.Errorf("startWork: %w", err)
}
waitRes()
return nil
})
if err != nil {
return storage.ReplicaUpdateOut{}, err
return storage.ReplicaUpdateOut{}, xerrors.Errorf("Schedule: %w", err)
}
return out, waitErr
}

View File

@ -12,12 +12,12 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/v6/actors/builtin"
"github.com/filecoin-project/specs-actors/v6/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
func (m *Manager) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
func (m *Manager) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
if !m.winningPoStSched.CanSched(ctx) {
log.Info("GenerateWinningPoSt run at lotus-miner")
return m.localProver.GenerateWinningPoSt(ctx, minerID, sectorInfo, randomness)
@ -25,7 +25,7 @@ func (m *Manager) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID,
return m.generateWinningPoSt(ctx, minerID, sectorInfo, randomness)
}
func (m *Manager) generateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
func (m *Manager) generateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
randomness[31] &= 0x3f
sectorNums := make([]abi.SectorNumber, len(sectorInfo))
@ -73,7 +73,7 @@ func (m *Manager) generateWinningPoSt(ctx context.Context, minerID abi.ActorID,
return proofs, nil
}
func (m *Manager) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) {
func (m *Manager) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) {
if !m.windowPoStSched.CanSched(ctx) {
log.Info("GenerateWindowPoSt run at lotus-miner")
return m.localProver.GenerateWindowPoSt(ctx, minerID, sectorInfo, randomness)

View File

@ -11,9 +11,7 @@ import (
"math/rand"
"sync"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/dagstore/mount"
ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper"
@ -40,7 +38,7 @@ type SectorMgr struct {
}
type mockVerifProver struct {
aggregates map[string]proof5.AggregateSealVerifyProofAndInfos // used for logging bad verifies
aggregates map[string]proof.AggregateSealVerifyProofAndInfos // used for logging bad verifies
}
func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr {
@ -337,14 +335,23 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) {
}
}
func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) {
func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, xSectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
mgr.lk.Lock()
defer mgr.lk.Unlock()
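// The mock prover ignores SectorKey: fake proofs hash only the legacy
// SectorInfo fields, so the extended infos are narrowed before use.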
sectorInfo := make([]proof.SectorInfo, len(xSectorInfo))
for i, xssi := range xSectorInfo {
sectorInfo[i] = proof.SectorInfo{
SealProof: xssi.SealProof,
SectorNumber: xssi.SectorNumber,
SealedCID: xssi.SealedCID,
}
}
return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil
}
func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) {
func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, xSectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) {
mgr.lk.Lock()
defer mgr.lk.Unlock()
@ -352,22 +359,22 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI
return nil, nil, xerrors.Errorf("failed to post (mock)")
}
si := make([]proof5.SectorInfo, 0, len(sectorInfo))
si := make([]proof.ExtendedSectorInfo, 0, len(xSectorInfo))
var skipped []abi.SectorID
var err error
for _, info := range sectorInfo {
for _, xsi := range xSectorInfo {
sid := abi.SectorID{
Miner: minerID,
Number: info.SectorNumber,
Number: xsi.SectorNumber,
}
_, found := mgr.sectors[sid]
if found && !mgr.sectors[sid].failed && !mgr.sectors[sid].corrupted {
si = append(si, info)
si = append(si, xsi)
} else {
skipped = append(skipped, sid)
err = xerrors.Errorf("skipped some sectors")
@ -378,10 +385,19 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI
return nil, skipped, err
}
return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil
sectorInfo := make([]proof.SectorInfo, len(si))
for i, xssi := range si {
sectorInfo[i] = proof.SectorInfo{
SealProof: xssi.SealProof,
SectorNumber: xssi.SectorNumber,
SealedCID: xssi.SealedCID,
}
}
return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil
}
func generateFakePoStProof(sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) []byte {
func generateFakePoStProof(sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) []byte {
randomness[31] &= 0x3f
hasher := sha256.New()
@ -396,13 +412,13 @@ func generateFakePoStProof(sectorInfo []proof5.SectorInfo, randomness abi.PoStRa
}
func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof5.PoStProof {
func generateFakePoSt(sectorInfo []proof.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof.PoStProof {
wp, err := rpt(sectorInfo[0].SealProof)
if err != nil {
panic(err)
}
return []proof5.PoStProof{
return []proof.PoStProof{
{
PoStProof: wp,
ProofBytes: generateFakePoStProof(sectorInfo, randomness),
@ -474,6 +490,14 @@ func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector storage.Sector
return nil
}
func (mgr *SectorMgr) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error {
return nil
}
func (mgr *SectorMgr) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error {
return nil
}
func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) error {
mgr.lk.Lock()
defer mgr.lk.Unlock()
@ -562,7 +586,7 @@ func (mgr *SectorMgr) ReturnGenerateSectorKeyFromData(ctx context.Context, callI
panic("not supported")
}
func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
func (m mockVerifProver) VerifySeal(svi proof.SealVerifyInfo) (bool, error) {
plen, err := svi.SealProof.ProofSize()
if err != nil {
return false, err
@ -583,7 +607,7 @@ func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
return true, nil
}
func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
func (m mockVerifProver) VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) {
out := make([]byte, m.aggLen(len(aggregate.Infos)))
for pi, svi := range aggregate.Infos {
for i := 0; i < 32; i++ {
@ -609,11 +633,11 @@ func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri
return ok, nil
}
func (m mockVerifProver) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) {
func (m mockVerifProver) VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) {
return true, nil
}
func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
out := make([]byte, m.aggLen(len(aggregateInfo.Infos))) // todo: figure out the real length
for pi, proof := range proofs {
for i := range proof[:32] {
@ -655,12 +679,12 @@ func (m mockVerifProver) aggLen(nproofs int) int {
}
}
func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
return true, nil
}
func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) {
if len(info.Proofs) != 1 {
return false, xerrors.Errorf("expected 1 proof entry")
}
@ -683,7 +707,7 @@ func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context,
}
var MockVerifier = mockVerifProver{
aggregates: map[string]proof5.AggregateSealVerifyProofAndInfos{},
aggregates: map[string]proof.AggregateSealVerifyProofAndInfos{},
}
var MockProver = MockVerifier

View File

@ -7,7 +7,7 @@ import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/runtime/proof"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
@ -23,11 +23,11 @@ type testExec struct {
apch chan chan apres
}
func (t *testExec) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
func (t *testExec) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
panic("implement me")
}
func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) {
func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) {
panic("implement me")
}
@ -67,6 +67,14 @@ func (t *testExec) ReleaseSealed(ctx context.Context, sector storage.SectorRef)
panic("implement me")
}
func (t *testExec) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error {
panic("implement me")
}
func (t *testExec) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error {
panic("implement me")
}
func (t *testExec) Remove(ctx context.Context, sector storage.SectorRef) error {
panic("implement me")
}

View File

@ -143,7 +143,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{184, 26}); err != nil {
if _, err := w.Write([]byte{184, 32}); err != nil {
return err
}
@ -573,6 +573,137 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
return err
}
// t.CCUpdate (bool) (bool)
if len("CCUpdate") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"CCUpdate\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CCUpdate"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("CCUpdate")); err != nil {
return err
}
if err := cbg.WriteBool(w, t.CCUpdate); err != nil {
return err
}
// t.CCPieces ([]sealing.Piece) (slice)
if len("CCPieces") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"CCPieces\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CCPieces"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("CCPieces")); err != nil {
return err
}
if len(t.CCPieces) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.CCPieces was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.CCPieces))); err != nil {
return err
}
for _, v := range t.CCPieces {
if err := v.MarshalCBOR(w); err != nil {
return err
}
}
// t.UpdateSealed (cid.Cid) (struct)
if len("UpdateSealed") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"UpdateSealed\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("UpdateSealed"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("UpdateSealed")); err != nil {
return err
}
if t.UpdateSealed == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.UpdateSealed); err != nil {
return xerrors.Errorf("failed to write cid field t.UpdateSealed: %w", err)
}
}
// t.UpdateUnsealed (cid.Cid) (struct)
if len("UpdateUnsealed") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"UpdateUnsealed\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("UpdateUnsealed"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("UpdateUnsealed")); err != nil {
return err
}
if t.UpdateUnsealed == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.UpdateUnsealed); err != nil {
return xerrors.Errorf("failed to write cid field t.UpdateUnsealed: %w", err)
}
}
// t.ReplicaUpdateProof (storage.ReplicaUpdateProof) (slice)
if len("ReplicaUpdateProof") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"ReplicaUpdateProof\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ReplicaUpdateProof"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("ReplicaUpdateProof")); err != nil {
return err
}
if len(t.ReplicaUpdateProof) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field t.ReplicaUpdateProof was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ReplicaUpdateProof))); err != nil {
return err
}
if _, err := w.Write(t.ReplicaUpdateProof[:]); err != nil {
return err
}
// t.ReplicaUpdateMessage (cid.Cid) (struct)
if len("ReplicaUpdateMessage") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"ReplicaUpdateMessage\" was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ReplicaUpdateMessage"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("ReplicaUpdateMessage")); err != nil {
return err
}
if t.ReplicaUpdateMessage == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.ReplicaUpdateMessage); err != nil {
return xerrors.Errorf("failed to write cid field t.ReplicaUpdateMessage: %w", err)
}
}
// t.FaultReportMsg (cid.Cid) (struct)
if len("FaultReportMsg") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"FaultReportMsg\" was too long")
@ -1166,6 +1297,145 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error {
}
t.InvalidProofs = uint64(extra)
}
// t.CCUpdate (bool) (bool)
case "CCUpdate":
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajOther {
return fmt.Errorf("booleans must be major type 7")
}
switch extra {
case 20:
t.CCUpdate = false
case 21:
t.CCUpdate = true
default:
return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
}
// t.CCPieces ([]sealing.Piece) (slice)
case "CCPieces":
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if extra > cbg.MaxLength {
return fmt.Errorf("t.CCPieces: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.CCPieces = make([]Piece, extra)
}
for i := 0; i < int(extra); i++ {
var v Piece
if err := v.UnmarshalCBOR(br); err != nil {
return err
}
t.CCPieces[i] = v
}
// t.UpdateSealed (cid.Cid) (struct)
case "UpdateSealed":
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
c, err := cbg.ReadCid(br)
if err != nil {
return xerrors.Errorf("failed to read cid field t.UpdateSealed: %w", err)
}
t.UpdateSealed = &c
}
}
// t.UpdateUnsealed (cid.Cid) (struct)
case "UpdateUnsealed":
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
c, err := cbg.ReadCid(br)
if err != nil {
return xerrors.Errorf("failed to read cid field t.UpdateUnsealed: %w", err)
}
t.UpdateUnsealed = &c
}
}
// t.ReplicaUpdateProof (storage.ReplicaUpdateProof) (slice)
case "ReplicaUpdateProof":
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.ReplicaUpdateProof: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.ReplicaUpdateProof = make([]uint8, extra)
}
if _, err := io.ReadFull(br, t.ReplicaUpdateProof[:]); err != nil {
return err
}
// t.ReplicaUpdateMessage (cid.Cid) (struct)
case "ReplicaUpdateMessage":
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
c, err := cbg.ReadCid(br)
if err != nil {
return xerrors.Errorf("failed to read cid field t.ReplicaUpdateMessage: %w", err)
}
t.ReplicaUpdateMessage = &c
}
}
// t.FaultReportMsg (cid.Cid) (struct)
case "FaultReportMsg":

View File

@ -35,6 +35,9 @@ type ErrInvalidProof struct{ error }
type ErrNoPrecommit struct{ error }
type ErrCommitWaitFailed struct{ error }
type ErrBadRU struct{ error }
type ErrBadPR struct{ error }
func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) error {
tok, height, err := api.ChainHead(ctx)
if err != nil {
@ -187,3 +190,32 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte,
return nil
}
// check that sector info is good after running a replica update
func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, api SealingAPI) error {
if err := checkPieces(ctx, maddr, si, api); err != nil {
return err
}
if !si.CCUpdate {
return xerrors.Errorf("replica update on sector not marked for update")
}
commD, err := api.StateComputeDataCommitment(ctx, maddr, si.SectorType, si.dealIDs(), tok)
if err != nil {
return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)}
}
if si.UpdateUnsealed == nil || !commD.Equals(*si.UpdateUnsealed) {
return &ErrBadRU{xerrors.Errorf("on chain CommD differs from sector: %s != %s", commD, si.CommD)}
}
if si.UpdateSealed == nil {
return &ErrBadRU{xerrors.Errorf("nil sealed cid")}
}
if si.ReplicaUpdateProof == nil {
return &ErrBadPR{xerrors.Errorf("nil PR2 proof")}
}
return nil
}

View File

@ -133,6 +133,44 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorFinalizeFailed{}, FinalizeFailed),
),
// Snap deals
SnapDealsWaitDeals: planOne(
on(SectorAddPiece{}, SnapDealsAddPiece),
on(SectorStartPacking{}, SnapDealsPacking),
),
SnapDealsAddPiece: planOne(
on(SectorPieceAdded{}, SnapDealsWaitDeals),
apply(SectorStartPacking{}),
apply(SectorAddPiece{}),
on(SectorAddPieceFailed{}, SnapDealsAddPieceFailed),
),
SnapDealsPacking: planOne(
on(SectorPacked{}, UpdateReplica),
),
UpdateReplica: planOne(
on(SectorReplicaUpdate{}, ProveReplicaUpdate),
on(SectorUpdateReplicaFailed{}, ReplicaUpdateFailed),
on(SectorDealsExpired{}, SnapDealsDealsExpired),
on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
),
ProveReplicaUpdate: planOne(
on(SectorProveReplicaUpdate{}, SubmitReplicaUpdate),
on(SectorProveReplicaUpdateFailed{}, ReplicaUpdateFailed),
on(SectorDealsExpired{}, SnapDealsDealsExpired),
on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
),
SubmitReplicaUpdate: planOne(
on(SectorReplicaUpdateSubmitted{}, ReplicaUpdateWait),
on(SectorSubmitReplicaUpdateFailed{}, ReplicaUpdateFailed),
),
ReplicaUpdateWait: planOne(
on(SectorReplicaUpdateLanded{}, FinalizeReplicaUpdate),
on(SectorSubmitReplicaUpdateFailed{}, ReplicaUpdateFailed),
on(SectorAbortUpgrade{}, AbortUpgrade),
),
FinalizeReplicaUpdate: planOne(
on(SectorFinalized{}, Proving),
),
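// For orientation (editorial summary, not generated by the FSM): the snap-deals
// happy path is Proving -> SnapDealsWaitDeals -> SnapDealsAddPiece ->
// SnapDealsPacking -> UpdateReplica -> ProveReplicaUpdate -> SubmitReplicaUpdate ->
// ReplicaUpdateWait -> FinalizeReplicaUpdate -> Proving; the failure edges above
// detour through ReplicaUpdateFailed, SnapDealsDealsExpired,
// SnapDealsRecoverDealIDs, or AbortUpgrade before rejoining it.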
// Sealing errors
AddPieceFailed: planOne(
@ -188,11 +226,37 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
onReturning(SectorUpdateDealIDs{}),
),
// Snap Deals Errors
SnapDealsAddPieceFailed: planOne(
on(SectorRetryWaitDeals{}, SnapDealsWaitDeals),
apply(SectorStartPacking{}),
apply(SectorAddPiece{}),
),
SnapDealsDealsExpired: planOne(
on(SectorAbortUpgrade{}, AbortUpgrade),
),
SnapDealsRecoverDealIDs: planOne(
on(SectorUpdateDealIDs{}, SubmitReplicaUpdate),
on(SectorAbortUpgrade{}, AbortUpgrade),
),
AbortUpgrade: planOneOrIgnore(
on(SectorRevertUpgradeToProving{}, Proving),
),
ReplicaUpdateFailed: planOne(
on(SectorRetrySubmitReplicaUpdateWait{}, ReplicaUpdateWait),
on(SectorRetrySubmitReplicaUpdate{}, SubmitReplicaUpdate),
on(SectorRetryReplicaUpdate{}, UpdateReplica),
on(SectorRetryProveReplicaUpdate{}, ProveReplicaUpdate),
on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
on(SectorDealsExpired{}, SnapDealsDealsExpired),
),
// Post-seal
Proving: planOne(
on(SectorFaultReported{}, FaultReported),
on(SectorFaulty{}, Faulty),
on(SectorStartCCUpdate{}, SnapDealsWaitDeals),
),
Terminating: planOne(
on(SectorTerminating{}, TerminateWait),
@ -209,7 +273,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
TerminateFailed: planOne(
// SectorTerminating (global)
),
Removing: planOne(
Removing: planOneOrIgnore(
on(SectorRemoved{}, Removed),
on(SectorRemoveFailed{}, RemoveFailed),
),
@ -355,13 +419,6 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
log.Errorw("update sector stats", "error", err)
}
// todo: drop this, use Context iface everywhere
wrapCtx := func(f func(Context, SectorInfo) error) func(statemachine.Context, SectorInfo) error {
return func(ctx statemachine.Context, info SectorInfo) error {
return f(&ctx, info)
}
}
switch state.State {
// Happy path
case Empty:
@ -403,6 +460,24 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
case FinalizeSector:
return m.handleFinalizeSector, processed, nil
// Snap deals updates
case SnapDealsWaitDeals:
return m.handleWaitDeals, processed, nil
case SnapDealsAddPiece:
return m.handleAddPiece, processed, nil
case SnapDealsPacking:
return m.handlePacking, processed, nil
case UpdateReplica:
return m.handleReplicaUpdate, processed, nil
case ProveReplicaUpdate:
return m.handleProveReplicaUpdate, processed, nil
case SubmitReplicaUpdate:
return m.handleSubmitReplicaUpdate, processed, nil
case ReplicaUpdateWait:
return m.handleReplicaUpdateWait, processed, nil
case FinalizeReplicaUpdate:
return m.handleFinalizeReplicaUpdate, processed, nil
// Handled failure modes
case AddPieceFailed:
return m.handleAddPieceFailed, processed, nil
@ -426,7 +501,20 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
case DealsExpired:
return m.handleDealsExpired, processed, nil
case RecoverDealIDs:
return wrapCtx(m.HandleRecoverDealIDs), processed, nil
return m.HandleRecoverDealIDs, processed, nil
// Snap Deals failure modes
case SnapDealsAddPieceFailed:
return m.handleAddPieceFailed, processed, nil
case SnapDealsDealsExpired:
return m.handleDealsExpiredSnapDeals, processed, nil
case SnapDealsRecoverDealIDs:
return m.handleSnapDealsRecoverDealIDs, processed, nil
case ReplicaUpdateFailed:
return m.handleSubmitReplicaUpdateFailed, processed, nil
case AbortUpgrade:
return m.handleAbortUpgrade, processed, nil
// Post-seal
case Proving:
@ -642,3 +730,16 @@ func planOne(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err e
return uint64(len(events)), nil
}
}
// planOneOrIgnore is planOne, except that it ignores events that are unhandled
// in the current state instead of erroring; this avoids having to enumerate
// every possible event in states reached by forced overrides (e.g. Removing,
// AbortUpgrade).
func planOneOrIgnore(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err error))) func(events []statemachine.Event, state *SectorInfo) (uint64, error) {
f := planOne(ts...)
return func(events []statemachine.Event, state *SectorInfo) (uint64, error) {
cnt, err := f(events, state)
if err != nil {
log.Warnf("planOneOrIgnore: ignoring error from planOne: %s", err)
}
return cnt, nil
}
}

View File

@ -295,6 +295,46 @@ type SectorFinalizeFailed struct{ error }
func (evt SectorFinalizeFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorFinalizeFailed) apply(*SectorInfo) {}
// Snap deals // CC update path
type SectorStartCCUpdate struct{}
func (evt SectorStartCCUpdate) apply(state *SectorInfo) {
state.CCUpdate = true
// Clear filler pieces, but remember them in case the upgrade is aborted
state.CCPieces = state.Pieces
state.Pieces = nil
}
type SectorReplicaUpdate struct {
Out storage.ReplicaUpdateOut
}
func (evt SectorReplicaUpdate) apply(state *SectorInfo) {
state.UpdateSealed = &evt.Out.NewSealed
state.UpdateUnsealed = &evt.Out.NewUnsealed
}
type SectorProveReplicaUpdate struct {
Proof storage.ReplicaUpdateProof
}
func (evt SectorProveReplicaUpdate) apply(state *SectorInfo) {
state.ReplicaUpdateProof = evt.Proof
}
type SectorReplicaUpdateSubmitted struct {
Message cid.Cid
}
func (evt SectorReplicaUpdateSubmitted) apply(state *SectorInfo) {
state.ReplicaUpdateMessage = &evt.Message
}
type SectorReplicaUpdateLanded struct{}
func (evt SectorReplicaUpdateLanded) apply(state *SectorInfo) {}
// Failed state recovery
type SectorRetrySealPreCommit1 struct{}
@ -351,6 +391,60 @@ func (evt SectorUpdateDealIDs) apply(state *SectorInfo) {
}
}
// Snap Deals failure and recovery
type SectorRetryReplicaUpdate struct{}
func (evt SectorRetryReplicaUpdate) apply(state *SectorInfo) {}
type SectorRetryProveReplicaUpdate struct{}
func (evt SectorRetryProveReplicaUpdate) apply(state *SectorInfo) {}
type SectorUpdateReplicaFailed struct{ error }
func (evt SectorUpdateReplicaFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorUpdateReplicaFailed) apply(state *SectorInfo) {}
type SectorProveReplicaUpdateFailed struct{ error }
func (evt SectorProveReplicaUpdateFailed) FormatError(xerrors.Printer) (next error) {
return evt.error
}
func (evt SectorProveReplicaUpdateFailed) apply(state *SectorInfo) {}
type SectorAbortUpgrade struct{ error }
func (evt SectorAbortUpgrade) apply(state *SectorInfo) {}
func (evt SectorAbortUpgrade) FormatError(xerrors.Printer) (next error) {
return evt.error
}
type SectorRevertUpgradeToProving struct{}
func (evt SectorRevertUpgradeToProving) apply(state *SectorInfo) {
// cleanup sector state so that it is back in proving
state.CCUpdate = false
state.UpdateSealed = nil
state.UpdateUnsealed = nil
state.ReplicaUpdateProof = nil
state.ReplicaUpdateMessage = nil
state.Pieces = state.CCPieces
state.CCPieces = nil
}
type SectorRetrySubmitReplicaUpdateWait struct{}
func (evt SectorRetrySubmitReplicaUpdateWait) apply(state *SectorInfo) {}
type SectorRetrySubmitReplicaUpdate struct{}
func (evt SectorRetrySubmitReplicaUpdate) apply(state *SectorInfo) {}
type SectorSubmitReplicaUpdateFailed struct{}
func (evt SectorSubmitReplicaUpdateFailed) apply(state *SectorInfo) {}
// Faults
type SectorFaulty struct{}

View File

@ -59,6 +59,8 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e
return ctx.Send(SectorAddPiece{})
},
number: sector.SectorNumber,
ccUpdate: sector.CCUpdate,
}
} else {
// make sure we're only accounting for pieces which were correctly added
@ -329,6 +331,17 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec
return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err
}
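// MatchPendingPiecesToOpenSectors re-runs piece-to-sector matching on demand
// (e.g. right after marking a sector for a snap-deal upgrade) instead of
// waiting for the next incoming deal to trigger updateInput.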
func (m *Sealing) MatchPendingPiecesToOpenSectors(ctx context.Context) error {
sp, err := m.currentSealProof(ctx)
if err != nil {
return xerrors.Errorf("failed to get current seal proof: %w", err)
}
log.Debug("pieces to sector matching waiting for lock")
m.inputLk.Lock()
defer m.inputLk.Unlock()
return m.updateInput(ctx, sp)
}
// called with m.inputLk
func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) error {
ssize, err := sp.SectorSize()
@ -356,8 +369,33 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e
toAssign[proposalCid] = struct{}{}
memo := make(map[abi.SectorNumber]abi.ChainEpoch)
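// expF memoizes on-chain sector expirations so the per-piece fitness checks
// below don't repeatedly query state for the same sector.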
expF := func(sn abi.SectorNumber) (abi.ChainEpoch, error) {
if exp, ok := memo[sn]; ok {
return exp, nil
}
onChainInfo, err := m.Api.StateSectorGetInfo(ctx, m.maddr, sn, TipSetToken{})
if err != nil {
return 0, err
}
memo[sn] = onChainInfo.Expiration
return onChainInfo.Expiration, nil
}
for id, sector := range m.openSectors {
avail := abi.PaddedPieceSize(ssize).Unpadded() - sector.used
// check that the sector lifetime is long enough to fit the deal, using the latest on-chain expiration
ok, err := sector.dealFitsInLifetime(piece.deal.DealProposal.EndEpoch, expF)
if err != nil {
log.Errorf("failed to check expiration for cc Update sector %d", sector.number)
continue
}
if !ok {
exp, _ := expF(sector.number)
log.Infof("CC update sector %d cannot fit deal, expiration %d before deal end epoch %d", id, exp, piece.deal.DealProposal.EndEpoch)
continue
}
if piece.size <= avail { // (note: if we have enough space for the piece, we also have enough space for inter-piece padding)
matches = append(matches, match{
@ -416,6 +454,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e
}
if len(toAssign) > 0 {
log.Errorf("we are trying to create a new sector with open sectors %v", m.openSectors)
if err := m.tryCreateDealSector(ctx, sp); err != nil {
log.Errorw("Failed to create a new sector for deals", "error", err)
}

View File

@ -213,6 +213,21 @@ func (mr *MockSealingAPIMockRecorder) StateMarketStorageDealProposal(arg0, arg1,
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDealProposal", reflect.TypeOf((*MockSealingAPI)(nil).StateMarketStorageDealProposal), arg0, arg1, arg2)
}
// StateMinerActiveSectors mocks base method.
func (m *MockSealingAPI) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) ([]*miner.SectorOnChainInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2)
ret0, _ := ret[0].([]*miner.SectorOnChainInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors.
func (mr *MockSealingAPIMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockSealingAPI)(nil).StateMinerActiveSectors), arg0, arg1, arg2)
}
// StateMinerAvailableBalance mocks base method.
func (m *MockSealingAPI) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) {
m.ctrl.T.Helper()

View File

@ -63,6 +63,7 @@ type SealingAPI interface {
StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error)
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error)
StateMinerActiveSectors(context.Context, address.Address, TipSetToken) ([]*miner.SectorOnChainInfo, error)
StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error)
StateMarketStorageDealProposal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error)
StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
@ -121,11 +122,24 @@ type Sealing struct {
}
type openSector struct {
used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors
used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors
number abi.SectorNumber
ccUpdate bool
maybeAccept func(cid.Cid) error // called with inputLk
}
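// dealFitsInLifetime reports whether a deal ending at dealEnd can live in this
// sector: regular sectors always fit (their lifetime is set at commit time),
// while CC-update sectors must already expire at or after the deal's end epoch.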
func (o *openSector) dealFitsInLifetime(dealEnd abi.ChainEpoch, expF func(sn abi.SectorNumber) (abi.ChainEpoch, error)) (bool, error) {
if !o.ccUpdate {
return true, nil
}
expiration, err := expF(o.number)
if err != nil {
return false, err
}
return expiration >= dealEnd, nil
}
type pendingPiece struct {
size abi.UnpaddedPieceSize
deal api.PieceDealInfo

View File

@ -3,50 +3,65 @@ package sealing
type SectorState string
var ExistSectorStateList = map[SectorState]struct{}{
Empty: {},
WaitDeals: {},
Packing: {},
AddPiece: {},
AddPieceFailed: {},
GetTicket: {},
PreCommit1: {},
PreCommit2: {},
PreCommitting: {},
PreCommitWait: {},
SubmitPreCommitBatch: {},
PreCommitBatchWait: {},
WaitSeed: {},
Committing: {},
CommitFinalize: {},
CommitFinalizeFailed: {},
SubmitCommit: {},
CommitWait: {},
SubmitCommitAggregate: {},
CommitAggregateWait: {},
FinalizeSector: {},
Proving: {},
FailedUnrecoverable: {},
SealPreCommit1Failed: {},
SealPreCommit2Failed: {},
PreCommitFailed: {},
ComputeProofFailed: {},
CommitFailed: {},
PackingFailed: {},
FinalizeFailed: {},
DealsExpired: {},
RecoverDealIDs: {},
Faulty: {},
FaultReported: {},
FaultedFinal: {},
Terminating: {},
TerminateWait: {},
TerminateFinality: {},
TerminateFailed: {},
Removing: {},
RemoveFailed: {},
Removed: {},
Empty: {},
WaitDeals: {},
Packing: {},
AddPiece: {},
AddPieceFailed: {},
GetTicket: {},
PreCommit1: {},
PreCommit2: {},
PreCommitting: {},
PreCommitWait: {},
SubmitPreCommitBatch: {},
PreCommitBatchWait: {},
WaitSeed: {},
Committing: {},
CommitFinalize: {},
CommitFinalizeFailed: {},
SubmitCommit: {},
CommitWait: {},
SubmitCommitAggregate: {},
CommitAggregateWait: {},
FinalizeSector: {},
Proving: {},
FailedUnrecoverable: {},
SealPreCommit1Failed: {},
SealPreCommit2Failed: {},
PreCommitFailed: {},
ComputeProofFailed: {},
CommitFailed: {},
PackingFailed: {},
FinalizeFailed: {},
DealsExpired: {},
RecoverDealIDs: {},
Faulty: {},
FaultReported: {},
FaultedFinal: {},
Terminating: {},
TerminateWait: {},
TerminateFinality: {},
TerminateFailed: {},
Removing: {},
RemoveFailed: {},
Removed: {},
SnapDealsWaitDeals: {},
SnapDealsAddPiece: {},
SnapDealsPacking: {},
UpdateReplica: {},
ProveReplicaUpdate: {},
SubmitReplicaUpdate: {},
ReplicaUpdateWait: {},
FinalizeReplicaUpdate: {},
SnapDealsAddPieceFailed: {},
SnapDealsDealsExpired: {},
SnapDealsRecoverDealIDs: {},
ReplicaUpdateFailed: {},
AbortUpgrade: {},
}
// cmd/lotus-miner/info.go defines CLI colors corresponding to these states
// update files there when adding new states
const (
UndefinedSectorState SectorState = ""
@ -79,6 +94,17 @@ const (
FinalizeSector SectorState = "FinalizeSector"
Proving SectorState = "Proving"
// snap deals / cc update
SnapDealsWaitDeals SectorState = "SnapDealsWaitDeals"
SnapDealsAddPiece SectorState = "SnapDealsAddPiece"
SnapDealsPacking SectorState = "SnapDealsPacking"
UpdateReplica SectorState = "UpdateReplica"
ProveReplicaUpdate SectorState = "ProveReplicaUpdate"
SubmitReplicaUpdate SectorState = "SubmitReplicaUpdate"
ReplicaUpdateWait SectorState = "ReplicaUpdateWait"
FinalizeReplicaUpdate SectorState = "FinalizeReplicaUpdate"
// error modes
FailedUnrecoverable SectorState = "FailedUnrecoverable"
AddPieceFailed SectorState = "AddPieceFailed"
@ -92,6 +118,13 @@ const (
DealsExpired SectorState = "DealsExpired"
RecoverDealIDs SectorState = "RecoverDealIDs"
// snap deals error modes
SnapDealsAddPieceFailed SectorState = "SnapDealsAddPieceFailed"
SnapDealsDealsExpired SectorState = "SnapDealsDealsExpired"
SnapDealsRecoverDealIDs SectorState = "SnapDealsRecoverDealIDs"
AbortUpgrade SectorState = "AbortUpgrade"
ReplicaUpdateFailed SectorState = "ReplicaUpdateFailed"
Faulty SectorState = "Faulty" // sector is corrupted or gone for some reason
FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain
FaultedFinal SectorState = "FaultedFinal" // fault declared on chain
@ -108,11 +141,11 @@ const (
func toStatState(st SectorState, finEarly bool) statSectorState {
switch st {
case UndefinedSectorState, Empty, WaitDeals, AddPiece, AddPieceFailed:
case UndefinedSectorState, Empty, WaitDeals, AddPiece, AddPieceFailed, SnapDealsWaitDeals, SnapDealsAddPiece:
return sstStaging
case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, FinalizeSector:
case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, FinalizeSector, SnapDealsPacking, UpdateReplica, ProveReplicaUpdate, FinalizeReplicaUpdate:
return sstSealing
case SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait:
case SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, SubmitReplicaUpdate, ReplicaUpdateWait:
if finEarly {
// we use statSectorState for throttling storage use. With FinalizeEarly
// we can consider sectors in states after CommitFinalize as finalized, so

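The toStatState change above folds the new snap-deal states into the existing staging/sealing buckets that throttle storage use. A minimal, self-contained sketch of the grouping, with type names mirroring the sealing package (the helper itself is illustrative, not part of this diff):

package main

import "fmt"

type SectorState string
type statSectorState int

const (
	sstStaging statSectorState = iota
	sstSealing
	sstProving
)

// bucket mirrors the toStatState grouping: snap-deal sectors waiting for
// deals count as staging, so they share the MaxWaitDealsSectors budget with
// regular WaitDeals sectors.
func bucket(st SectorState) statSectorState {
	switch st {
	case "WaitDeals", "AddPiece", "SnapDealsWaitDeals", "SnapDealsAddPiece":
		return sstStaging
	case "SnapDealsPacking", "UpdateReplica", "ProveReplicaUpdate", "FinalizeReplicaUpdate":
		return sstSealing
	default:
		return sstProving
	}
}

func main() {
	fmt.Println(bucket("SnapDealsWaitDeals") == sstStaging) // true
}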
View File

@ -1,11 +1,13 @@
package sealing
import (
"context"
"time"
"github.com/hashicorp/go-multierror"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -181,6 +183,67 @@ func (m *Sealing) handleComputeProofFailed(ctx statemachine.Context, sector Sect
return ctx.Send(SectorRetryComputeProof{})
}
func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sector SectorInfo) error {
if sector.ReplicaUpdateMessage != nil {
mw, err := m.Api.StateSearchMsg(ctx.Context(), *sector.ReplicaUpdateMessage)
if err != nil {
// API error
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorRetrySubmitReplicaUpdateWait{})
}
if mw == nil {
return ctx.Send(SectorRetrySubmitReplicaUpdateWait{})
}
switch mw.Receipt.ExitCode {
case exitcode.Ok:
return ctx.Send(SectorRetrySubmitReplicaUpdateWait{})
case exitcode.SysErrOutOfGas:
return ctx.Send(SectorRetrySubmitReplicaUpdate{})
default:
// something else went wrong
}
}
tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil {
log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
return nil
}
if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil {
switch err.(type) {
case *ErrApi:
log.Errorf("handleSubmitReplicaUpdateFailed: api error, not proceeding: %+v", err)
return nil
case *ErrBadRU:
log.Errorf("bad replica update: %+v", err)
return ctx.Send(SectorRetryReplicaUpdate{})
case *ErrBadPR:
log.Errorf("bad PR1: +%v", err)
return ctx.Send(SectorRetryProveReplicaUpdate{})
case *ErrInvalidDeals:
return ctx.Send(SectorInvalidDealIDs{})
case *ErrExpiredDeals:
return ctx.Send(SectorDealsExpired{xerrors.Errorf("expired dealIDs in sector: %w", err)})
default:
log.Errorf("sanity check error, not proceeding: +%v", err)
return xerrors.Errorf("checkPieces sanity check error: %w", err)
}
}
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorRetrySubmitReplicaUpdate{})
}
func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo) error {
tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil {
@ -319,61 +382,40 @@ func (m *Sealing) handleDealsExpired(ctx statemachine.Context, sector SectorInfo
return ctx.Send(SectorRemove{})
}
func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error {
tok, height, err := m.Api.ChainHead(ctx.Context())
func (m *Sealing) handleDealsExpiredSnapDeals(ctx statemachine.Context, sector SectorInfo) error {
if !sector.CCUpdate {
// Should be impossible
return xerrors.Errorf("should never reach SnapDealsDealsExpired as a non-CCUpdate sector")
}
return ctx.Send(SectorAbortUpgrade{xerrors.Errorf("one of the upgrade deals expired")})
}
func (m *Sealing) handleAbortUpgrade(ctx statemachine.Context, sector SectorInfo) error {
if !sector.CCUpdate {
return xerrors.Errorf("should never reach AbortUpgrade as a non-CCUpdate sector")
}
// Remove snap deals replica if any
if err := m.sealer.ReleaseReplicaUpgrade(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil {
return xerrors.Errorf("removing CC update files from sector storage")
}
return ctx.Send(SectorRevertUpgradeToProving{})
}
// failWith is the event sent when recovery is impossible; it may be a state-machine mutator or a global mutator
func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, sector SectorInfo, failWith interface{}) error {
toFix, paddingPieces, err := recoveryPiecesToFix(ctx.Context(), m.Api, sector, m.maddr)
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)
return err
}
var toFix []int
paddingPieces := 0
for i, p := range sector.Pieces {
// if no deal is associated with the piece, ensure that we added it as
// filler (i.e. ensure that it has a zero PieceCID)
if p.DealInfo == nil {
exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded())
if !p.Piece.PieceCID.Equals(exp) {
return xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID)
}
paddingPieces++
continue
}
proposal, err := m.Api.StateMarketStorageDealProposal(ctx.Context(), p.DealInfo.DealID, tok)
if err != nil {
log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err)
toFix = append(toFix, i)
continue
}
if proposal.Provider != m.maddr {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.Provider, m.maddr)
toFix = append(toFix, i)
continue
}
if proposal.PieceCID != p.Piece.PieceCID {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)
toFix = append(toFix, i)
continue
}
if p.Piece.Size != proposal.PieceSize {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, proposal.PieceSize)
toFix = append(toFix, i)
continue
}
if height >= proposal.StartEpoch {
// TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces
// (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval)
return xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.StartEpoch, height)
}
tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil {
return err
}
failed := map[int]error{}
updates := map[int]abi.DealID{}
for _, i := range toFix {
p := sector.Pieces[i]
@ -381,7 +423,7 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error {
// TODO: check if we are in an early enough state try to remove this piece
log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID)
// Not much to do here (and this can only happen for old spacerace sectors)
return ctx.Send(SectorRemove{})
return ctx.Send(failWith)
}
var dp *market.DealProposal
@ -416,7 +458,7 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error {
if len(failed)+paddingPieces == len(sector.Pieces) {
log.Errorf("removing sector %d: all deals expired or unrecoverable: %+v", sector.SectorNumber, merr)
return ctx.Send(SectorRemove{})
return ctx.Send(failWith)
}
// todo: try to remove bad pieces (hard; see the todo above)
@ -424,9 +466,73 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error {
// for now removing sectors is probably better than having them stuck in RecoverDealIDs
// and expire anyways
log.Errorf("removing sector %d: deals expired or unrecoverable: %+v", sector.SectorNumber, merr)
return ctx.Send(SectorRemove{})
return ctx.Send(failWith)
}
// Not much to do here, we can't go back in time to commit this sector
return ctx.Send(SectorUpdateDealIDs{Updates: updates})
}
func (m *Sealing) HandleRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error {
return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorRemove{})
}
func (m *Sealing) handleSnapDealsRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error {
return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{})
}
func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, maddr address.Address) ([]int, int, error) {
tok, height, err := api.ChainHead(ctx)
if err != nil {
return nil, 0, xerrors.Errorf("getting chain head: %w", err)
}
var toFix []int
paddingPieces := 0
for i, p := range sector.Pieces {
// if no deal is associated with the piece, ensure that we added it as
// filler (i.e. ensure that it has a zero PieceCID)
if p.DealInfo == nil {
exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded())
if !p.Piece.PieceCID.Equals(exp) {
return nil, 0, xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID)
}
paddingPieces++
continue
}
proposal, err := api.StateMarketStorageDealProposal(ctx, p.DealInfo.DealID, tok)
if err != nil {
log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err)
toFix = append(toFix, i)
continue
}
if proposal.Provider != maddr {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.Provider, maddr)
toFix = append(toFix, i)
continue
}
if proposal.PieceCID != p.Piece.PieceCID {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)
toFix = append(toFix, i)
continue
}
if p.Piece.Size != proposal.PieceSize {
log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, proposal.PieceSize)
toFix = append(toFix, i)
continue
}
if height >= proposal.StartEpoch {
// TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces
// (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval)
return nil, 0, xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.StartEpoch, height)
}
}
return toFix, paddingPieces, nil
}
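Factoring the body into handleRecoverDealIDsOrFailWith lets normal and snap-upgrade sectors share one recovery path and differ only in the terminal event: SectorRemove{} destroys a plain sector, while SectorAbortUpgrade{} rolls an upgrade back without losing the underlying CC sector. A self-contained sketch of the parameterized-failure pattern (all names illustrative):

package main

import "fmt"

// event stands in for the sealing FSM's event values.
type event interface{}

type sectorRemove struct{}
type sectorAbortUpgrade struct{}

// recoverDeals is the shared body; failWith is the caller-chosen terminal
// event, mirroring handleRecoverDealIDsOrFailWith above.
func recoverDeals(allExpired bool, send func(event), failWith event) {
	if allExpired {
		send(failWith) // unrecoverable: plain sectors get removed, upgrades aborted
		return
	}
	send(struct{ updatedDealIDs bool }{true}) // recovered: update deal IDs, continue
}

func main() {
	send := func(e event) { fmt.Printf("sent %T\n", e) }
	recoverDeals(true, send, sectorRemove{})       // plain sector path
	recoverDeals(true, send, sectorAbortUpgrade{}) // snap-upgrade path
}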

View File

@ -6,6 +6,7 @@ import (
"testing"
"github.com/filecoin-project/go-state-types/network"
statemachine "github.com/filecoin-project/go-statemachine"
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
@ -25,6 +26,7 @@ import (
)
func TestStateRecoverDealIDs(t *testing.T) {
t.Skip("Bring this back when we can correctly mock a state machine context: Issue #7867")
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
@ -40,7 +42,7 @@ func TestStateRecoverDealIDs(t *testing.T) {
sctx := mocks.NewMockContext(mockCtrl)
sctx.EXPECT().Context().AnyTimes().Return(ctx)
api.EXPECT().ChainHead(ctx).Times(1).Return(nil, abi.ChainEpoch(10), nil)
api.EXPECT().ChainHead(ctx).Times(2).Return(nil, abi.ChainEpoch(10), nil)
var dealId abi.DealID = 12
dealProposal := market.DealProposal{
@ -70,7 +72,9 @@ func TestStateRecoverDealIDs(t *testing.T) {
sctx.EXPECT().Send(sealing.SectorRemove{}).Return(nil)
err := fakeSealing.HandleRecoverDealIDs(sctx, sealing.SectorInfo{
// TODO sctx should satisfy an interface so it can be useable for mocking. This will fail because we are passing in an empty context now to get this to build.
// https://github.com/filecoin-project/lotus/issues/7867
err := fakeSealing.HandleRecoverDealIDs(statemachine.Context{}, sealing.SectorInfo{
Pieces: []sealing.Piece{
{
DealInfo: &api2.PieceDealInfo{

View File

@ -0,0 +1,222 @@
package sealing
import (
"bytes"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
statemachine "github.com/filecoin-project/go-statemachine"
api "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"golang.org/x/xerrors"
)
func (m *Sealing) handleReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error {
if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state
return handleErrors(ctx, err, sector)
}
out, err := m.sealer.ReplicaUpdate(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.pieceInfos())
if err != nil {
return ctx.Send(SectorUpdateReplicaFailed{xerrors.Errorf("replica update failed: %w", err)})
}
return ctx.Send(SectorReplicaUpdate{
Out: out,
})
}
func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error {
if sector.UpdateSealed == nil || sector.UpdateUnsealed == nil {
return xerrors.Errorf("invalid sector %d with nil UpdateSealed or UpdateUnsealed output", sector.SectorNumber)
}
if sector.CommR == nil {
return xerrors.Errorf("invalid sector %d with nil CommR", sector.SectorNumber)
}
vanillaProofs, err := m.sealer.ProveReplicaUpdate1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed)
if err != nil {
return ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (1) failed: %w", err)})
}
if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state
return handleErrors(ctx, err, sector)
}
proof, err := m.sealer.ProveReplicaUpdate2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed, vanillaProofs)
if err != nil {
return ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (2) failed: %w", err)})
}
return ctx.Send(SectorProveReplicaUpdate{
Proof: proof,
})
}
func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error {
tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil {
log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err)
return nil
}
if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state
return handleErrors(ctx, err, sector)
}
if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil {
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
sl, err := m.Api.StateSectorPartition(ctx.Context(), m.maddr, sector.SectorNumber, tok)
if err != nil {
log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err)
return nil
}
updateProof, err := sector.SectorType.RegisteredUpdateProof()
if err != nil {
log.Errorf("failed to get update proof type from seal proof: %+v", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
enc := new(bytes.Buffer)
params := &miner.ProveReplicaUpdatesParams{
Updates: []miner.ReplicaUpdate{
{
SectorID: sector.SectorNumber,
Deadline: sl.Deadline,
Partition: sl.Partition,
NewSealedSectorCID: *sector.UpdateSealed,
Deals: sector.dealIDs(),
UpdateProofType: updateProof,
ReplicaProof: sector.ReplicaUpdateProof,
},
},
}
if err := params.MarshalCBOR(enc); err != nil {
log.Errorf("failed to serialize update replica params: %w", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
cfg, err := m.getConfig()
if err != nil {
return xerrors.Errorf("getting config: %w", err)
}
onChainInfo, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok)
if err != nil {
log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err)
return nil
}
sp, err := m.currentSealProof(ctx.Context())
if err != nil {
log.Errorf("sealer failed to return current seal proof not proceeding: %+v", err)
return nil
}
virtualPCI := miner.SectorPreCommitInfo{
SealProof: sp,
SectorNumber: sector.SectorNumber,
SealedCID: *sector.UpdateSealed,
//SealRandEpoch: 0,
DealIDs: sector.dealIDs(),
Expiration: onChainInfo.Expiration,
//ReplaceCapacity: false,
//ReplaceSectorDeadline: 0,
//ReplaceSectorPartition: 0,
//ReplaceSectorNumber: 0,
}
collateral, err := m.Api.StateMinerInitialPledgeCollateral(ctx.Context(), m.maddr, virtualPCI, tok)
if err != nil {
return xerrors.Errorf("getting initial pledge collateral: %w", err)
}
collateral = big.Sub(collateral, onChainInfo.InitialPledge)
if collateral.LessThan(big.Zero()) {
collateral = big.Zero()
}
collateral, err = collateralSendAmount(ctx.Context(), m.Api, m.maddr, cfg, collateral)
if err != nil {
log.Errorf("collateral send amount failed not proceeding: %+v", err)
return nil
}
goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee))
mi, err := m.Api.StateMinerInfo(ctx.Context(), m.maddr, tok)
if err != nil {
log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err)
return nil
}
from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral)
if err != nil {
log.Errorf("no good address to send replica update message from: %+v", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveReplicaUpdates, big.Zero(), big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes())
if err != nil {
log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
return ctx.Send(SectorReplicaUpdateSubmitted{Message: mcid})
}
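The collateral arithmetic in handleSubmitReplicaUpdate is a top-up: the initial pledge is recomputed as if the updated sector were precommitted today (the virtualPCI), the pledge already locked on chain is subtracted, and negative results clamp to zero. A toy worked example with the go-state-types big API (the amounts are invented):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
)

func main() {
	newPledge := big.NewInt(300)    // pledge if the updated sector were committed today
	lockedPledge := big.NewInt(280) // onChainInfo.InitialPledge already held

	// Top-up rule: send only the shortfall, never a negative amount.
	delta := big.Sub(newPledge, lockedPledge)
	if delta.LessThan(big.Zero()) {
		delta = big.Zero()
	}
	fmt.Println(delta) // 20
}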
func (m *Sealing) handleReplicaUpdateWait(ctx statemachine.Context, sector SectorInfo) error {
if sector.ReplicaUpdateMessage == nil {
log.Errorf("handleReplicaUpdateWait: no replica update message cid recorded")
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
mw, err := m.Api.StateWaitMsg(ctx.Context(), *sector.ReplicaUpdateMessage)
if err != nil {
log.Errorf("handleReplicaUpdateWait: failed to wait for message: %+v", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
switch mw.Receipt.ExitCode {
case exitcode.Ok:
//expected
case exitcode.SysErrInsufficientFunds:
fallthrough
case exitcode.SysErrOutOfGas:
log.Errorf("gas estimator was wrong or out of funds")
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
default:
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
si, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, mw.TipSetTok)
if err != nil {
log.Errorf("api err failed to get sector info: %+v", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
if si == nil {
log.Errorf("api err sector not found")
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}
if !si.SealedCID.Equals(*sector.UpdateSealed) {
log.Errorf("mismatch of expected onchain sealed cid after replica update, expected %s got %s", sector.UpdateSealed, si.SealedCID)
return ctx.Send(SectorAbortUpgrade{})
}
return ctx.Send(SectorReplicaUpdateLanded{})
}
func (m *Sealing) handleFinalizeReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error {
return ctx.Send(SectorFinalized{})
}
func handleErrors(ctx statemachine.Context, err error, sector SectorInfo) error {
switch err.(type) {
case *ErrApi:
log.Errorf("handleReplicaUpdate: api error, not proceeding: %+v", err)
return nil
case *ErrInvalidDeals:
log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
return ctx.Send(SectorInvalidDealIDs{})
case *ErrExpiredDeals: // Probably not much we can do here, maybe re-pack the sector?
return ctx.Send(SectorDealsExpired{xerrors.Errorf("expired dealIDs in sector: %w", err)})
default:
return xerrors.Errorf("checkPieces sanity check error: %w", err)
}
}
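For orientation, the handlers in this new file cover the snap-deals happy path: encode deal data into the existing replica, prove the update in two phases (ProveReplicaUpdate1 yields vanilla proofs, ProveReplicaUpdate2 aggregates them into the final proof), submit miner.ProveReplicaUpdates on chain, then confirm the new sealed CID landed. A compact sketch of the success-path event chain (state and event names are from this diff; the table itself is illustrative):

package main

import "fmt"

// snapHappyPath lists the success-path transitions driven by the handlers
// above; the authoritative transitions live in the sealing FSM plan.
var snapHappyPath = [][2]string{
	{"UpdateReplica", "SectorReplicaUpdate"},
	{"ProveReplicaUpdate", "SectorProveReplicaUpdate"},
	{"SubmitReplicaUpdate", "SectorReplicaUpdateSubmitted"},
	{"ReplicaUpdateWait", "SectorReplicaUpdateLanded"},
	{"FinalizeReplicaUpdate", "SectorFinalized"},
}

func main() {
	for _, tr := range snapHappyPath {
		fmt.Printf("%-22s --%s--> next\n", tr[0], tr[1])
	}
}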

View File

@ -280,8 +280,8 @@ func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo)
}
// TODO: We should probably invoke this method in most (if not all) state transition failures after handlePreCommitting
func (m *Sealing) remarkForUpgrade(sid abi.SectorNumber) {
err := m.MarkForUpgrade(sid)
func (m *Sealing) remarkForUpgrade(ctx context.Context, sid abi.SectorNumber) {
err := m.MarkForUpgrade(ctx, sid)
if err != nil {
log.Errorf("error re-marking sector %d as for upgrade: %+v", sid, err)
}
@ -424,7 +424,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, big.Int(m.feeCfg.MaxPreCommitGasFee), enc.Bytes())
if err != nil {
if params.ReplaceCapacity {
m.remarkForUpgrade(params.ReplaceSectorNumber)
m.remarkForUpgrade(ctx.Context(), params.ReplaceSectorNumber)
}
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
}

View File

@ -73,7 +73,7 @@ type SectorInfo struct {
// PreCommit2
CommD *cid.Cid
CommR *cid.Cid
CommR *cid.Cid // SectorKey
Proof []byte
PreCommitInfo *miner.SectorPreCommitInfo
@ -91,6 +91,14 @@ type SectorInfo struct {
CommitMessage *cid.Cid
InvalidProofs uint64 // failed proof computations (doesn't validate with proof inputs; can't compute)
// CCUpdate
CCUpdate bool
CCPieces []Piece
UpdateSealed *cid.Cid
UpdateUnsealed *cid.Cid
ReplicaUpdateProof storage.ReplicaUpdateProof
ReplicaUpdateMessage *cid.Cid
// Faults
FaultReportMsg *cid.Cid

View File

@ -4,6 +4,7 @@ import (
"context"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market"
"golang.org/x/xerrors"
@ -18,7 +19,8 @@ func (m *Sealing) IsMarkedForUpgrade(id abi.SectorNumber) bool {
return found
}
func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error {
func (m *Sealing) MarkForUpgrade(ctx context.Context, id abi.SectorNumber) error {
m.upgradeLk.Lock()
defer m.upgradeLk.Unlock()
@ -27,6 +29,37 @@ func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error {
return xerrors.Errorf("sector %d already marked for upgrade", id)
}
si, err := m.GetSectorInfo(id)
if err != nil {
return xerrors.Errorf("getting sector info: %w", err)
}
if si.State != Proving {
return xerrors.Errorf("can't mark sectors not in the 'Proving' state for upgrade")
}
if len(si.Pieces) != 1 {
return xerrors.Errorf("not a committed-capacity sector, expected 1 piece")
}
if si.Pieces[0].DealInfo != nil {
return xerrors.Errorf("not a committed-capacity sector, has deals")
}
m.toUpgrade[id] = struct{}{}
return nil
}
func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) error {
cfg, err := m.getConfig()
if err != nil {
return xerrors.Errorf("getting storage config: %w", err)
}
curStaging := m.stats.curStaging()
if cfg.MaxWaitDealsSectors > 0 && curStaging >= cfg.MaxWaitDealsSectors {
return xerrors.Errorf("already waiting for deals in %d >= %d (cfg.MaxWaitDealsSectors) sectors, no free resources to wait for deals in another",
curStaging, cfg.MaxWaitDealsSectors)
}
si, err := m.GetSectorInfo(id)
if err != nil {
return xerrors.Errorf("getting sector info: %w", err)
@ -44,11 +77,37 @@ func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error {
return xerrors.Errorf("not a committed-capacity sector, has deals")
}
// TODO: more checks to match actor constraints
tok, head, err := m.Api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("couldnt get chain head: %w", err)
}
onChainInfo, err := m.Api.StateSectorGetInfo(ctx, m.maddr, id, tok)
if err != nil {
return xerrors.Errorf("failed to read sector on chain info: %w", err)
}
m.toUpgrade[id] = struct{}{}
active, err := m.Api.StateMinerActiveSectors(ctx, m.maddr, tok)
if err != nil {
return xerrors.Errorf("failed to check active sectors: %w", err)
}
// Ensure the upgraded sector is active
var found bool
for _, si := range active {
if si.SectorNumber == id {
found = true
break
}
}
if !found {
return xerrors.Errorf("cannot mark inactive sector for upgrade")
}
return nil
if onChainInfo.Expiration-head < market7.DealMinDuration {
return xerrors.Errorf("pointless to upgrade sector %d, expiration %d is less than a min deal duration away from current epoch."+
"Upgrade expiration before marking for upgrade", id, onChainInfo.Expiration)
}
return m.sectors.Send(uint64(id), SectorStartCCUpdate{})
}
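MarkForSnapUpgrade front-loads the checks that would otherwise fail on chain: a single-piece, deal-free CC sector that is Proving and active, with enough remaining lifetime to host a minimum-duration deal. A condensed, self-contained sketch of that predicate (the DealMinDuration value and field names are assumptions taken from the market actor policy):

package main

import "fmt"

// dealMinDuration assumes market7.DealMinDuration: 180 days of 2880 epochs.
const dealMinDuration = 180 * 2880

// eligible mirrors the MarkForSnapUpgrade guards as a pure function.
func eligible(state string, pieces int, hasDeals, active bool, expiration, head int64) error {
	switch {
	case state != "Proving":
		return fmt.Errorf("sector is not in the Proving state")
	case pieces != 1 || hasDeals:
		return fmt.Errorf("not a committed-capacity sector")
	case !active:
		return fmt.Errorf("sector is not active")
	case expiration-head < dealMinDuration:
		return fmt.Errorf("expires too soon to fit a min-duration deal")
	}
	return nil
}

func main() {
	fmt.Println(eligible("Proving", 1, false, true, 1_600_000, 1_000_000)) // <nil>
}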
func (m *Sealing) tryUpgradeSector(ctx context.Context, params *miner.SectorPreCommitInfo) big.Int {

go.mod
View File

@ -40,7 +40,7 @@ require (
github.com/filecoin-project/go-jsonrpc v0.1.5
github.com/filecoin-project/go-padreader v0.0.1
github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53
github.com/filecoin-project/go-state-types v0.1.1
github.com/filecoin-project/go-state-types v0.1.3
github.com/filecoin-project/go-statemachine v1.0.1
github.com/filecoin-project/go-statestore v0.2.0
github.com/filecoin-project/go-storedcounter v0.1.0
@ -50,7 +50,7 @@ require (
github.com/filecoin-project/specs-actors/v4 v4.0.1
github.com/filecoin-project/specs-actors/v5 v5.0.4
github.com/filecoin-project/specs-actors/v6 v6.0.1
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402
github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1
github.com/filecoin-project/specs-storage v0.1.1-0.20220114131651-ee969fade269
github.com/filecoin-project/test-vectors/schema v0.0.5
github.com/gbrlsnchs/jwt/v3 v3.0.1
@ -109,7 +109,7 @@ require (
github.com/libp2p/go-buffer-pool v0.0.2
github.com/libp2p/go-eventbus v0.2.1
github.com/libp2p/go-libp2p v0.17.0
github.com/libp2p/go-libp2p-connmgr v0.3.0
github.com/libp2p/go-libp2p-connmgr v0.3.1
github.com/libp2p/go-libp2p-core v0.13.0
github.com/libp2p/go-libp2p-discovery v0.6.0
github.com/libp2p/go-libp2p-kad-dht v0.15.0
@ -173,3 +173,7 @@ require (
replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi
replace github.com/filecoin-project/test-vectors => ./extern/test-vectors
//replace github.com/filecoin-project/specs-actors/v7 => /Users/zenground0/pl/repos/specs-actors
// replace github.com/filecoin-project/specs-storage => /Users/zenground0/pl/repos/specs-storage

go.sum
View File

@ -300,6 +300,8 @@ github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoC
github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o=
github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE=
github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo=
github.com/filecoin-project/go-amt-ipld/v4 v4.0.0 h1:XM81BJ4/6h3FV0WfFjh74cIDIgqMbJsMBLM0fIuLUUk=
github.com/filecoin-project/go-amt-ipld/v4 v4.0.0/go.mod h1:gF053YQ4BIpzTNDoEwHZas7U3oAwncDVGvOHyY8oDpE=
github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk=
@ -346,8 +348,9 @@ github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1 h1:LR260vya4p++atgf256W6yV3Lxl5mKrBFcEZePWQrdg=
github.com/filecoin-project/go-state-types v0.1.1/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.3 h1:rzIJyQo5HO2ptc8Jcu8P0qTutnI7NWwTle54eAHoNO0=
github.com/filecoin-project/go-state-types v0.1.3/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
github.com/filecoin-project/go-statemachine v1.0.1 h1:LQ60+JDVjMdLxXmVFM2jjontzOYnfVE7u02CXV3WKSw=
github.com/filecoin-project/go-statemachine v1.0.1/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54=
@ -374,6 +377,11 @@ github.com/filecoin-project/specs-actors/v6 v6.0.0/go.mod h1:V1AYfi5GkHXipx1mnVi
github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew=
github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1 h1:FuDaXIbcw2hRsFI8SDTmsGGCE+NumpF6aiBoU/2X5W4=
github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M=
github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 h1:oUYOvF7EvdXS0Zmk9mNkaB6Bu0l+WXBYPzVodKMiLug=
github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402 h1:jPJlk7YpWKDNLx/3Dosgm3ie3wsNhe3YP6Xl8Gc5vqY=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-storage v0.1.1-0.20220114131651-ee969fade269 h1:icvWX24jKnt8qCobOE5fLJhMxiYEZy5RcGSkLPTPndU=
@ -1021,8 +1029,9 @@ github.com/libp2p/go-libp2p-circuit v0.4.0 h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFj
github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA=
github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk=
github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0=
github.com/libp2p/go-libp2p-connmgr v0.3.0 h1:yerFXrYa0oxpuVsLlndwm/bLulouHYDcvFrY/4H4fx8=
github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik=
github.com/libp2p/go-libp2p-connmgr v0.3.1 h1:alEy2fpGKFu+7ZhQF4GF0dvKLyVHeLtIfS/KziwoiZw=
github.com/libp2p/go-libp2p-connmgr v0.3.1/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik=
github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco=
github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco=
github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE=

View File

@ -6,14 +6,17 @@ import (
"testing"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TODO: This needs to be repurposed into a SnapDeals test suite
func TestCCUpgrade(t *testing.T) {
kit.QuietMiningLogs()
@ -29,37 +32,43 @@ func TestCCUpgrade(t *testing.T) {
}
}
func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) {
func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) *kit.TestFullNode {
ctx := context.Background()
blockTime := 5 * time.Millisecond
blockTime := 1 * time.Millisecond
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.TurboUpgradeAt(upgradeHeight))
ens.InterconnectAll().BeginMining(blockTime)
client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15))
ens.InterconnectAll().BeginMiningMustPost(blockTime)
maddr, err := miner.ActorAddress(ctx)
if err != nil {
t.Fatal(err)
}
CC := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
Upgraded := CC + 1
CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
fmt.Printf("CCUpgrade: %d\n", CCUpgrade)
// wait for deadline 0 to pass so that committing starts after post on preseals
// this gives max time for post to complete minimizing chances of timeout
// waitForDeadline(ctx, t, 1, client, maddr)
miner.PledgeSectors(ctx, 1, 0, nil)
sl, err := miner.SectorsList(ctx)
require.NoError(t, err)
require.Len(t, sl, 1, "expected 1 sector")
require.Equal(t, CC, sl[0], "unexpected sector number")
require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
{
si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK)
si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
require.NoError(t, err)
require.Less(t, 50000, int(si.Expiration))
}
waitForSectorActive(ctx, t, CCUpgrade, client, maddr)
err = miner.SectorMarkForUpgrade(ctx, sl[0])
err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
require.NoError(t, err)
sl, err = miner.SectorsList(ctx)
require.NoError(t, err)
require.Len(t, sl, 1, "expected 1 sector")
dh := kit.NewDealHarness(t, client, miner, miner)
deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
Rseed: 6,
@ -68,37 +77,112 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) {
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
kit.AssertFilesEqual(t, inPath, outPath)
// Validate upgrade
{
exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK)
if err != nil {
require.Contains(t, err.Error(), "failed to find sector 3") // already cleaned up
} else {
require.NoError(t, err)
require.NotNil(t, exp)
require.Greater(t, 50000, int(exp.OnTime))
}
}
{
exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK)
require.NoError(t, err)
require.Less(t, 50000, int(exp.OnTime))
}
dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
status, err := miner.SectorsStatus(ctx, CCUpgrade, true)
require.NoError(t, err)
assert.Equal(t, 1, len(status.Deals))
return client
}
// Sector should expire.
func waitForDeadline(ctx context.Context, t *testing.T, waitIdx uint64, node *kit.TestFullNode, maddr address.Address) {
for {
// Wait for the sector to expire.
status, err := miner.SectorsStatus(ctx, CC, true)
ts, err := node.ChainHead(ctx)
require.NoError(t, err)
if status.OnTime == 0 && status.Early == 0 {
break
dl, err := node.StateMinerProvingDeadline(ctx, maddr, ts.Key())
require.NoError(t, err)
if dl.Index == waitIdx {
return
}
t.Log("waiting for sector to expire")
// wait one deadline per loop.
time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blockTime)
}
}
func waitForSectorActive(ctx context.Context, t *testing.T, sn abi.SectorNumber, node *kit.TestFullNode, maddr address.Address) {
for {
active, err := node.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
for _, si := range active {
if si.SectorNumber == sn {
fmt.Printf("ACTIVE\n")
return
}
}
time.Sleep(time.Second)
}
}
func waitForSectorStartUpgrade(ctx context.Context, t *testing.T, sn abi.SectorNumber, miner *kit.TestMiner) {
for {
si, err := miner.StorageMiner.SectorsStatus(ctx, sn, false)
require.NoError(t, err)
if si.State != api.SectorState("Proving") {
t.Logf("Done proving sector in state: %s", si.State)
return
}
}
}
func TestCCUpgradeAndPoSt(t *testing.T) {
kit.QuietMiningLogs()
t.Run("upgrade and then post", func(t *testing.T) {
ctx := context.Background()
n := runTestCCUpgrade(t, 100)
ts, err := n.ChainHead(ctx)
require.NoError(t, err)
start := ts.Height()
// wait for a full proving period
t.Log("waiting for chain")
n.WaitTillChain(ctx, func(ts *types.TipSet) bool {
return ts.Height() > start+abi.ChainEpoch(2880)
})
})
}
func TestTooManyMarkedForUpgrade(t *testing.T) {
kit.QuietMiningLogs()
ctx := context.Background()
blockTime := 1 * time.Millisecond
client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15))
ens.InterconnectAll().BeginMiningMustPost(blockTime)
maddr, err := miner.ActorAddress(ctx)
if err != nil {
t.Fatal(err)
}
CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
waitForDeadline(ctx, t, 1, client, maddr)
miner.PledgeSectors(ctx, 3, 0, nil)
sl, err := miner.SectorsList(ctx)
require.NoError(t, err)
require.Len(t, sl, 3, "expected 3 sectors")
{
si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
require.NoError(t, err)
require.Less(t, 50000, int(si.Expiration))
}
waitForSectorActive(ctx, t, CCUpgrade, client, maddr)
waitForSectorActive(ctx, t, CCUpgrade+1, client, maddr)
waitForSectorActive(ctx, t, CCUpgrade+2, client, maddr)
err = miner.SectorMarkForUpgrade(ctx, CCUpgrade, true)
require.NoError(t, err)
err = miner.SectorMarkForUpgrade(ctx, CCUpgrade+1, true)
require.NoError(t, err)
waitForSectorStartUpgrade(ctx, t, CCUpgrade, miner)
waitForSectorStartUpgrade(ctx, t, CCUpgrade+1, miner)
err = miner.SectorMarkForUpgrade(ctx, CCUpgrade+2, true)
require.Error(t, err)
assert.Contains(t, err.Error(), "no free resources to wait for deals")
}

View File

@ -1,13 +1,18 @@
package kit
import (
"bytes"
"context"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
aminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
"github.com/stretchr/testify/require"
)
@ -30,6 +35,176 @@ func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
}
}
type partitionTracker struct {
partitions []api.Partition
posted bitfield.BitField
}
func newPartitionTracker(ctx context.Context, dlIdx uint64, bm *BlockMiner) *partitionTracker {
dlines, err := bm.miner.FullNode.StateMinerDeadlines(ctx, bm.miner.ActorAddr, types.EmptyTSK)
require.NoError(bm.t, err)
dl := dlines[dlIdx]
parts, err := bm.miner.FullNode.StateMinerPartitions(ctx, bm.miner.ActorAddr, dlIdx, types.EmptyTSK)
require.NoError(bm.t, err)
return &partitionTracker{
partitions: parts,
posted: dl.PostSubmissions,
}
}
func (p *partitionTracker) count(t *testing.T) uint64 {
pCnt, err := p.posted.Count()
require.NoError(t, err)
return pCnt
}
func (p *partitionTracker) done(t *testing.T) bool {
return uint64(len(p.partitions)) == p.count(t)
}
func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, smsg *types.SignedMessage) (ret bool) {
defer func() {
ret = p.done(t)
}()
msg := smsg.Message
if !(msg.To == bm.miner.ActorAddr) {
return
}
if msg.Method != aminer.Methods.SubmitWindowedPoSt {
return
}
params := aminer.SubmitWindowedPoStParams{}
require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(msg.Params)))
for _, part := range params.Partitions {
p.posted.Set(part.Index)
}
return
}
// MineBlocksMustPost is like MineBlocks, but refuses to mine until the window PoSt scheduler has wdpost messages in the mempool;
// everything shuts down if a PoSt fails, and every mined block must succeed
func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Duration) {
time.Sleep(3 * time.Second)
// wrap context in a cancellable context.
ctx, bm.cancel = context.WithCancel(ctx)
bm.wg.Add(1)
go func() {
defer bm.wg.Done()
activeDeadlines := make(map[int]struct{})
_ = activeDeadlines
ts, err := bm.miner.FullNode.ChainHead(ctx)
require.NoError(bm.t, err)
wait := make(chan bool)
chg, err := bm.miner.FullNode.ChainNotify(ctx)
require.NoError(bm.t, err)
// read current out
curr := <-chg
require.Equal(bm.t, ts.Height(), curr[0].Val.Height())
for {
select {
case <-time.After(blocktime):
case <-ctx.Done():
return
}
nulls := atomic.SwapInt64(&bm.nextNulls, 0)
require.Equal(bm.t, int64(0), nulls, "injecting null blocks while MustPost mining is currently unsupported")
// Wake up and figure out if we are at the end of an active deadline
ts, err := bm.miner.FullNode.ChainHead(ctx)
require.NoError(bm.t, err)
tsk := ts.Key()
dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, bm.miner.ActorAddr, tsk)
require.NoError(bm.t, err)
if ts.Height()+1 == dlinfo.Last() { // last epoch in the deadline; check that the miner has posted
tracker := newPartitionTracker(ctx, dlinfo.Index, bm)
if !tracker.done(bm.t) { // need to wait for post
bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t))
poolEvts, err := bm.miner.FullNode.MpoolSub(ctx)
require.NoError(bm.t, err)
// First check pending messages we'll mine this epoch
msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK)
require.NoError(bm.t, err)
for _, msg := range msgs {
tracker.recordIfPost(bm.t, bm, msg)
}
// post not yet in mpool, wait for it
if !tracker.done(bm.t) {
bm.t.Logf("post missing from mpool, block mining suspended until it arrives")
POOL:
for {
bm.t.Logf("mpool event wait loop at block height %d, ts: %s", ts.Height(), ts.Key())
select {
case <-ctx.Done():
return
case evt := <-poolEvts:
bm.t.Logf("pool event: %d", evt.Type)
if evt.Type == api.MpoolAdd {
bm.t.Logf("incoming message %v", evt.Message)
if tracker.recordIfPost(bm.t, bm, evt.Message) {
break POOL
}
}
}
}
bm.t.Logf("done waiting on mpool")
}
}
}
var target abi.ChainEpoch
reportSuccessFn := func(success bool, epoch abi.ChainEpoch, err error) {
require.NoError(bm.t, err)
target = epoch
wait <- success
}
var success bool
for i := int64(0); !success; i++ {
err = bm.miner.MineOne(ctx, miner.MineReq{
InjectNulls: abi.ChainEpoch(nulls + i),
Done: reportSuccessFn,
})
success = <-wait
}
// Wait until it shows up on the given full node's ChainHead
// TODO this replicates a flaky condition from MineUntil,
// it would be better to use api to wait for sync,
// but currently this is a bit difficult
// and flaky failure is easy to debug and retry
nloops := 200
for i := 0; i < nloops; i++ {
ts, err := bm.miner.FullNode.ChainHead(ctx)
require.NoError(bm.t, err)
if ts.Height() == target {
break
}
require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node")
time.Sleep(time.Millisecond * 10)
}
switch {
case err == nil: // wrap around
case ctx.Err() != nil: // context fired.
return
default: // log error
bm.t.Error(err)
}
}
}()
}
func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) {
time.Sleep(time.Second)

View File

@ -104,8 +104,9 @@ func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealPa
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
fmt.Printf("WAIT DEAL SEALEDS START\n")
dh.WaitDealSealed(ctx, deal, false, false, nil)
fmt.Printf("WAIT DEAL SEALEDS END\n")
return deal, res, path
}
@ -176,6 +177,7 @@ loop:
cb()
}
}
fmt.Printf("WAIT DEAL SEALED LOOP BROKEN\n")
}
// WaitDealSealedQuiet waits until the deal is sealed, without logging anything.
@ -290,12 +292,11 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
snums, err := dh.main.SectorsList(ctx)
require.NoError(dh.t, err)
for _, snum := range snums {
si, err := dh.main.SectorsStatus(ctx, snum, false)
require.NoError(dh.t, err)
dh.t.Logf("Sector state: %s", si.State)
dh.t.Logf("Sector state <%d>-[%d]:, %s", snum, si.SealProof, si.State)
if si.State == api.SectorState(sealing.WaitDeals) {
require.NoError(dh.t, dh.main.SectorStartSealing(ctx, snum))
}

View File

@ -777,6 +777,43 @@ func (n *Ensemble) Connect(from api.Net, to ...api.Net) *Ensemble {
return n
}
func (n *Ensemble) BeginMiningMustPost(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner {
ctx := context.Background()
// wait one second to make sure that nodes are connected and have handshaken.
// TODO make this deterministic by listening to identify events on the
// libp2p eventbus instead (or something else).
time.Sleep(1 * time.Second)
var bms []*BlockMiner
if len(miners) == 0 {
// no miners have been provided explicitly, instantiate block miners
// for all active miners that aren't still mining.
for _, m := range n.active.miners {
if _, ok := n.active.bms[m]; ok {
continue // skip, already have a block miner
}
miners = append(miners, m)
}
}
if len(miners) > 1 {
n.t.Fatalf("Only one active miner for MustPost, but have %d", len(miners))
}
for _, m := range miners {
bm := NewBlockMiner(n.t, m)
bm.MineBlocksMustPost(ctx, blocktime)
n.t.Cleanup(bm.Stop)
bms = append(bms, bm)
n.active.bms[m] = bm
}
return bms
}
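A hedged usage sketch at the definition site: BeginMiningMustPost expects a single-miner ensemble, since one BlockMiner can only track one miner's deadlines. This mirrors the tests earlier in the diff and adds no new behavior:

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/itests/kit"
)

// TestMustPostSketch is illustrative only: one miner, must-post mining.
func TestMustPostSketch(t *testing.T) {
	_, _, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15))
	ens.InterconnectAll().BeginMiningMustPost(1 * time.Millisecond)
	// ... drive sealing/upgrades here; mining halts whenever a window PoSt is due.
}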
// BeginMining kicks off mining for the specified miners. If nil or 0-length,
// it will kick off mining for all enrolled and active miners. It also adds a
// cleanup function to stop all mining operations on test teardown.

View File

@ -712,7 +712,7 @@
"CreateMiner",
"UpdateClaimedPower",
"EnrollCronEvent",
"OnEpochTickEnd",
"CronTick",
"UpdatePledgeTotal",
"SubmitPoRepForBulkVerify",
"CurrentTotalPower"
@ -728,6 +728,7 @@
"RemoveVerifier",
"AddVerifiedClient",
"UseBytes",
"RestoreBytes"
"RestoreBytes",
"RemoveVerifiedClientDataCap"
]
}

View File

@ -31,20 +31,28 @@ type SectorAccessor interface {
}
type minerAPI struct {
pieceStore piecestore.PieceStore
sa SectorAccessor
throttle throttle.Throttler
readyMgr *shared.ReadyManager
pieceStore piecestore.PieceStore
sa SectorAccessor
throttle throttle.Throttler
unsealThrottle throttle.Throttler
readyMgr *shared.ReadyManager
}
var _ MinerAPI = (*minerAPI)(nil)
func NewMinerAPI(store piecestore.PieceStore, sa SectorAccessor, concurrency int) MinerAPI {
func NewMinerAPI(store piecestore.PieceStore, sa SectorAccessor, concurrency int, unsealConcurrency int) MinerAPI {
var unsealThrottle throttle.Throttler
if unsealConcurrency == 0 {
unsealThrottle = throttle.Noop()
} else {
unsealThrottle = throttle.Fixed(unsealConcurrency)
}
return &minerAPI{
pieceStore: store,
sa: sa,
throttle: throttle.Fixed(concurrency),
readyMgr: shared.NewReadyManager(),
pieceStore: store,
sa: sa,
throttle: throttle.Fixed(concurrency),
unsealThrottle: unsealThrottle,
readyMgr: shared.NewReadyManager(),
}
}
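The zero-means-unlimited case swaps in a no-op throttler instead of throttle.Fixed(0). A small sketch of the same pattern using the dagstore throttle API already relied on above (Fixed, Noop, Do):

package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/dagstore/throttle"
)

// newThrottler mirrors the NewMinerAPI branch: 0 disables throttling.
func newThrottler(concurrency int) throttle.Throttler {
	if concurrency == 0 {
		return throttle.Noop() // unlimited: Do runs the task immediately
	}
	return throttle.Fixed(concurrency) // at most `concurrency` tasks in flight
}

func main() {
	th := newThrottler(2)
	_ = th.Do(context.Background(), func(ctx context.Context) error {
		fmt.Println("unseal task running under throttle")
		return nil
	})
}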
@ -152,13 +160,19 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mo
}
lastErr := xerrors.New("no sectors found to unseal from")
// if there is no unsealed sector containing the piece, just read the piece from the first sector we are able to unseal.
for _, deal := range pieceInfo.Deals {
// Note that if the deal data is not already unsealed, unsealing may
// block for a long time with the current PoRep
//
// This path is unthrottled.
reader, err := m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
var reader mount.Reader
deal := deal
err := m.throttle.Do(ctx, func(ctx context.Context) (err error) {
// Because we know we have an unsealed copy, this UnsealSector call will actually not perform any unsealing.
reader, err = m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
return err
})
if err != nil {
lastErr = xerrors.Errorf("failed to unseal deal %d: %w", deal.DealID, err)
log.Warn(lastErr.Error())

View File

@ -75,7 +75,7 @@ func TestLotusAccessorFetchUnsealedPiece(t *testing.T) {
rpn := &mockRPN{
sectors: mockData,
}
api := NewMinerAPI(ps, rpn, 100)
api := NewMinerAPI(ps, rpn, 100, 5)
require.NoError(t, api.Start(ctx))
// Add deals to piece store
@ -115,7 +115,7 @@ func TestLotusAccessorGetUnpaddedCARSize(t *testing.T) {
ps := getPieceStore(t)
rpn := &mockRPN{}
api := NewMinerAPI(ps, rpn, 100)
api := NewMinerAPI(ps, rpn, 100, 5)
require.NoError(t, api.Start(ctx))
// Add a deal with data Length 10
@ -142,7 +142,7 @@ func TestThrottle(t *testing.T) {
unsealedSectorID: "foo",
},
}
api := NewMinerAPI(ps, rpn, 3)
api := NewMinerAPI(ps, rpn, 3, 5)
require.NoError(t, api.Start(ctx))
// Add a deal with data Length 10

View File

@ -96,7 +96,7 @@ func TestShardRegistration(t *testing.T) {
cfg := config.DefaultStorageMiner().DAGStore
cfg.RootDir = t.TempDir()
mapi := NewMinerAPI(ps, &wrappedSA{sa}, 10)
mapi := NewMinerAPI(ps, &wrappedSA{sa}, 10, 5)
dagst, w, err := NewDAGStore(cfg, mapi)
require.NoError(t, err)
require.NotNil(t, dagst)

View File

@ -5,6 +5,7 @@ import (
"context"
"sync"
"github.com/filecoin-project/go-bitfield"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
@ -110,7 +111,7 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context,
// Watch for a pre-commit message to the provider.
matchEvent := func(msg *types.Message) (bool, error) {
matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch)
matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch || msg.Method == miner.Methods.ProveReplicaUpdates)
return matched, nil
}
@ -145,6 +146,20 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context,
return false, err
}
// If this is a replica update method that succeeded, the deal is active
if msg.Method == miner.Methods.ProveReplicaUpdates {
sn, err := dealSectorInReplicaUpdateSuccess(msg, rec, res)
if err != nil {
return false, err
}
if sn != nil {
cb(*sn, true, nil)
return false, nil
}
// Didn't find the deal ID in this message, so keep looking
return true, nil
}
// Extract the message parameters
sn, err := dealSectorInPreCommitMsg(msg, res)
if err != nil {
@ -264,6 +279,42 @@ func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, pr
return nil
}
func dealSectorInReplicaUpdateSuccess(msg *types.Message, rec *types.MessageReceipt, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) {
var params miner.ProveReplicaUpdatesParams
if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
return nil, xerrors.Errorf("unmarshal prove replica update: %w", err)
}
var seekUpdate miner.ReplicaUpdate
var found bool
for _, update := range params.Updates {
for _, did := range update.Deals {
if did == res.DealID {
seekUpdate = update
found = true
break
}
}
}
if !found {
return nil, nil
}
// check that this update passed validation steps
var successBf bitfield.BitField
if err := successBf.UnmarshalCBOR(bytes.NewReader(rec.Return)); err != nil {
return nil, xerrors.Errorf("unmarshal return value: %w", err)
}
success, err := successBf.IsSet(uint64(seekUpdate.SectorID))
if err != nil {
return nil, xerrors.Errorf("failed to check success of replica update: %w", err)
}
if !success {
return nil, xerrors.Errorf("replica update %d failed", seekUpdate.SectorID)
}
return &seekUpdate.SectorID, nil
}
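ProveReplicaUpdates returns a bitfield of sector numbers whose updates passed validation; dealSectorInReplicaUpdateSuccess decodes it from the receipt and checks the one sector it cares about. A standalone round-trip with go-bitfield (toy values):

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/go-bitfield"
)

func main() {
	// Pretend the actor reported sectors 3 and 7 as successfully updated.
	ret := bitfield.New()
	ret.Set(3)
	ret.Set(7)

	var buf bytes.Buffer
	if err := ret.MarshalCBOR(&buf); err != nil { // what rec.Return would carry
		panic(err)
	}

	var successBf bitfield.BitField
	if err := successBf.UnmarshalCBOR(bytes.NewReader(buf.Bytes())); err != nil {
		panic(err)
	}
	ok, err := successBf.IsSet(7)
	fmt.Println(ok, err) // true <nil>
}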
// dealSectorInPreCommitMsg tries to find a sector containing the specified deal
func dealSectorInPreCommitMsg(msg *types.Message, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) {
switch msg.Method {

View File

@ -535,8 +535,12 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
prand := abi.PoStRandomness(rand)
tSeed := build.Clock.Now()
nv, err := m.api.StateNetworkVersion(ctx, base.TipSet.Key())
if err != nil {
return nil, err
}
postProof, err := m.epp.ComputeProof(ctx, mbi.Sectors, prand)
postProof, err := m.epp.ComputeProof(ctx, mbi.Sectors, prand, round, nv)
if err != nil {
err = xerrors.Errorf("failed to compute winning post proof: %w", err)
return nil, err

View File

@ -10,8 +10,7 @@ import (
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/lotus/chain/types"
)
@ -61,13 +60,22 @@ out:
return xerrors.Errorf("getting sector info: %w", err)
}
_, err = m.epp.ComputeProof(ctx, []proof2.SectorInfo{
ts, err := m.api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("getting chain head")
}
nv, err := m.api.StateNetworkVersion(ctx, ts.Key())
if err != nil {
return xerrors.Errorf("getting network version")
}
_, err = m.epp.ComputeProof(ctx, []proof7.ExtendedSectorInfo{
{
SealProof: si.SealProof,
SectorNumber: sector,
SealedCID: si.SealedCID,
},
}, r)
}, r, ts.Height(), nv)
if err != nil {
return xerrors.Errorf("failed to compute proof: %w", err)
}

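The warmup now hands ComputeProof an ExtendedSectorInfo slice (which can carry a sector key CID for snap-updated replicas) plus the epoch and network version, letting the proving subsystem pick the right PoSt flavor. Where a legacy SectorInfo is still needed, the conversion simply drops the key; a hedged sketch of that helper (not part of this diff):

package main

import (
	"fmt"

	proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
)

// toSectorInfo drops the snap-deals sector key when converting down to the
// legacy SectorInfo shape; illustrative only.
func toSectorInfo(in []proof7.ExtendedSectorInfo) []proof7.SectorInfo {
	out := make([]proof7.SectorInfo, 0, len(in))
	for _, si := range in {
		out = append(out, proof7.SectorInfo{
			SealProof:    si.SealProof,
			SectorNumber: si.SectorNumber,
			SealedCID:    si.SealedCID,
		})
	}
	return out
}

func main() {
	fmt.Println(len(toSectorInfo(nil))) // 0
}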
View File

@ -109,10 +109,11 @@ func DefaultStorageMiner() *StorageMiner {
AvailableBalanceBuffer: types.FIL(big.Zero()),
DisableCollateralFallback: false,
BatchPreCommits: true,
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
BatchPreCommits: true,
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
// XXX snap deals wait deals slack if first
PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
CommittedCapacitySectorLifetime: Duration(builtin.EpochDurationSeconds * uint64(policy.GetMaxSectorExpirationExtension()) * uint64(time.Second)),
@ -131,11 +132,13 @@ func DefaultStorageMiner() *StorageMiner {
},
Storage: sectorstorage.SealerConfig{
AllowAddPiece: true,
AllowPreCommit1: true,
AllowPreCommit2: true,
AllowCommit: true,
AllowUnseal: true,
AllowAddPiece: true,
AllowPreCommit1: true,
AllowPreCommit2: true,
AllowCommit: true,
AllowUnseal: true,
AllowReplicaUpdate: true,
AllowProveReplicaUpdate2: true,
// Default to 10 - tcp should still be able to figure this out, and
// it's the ratio between 10gbit / 1gbit
@ -213,6 +216,7 @@ func DefaultStorageMiner() *StorageMiner {
DAGStore: DAGStoreConfig{
MaxConcurrentIndex: 5,
MaxConcurrencyStorageCalls: 100,
MaxConcurrentUnseals: 5,
GCInterval: Duration(1 * time.Minute),
},
}

View File

@ -162,6 +162,14 @@ Default value: 5.`,
Comment: `The maximum amount of unsealed deals that can be fetched simultaneously
from the storage subsystem. 0 means unlimited.
Default value: 0 (unlimited).`,
},
{
Name: "MaxConcurrentUnseals",
Type: "int",
Comment: `The maximum amount of unseals that can be processed simultaneously
from the storage subsystem. 0 means unlimited.
Default value: 5.`,
},
{

View File

@ -75,6 +75,11 @@ type DAGStoreConfig struct {
// Default value: 0 (unlimited).
MaxConcurrentReadyFetches int
// The maximum amount of unseals that can be processed simultaneously
// from the storage subsystem. 0 means unlimited.
// Default value: 5.
MaxConcurrentUnseals int
// The maximum number of simultaneous inflight API calls to the storage
// subsystem.
// Default value: 100.

View File

@ -37,6 +37,7 @@ import (
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
@ -386,8 +387,8 @@ func (sm *StorageMinerAPI) SectorPreCommitPending(ctx context.Context) ([]abi.Se
return sm.Miner.SectorPreCommitPending(ctx)
}
func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error {
return sm.Miner.MarkForUpgrade(id)
func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error {
return sm.Miner.MarkForUpgrade(ctx, id, snap)
}
func (sm *StorageMinerAPI) SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) {
@ -398,6 +399,10 @@ func (sm *StorageMinerAPI) SectorCommitPending(ctx context.Context) ([]abi.Secto
return sm.Miner.CommitPending(ctx)
}
func (sm *StorageMinerAPI) SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error {
return sm.Miner.SectorMatchPendingPiecesToOpenSectors(ctx)
}
func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error {
w, err := connectRemoteWorker(ctx, sm, url)
if err != nil {
@ -1155,8 +1160,8 @@ func (sm *StorageMinerAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocume
return build.OpenRPCDiscoverJSON_Miner(), nil
}
func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) {
return sm.Epp.ComputeProof(ctx, ssi, rand)
func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv network.Version) ([]builtin.PoStProof, error) {
return sm.Epp.ComputeProof(ctx, ssi, rand, poStEpoch, nv)
}
func (sm *StorageMinerAPI) RuntimeSubsystems(context.Context) (res api.MinerSubsystems, err error) {

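Callers opt into the snap-deals path through the new boolean. A hedged client-side sketch against the StorageMiner API (how the RPC client is obtained is elided):

package example

import (
	"context"
	"log"

	"github.com/filecoin-project/go-state-types/abi"
	lapi "github.com/filecoin-project/lotus/api"
)

// markSnap marks a sector for a snap-deals upgrade via the miner API; the
// lapi.StorageMiner client would come from lotus's RPC client helpers.
func markSnap(ctx context.Context, m lapi.StorageMiner, sn abi.SectorNumber) {
	if err := m.SectorMarkForUpgrade(ctx, sn, true); err != nil {
		log.Fatalf("marking sector %d for upgrade: %v", sn, err)
	}
}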
View File

@ -38,7 +38,7 @@ func NewMinerAPI(lc fx.Lifecycle, r repo.LockedRepo, pieceStore dtypes.ProviderP
}
}
mountApi := mdagstore.NewMinerAPI(pieceStore, sa, cfg.MaxConcurrencyStorageCalls)
mountApi := mdagstore.NewMinerAPI(pieceStore, sa, cfg.MaxConcurrencyStorageCalls, cfg.MaxConcurrentUnseals)
ready := make(chan error, 1)
pieceStore.OnReady(func(err error) {
ready <- err

View File

@ -112,6 +112,15 @@ func (s SealingAPIAdapter) StateMinerSectorAllocated(ctx context.Context, maddr
return s.delegate.StateMinerSectorAllocated(ctx, maddr, sid, tsk)
}
func (s SealingAPIAdapter) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) ([]*miner.SectorOnChainInfo, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
return nil, xerrors.Errorf("faile dto unmarshal TipSetToken to TipSetKey: %w", err)
}
return s.delegate.StateMinerActiveSectors(ctx, maddr, tsk)
}
func (s SealingAPIAdapter) StateWaitMsg(ctx context.Context, mcid cid.Cid) (sealing.MsgLookup, error) {
wmsg, err := s.delegate.StateWaitMsg(ctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true)
if err != nil {

Some files were not shown because too many files have changed in this diff.