Merge pull request #6466 from filecoin-project/chore/merge-1.10
Merge 1.10 changes into master
This commit is contained in: commit 5008914846
@@ -19,18 +19,18 @@ import (
 var baseFeeUpperBoundFactor = types.NewInt(10)
 
 // CheckMessages performs a set of logic checks for a list of messages, prior to submitting it to the mpool
-func (mp *MessagePool) CheckMessages(protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
+func (mp *MessagePool) CheckMessages(ctx context.Context, protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
     flex := make([]bool, len(protos))
     msgs := make([]*types.Message, len(protos))
     for i, p := range protos {
         flex[i] = !p.ValidNonce
         msgs[i] = &p.Message
     }
-    return mp.checkMessages(msgs, false, flex)
+    return mp.checkMessages(ctx, msgs, false, flex)
 }
 
 // CheckPendingMessages performs a set of logical sets for all messages pending from a given actor
-func (mp *MessagePool) CheckPendingMessages(from address.Address) ([][]api.MessageCheckStatus, error) {
+func (mp *MessagePool) CheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) {
     var msgs []*types.Message
     mp.lk.Lock()
     mset, ok := mp.pending[from]
@@ -49,12 +49,12 @@ func (mp *MessagePool) CheckPendingMessages(from address.Address) ([][]api.Messa
         return msgs[i].Nonce < msgs[j].Nonce
     })
 
-    return mp.checkMessages(msgs, true, nil)
+    return mp.checkMessages(ctx, msgs, true, nil)
 }
 
 // CheckReplaceMessages performs a set of logical checks for related messages while performing a
 // replacement.
-func (mp *MessagePool) CheckReplaceMessages(replace []*types.Message) ([][]api.MessageCheckStatus, error) {
+func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*types.Message) ([][]api.MessageCheckStatus, error) {
     msgMap := make(map[address.Address]map[uint64]*types.Message)
     count := 0
 
@@ -94,12 +94,12 @@ func (mp *MessagePool) CheckReplaceMessages(replace []*types.Message) ([][]api.M
         start = end
     }
 
-    return mp.checkMessages(msgs, true, nil)
+    return mp.checkMessages(ctx, msgs, true, nil)
 }
 
 // flexibleNonces should be either nil or of len(msgs), it signifies that message at given index
 // has non-determied nonce at this point
-func (mp *MessagePool) checkMessages(msgs []*types.Message, interned bool, flexibleNonces []bool) (result [][]api.MessageCheckStatus, err error) {
+func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message, interned bool, flexibleNonces []bool) (result [][]api.MessageCheckStatus, err error) {
     if mp.api.IsLite() {
         return nil, nil
     }
@@ -160,7 +160,7 @@ func (mp *MessagePool) checkMessages(msgs []*types.Message, interned bool, flexi
         } else {
             mp.lk.Unlock()
 
-            stateNonce, err := mp.getStateNonce(m.From, curTs)
+            stateNonce, err := mp.getStateNonce(ctx, m.From, curTs)
             if err != nil {
                 check.OK = false
                 check.Err = fmt.Sprintf("error retrieving state nonce: %s", err.Error())
@@ -193,7 +193,7 @@ func (mp *MessagePool) checkMessages(msgs []*types.Message, interned bool, flexi
 
         balance, ok := balances[m.From]
         if !ok {
-            balance, err = mp.getStateBalance(m.From, curTs)
+            balance, err = mp.getStateBalance(ctx, m.From, curTs)
             if err != nil {
                 check.OK = false
                 check.Err = fmt.Sprintf("error retrieving state balance: %s", err)
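The change above threads a context.Context through the mpool check API. A minimal caller sketch under assumptions not spelled out in the diff: a constructed *messagepool.MessagePool named mp, prototypes protos, and MessageCheckStatus exposing OK and Err as the diff's check.OK / check.Err usage suggests; the helper name firstFailedCheck is hypothetical.

package example

import (
    "context"
    "fmt"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/messagepool"
)

// firstFailedCheck is a sketch: it runs the context-aware CheckMessages and
// reports the first per-message check that did not pass.
func firstFailedCheck(ctx context.Context, mp *messagepool.MessagePool, protos []*api.MessagePrototype) error {
    results, err := mp.CheckMessages(ctx, protos)
    if err != nil {
        return err
    }
    for i, checks := range results {
        for _, c := range checks {
            if !c.OK {
                return fmt.Errorf("message %d failed a check: %s", i, c.Err)
            }
        }
    }
    return nil
}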
@@ -34,6 +34,7 @@ import (
     "github.com/filecoin-project/lotus/chain/vm"
     "github.com/filecoin-project/lotus/journal"
     "github.com/filecoin-project/lotus/lib/sigs"
+    "github.com/filecoin-project/lotus/metrics"
     "github.com/filecoin-project/lotus/node/modules/dtypes"
 
     "github.com/raulk/clock"
@@ -577,7 +578,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
     return nil
 }
 
-// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusio
+// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusion
 // and whether the message has enough funds to be included in the next 20 blocks.
 // If the message is not valid for block inclusion, it returns an error.
 // For local messages, if the message can be included in the next 20 blocks, it returns true to
@@ -631,6 +632,9 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
 }
 
 func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) {
+    done := metrics.Timer(ctx, metrics.MpoolPushDuration)
+    defer done()
+
     err := mp.checkMessage(m)
     if err != nil {
         return cid.Undef, err
@@ -697,6 +701,9 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
 }
 
 func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
+    done := metrics.Timer(ctx, metrics.MpoolAddDuration)
+    defer done()
+
     err := mp.checkMessage(m)
     if err != nil {
         return err
@@ -752,7 +759,7 @@ func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error {
 }
 
 func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet) error {
-    balance, err := mp.getStateBalance(m.Message.From, curTs)
+    balance, err := mp.getStateBalance(ctx, m.Message.From, curTs)
     if err != nil {
         return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure)
     }
@@ -785,7 +792,10 @@ func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage,
 }
 
 func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) {
-    snonce, err := mp.getStateNonce(m.Message.From, curTs)
+    done := metrics.Timer(ctx, metrics.MpoolAddTsDuration)
+    defer done()
+
+    snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs)
     if err != nil {
         return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
     }
@@ -833,7 +843,7 @@ func (mp *MessagePool) addLoaded(ctx context.Context, m *types.SignedMessage) er
         return xerrors.Errorf("current tipset not loaded")
     }
 
-    snonce, err := mp.getStateNonce(m.Message.From, curTs)
+    snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs)
     if err != nil {
         return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
     }
@@ -885,7 +895,7 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st
     }
 
     if !ok {
-        nonce, err := mp.getStateNonce(m.Message.From, mp.curTs)
+        nonce, err := mp.getStateNonce(ctx, m.Message.From, mp.curTs)
         if err != nil {
             return xerrors.Errorf("failed to get initial actor nonce: %w", err)
         }
@@ -946,7 +956,7 @@ func (mp *MessagePool) GetActor(_ context.Context, addr address.Address, _ types
 }
 
 func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address, curTs *types.TipSet) (uint64, error) {
-    stateNonce, err := mp.getStateNonce(addr, curTs) // sanity check
+    stateNonce, err := mp.getStateNonce(ctx, addr, curTs) // sanity check
     if err != nil {
         return 0, err
     }
@@ -970,7 +980,10 @@ func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address,
     return stateNonce, nil
 }
 
-func (mp *MessagePool) getStateNonce(addr address.Address, ts *types.TipSet) (uint64, error) {
+func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, ts *types.TipSet) (uint64, error) {
+    done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration)
+    defer done()
+
     act, err := mp.api.GetActorAfter(addr, ts)
     if err != nil {
         return 0, err
@@ -979,7 +992,10 @@ func (mp *MessagePool) getStateNonce(addr address.Address, ts *types.TipSet) (ui
     return act.Nonce, nil
 }
 
-func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) {
+func (mp *MessagePool) getStateBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (types.BigInt, error) {
+    done := metrics.Timer(ctx, metrics.MpoolGetBalanceDuration)
+    defer done()
+
     act, err := mp.api.GetActorAfter(addr, ts)
     if err != nil {
         return types.EmptyInt, err
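Push, Add, addTs, getStateNonce and getStateBalance are now wrapped in duration timers via metrics.Timer. A minimal sketch of what such a done-callback timer can look like with OpenCensus, assuming a *stats.Float64Measure like the mpool duration measures added later in this diff; the actual lotus helper may differ in its details.

package example

import (
    "context"
    "time"

    "go.opencensus.io/stats"
)

// Timer is a sketch of the done-callback pattern used above: it captures the
// start time and, when the returned function runs (typically deferred),
// records the elapsed time in milliseconds against the given measure.
func Timer(ctx context.Context, m *stats.Float64Measure) func() {
    start := time.Now()
    return func() {
        ms := float64(time.Since(start).Microseconds()) / 1000
        stats.Record(ctx, m.M(ms))
    }
}

Callers then use it exactly as in the diff: done := Timer(ctx, someDurationMeasure); defer done().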
@@ -507,6 +507,12 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
         return mv.validateLocalMessage(ctx, msg)
     }
 
+    start := time.Now()
+    defer func() {
+        ms := time.Now().Sub(start).Microseconds()
+        stats.Record(ctx, metrics.MessageValidationDuration.M(float64(ms)/1000))
+    }()
+
     stats.Record(ctx, metrics.MessageReceived.M(1))
     m, err := types.DecodeSignedMessage(msg.Message.GetData())
     if err != nil {
@@ -538,6 +544,12 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
             return pubsub.ValidationReject
         }
     }
+
+    ctx, _ = tag.New(
+        ctx,
+        tag.Upsert(metrics.MsgValid, "true"),
+    )
+
     stats.Record(ctx, metrics.MessageValidationSuccess.M(1))
     return pubsub.ValidationAccept
 }
@@ -547,6 +559,13 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu
         ctx,
         tag.Upsert(metrics.Local, "true"),
     )
+
+    start := time.Now()
+    defer func() {
+        ms := time.Now().Sub(start).Microseconds()
+        stats.Record(ctx, metrics.MessageValidationDuration.M(float64(ms)/1000))
+    }()
+
     // do some lightweight validation
     stats.Record(ctx, metrics.MessagePublished.M(1))
 
@@ -581,6 +600,11 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu
         return pubsub.ValidationIgnore
     }
 
+    ctx, _ = tag.New(
+        ctx,
+        tag.Upsert(metrics.MsgValid, "true"),
+    )
+
     stats.Record(ctx, metrics.MessageValidationSuccess.M(1))
     return pubsub.ValidationAccept
 }
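The validator changes use OpenCensus tags so that the recorded validation duration can later be broken down by outcome and locality in a view. A self-contained sketch of that tag-then-record pattern; the key and measure names here are illustrative, not the lotus ones.

package example

import (
    "context"
    "strconv"
    "time"

    "go.opencensus.io/stats"
    "go.opencensus.io/tag"
)

var (
    keyValid, _ = tag.NewKey("valid")
    durationMs  = stats.Float64("example/validation_ms", "Validation duration", stats.UnitMilliseconds)
)

// validate times the work, tags the context with the outcome, and records the
// duration; a view declaring TagKeys{keyValid} can then split the distribution
// into valid and invalid buckets.
func validate(ctx context.Context, work func() bool) bool {
    start := time.Now()
    ok := work()
    ctx, _ = tag.New(ctx, tag.Upsert(keyValid, strconv.FormatBool(ok)))
    stats.Record(ctx, durationMs.M(float64(time.Since(start).Microseconds())/1000))
    return ok
}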
@@ -295,6 +295,7 @@ var stateList = []stateMeta{
     {col: color.FgYellow, state: sealing.PreCommitBatchWait},
     {col: color.FgYellow, state: sealing.WaitSeed},
     {col: color.FgYellow, state: sealing.Committing},
+    {col: color.FgYellow, state: sealing.CommitFinalize},
     {col: color.FgYellow, state: sealing.SubmitCommit},
     {col: color.FgYellow, state: sealing.CommitWait},
     {col: color.FgYellow, state: sealing.SubmitCommitAggregate},
@@ -315,6 +316,7 @@ var stateList = []stateMeta{
     {col: color.FgRed, state: sealing.PreCommitFailed},
     {col: color.FgRed, state: sealing.ComputeProofFailed},
     {col: color.FgRed, state: sealing.CommitFailed},
+    {col: color.FgRed, state: sealing.CommitFinalizeFailed},
     {col: color.FgRed, state: sealing.PackingFailed},
     {col: color.FgRed, state: sealing.FinalizeFailed},
     {col: color.FgRed, state: sealing.Faulty},
extern/filecoin-ffi (vendored submodule, 2 changes)
@@ -1 +1 @@
-Subproject commit 1c7190dcc5bdef8042ca091129d6d3c10898dbdb
+Subproject commit 57a91e861d4858379b509db42603a9cbaf0421aa
extern/storage-sealing/fsm.go (vendored, 14 changes)
@@ -103,6 +103,10 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
         on(SectorChainPreCommitFailed{}, PreCommitFailed),
     ),
     Committing: planCommitting,
+    CommitFinalize: planOne(
+        on(SectorFinalized{}, SubmitCommit),
+        on(SectorFinalizeFailed{}, CommitFinalizeFailed),
+    ),
     SubmitCommit: planOne(
         on(SectorCommitSubmitted{}, CommitWait),
         on(SectorSubmitCommitAggregate{}, SubmitCommitAggregate),
@@ -151,6 +155,9 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
         on(SectorRetryComputeProof{}, Committing),
         on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
     ),
+    CommitFinalizeFailed: planOne(
+        on(SectorRetryFinalize{}, CommitFinalizeFailed),
+    ),
     CommitFailed: planOne(
         on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
         on(SectorRetryWaitSeed{}, WaitSeed),
@@ -379,6 +386,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
         fallthrough
     case CommitWait:
         return m.handleCommitWait, processed, nil
+    case CommitFinalize:
+        fallthrough
     case FinalizeSector:
         return m.handleFinalizeSector, processed, nil
 
@@ -393,6 +402,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
         return m.handleComputeProofFailed, processed, nil
     case CommitFailed:
         return m.handleCommitFailed, processed, nil
+    case CommitFinalizeFailed:
+        fallthrough
     case FinalizeFailed:
         return m.handleFinalizeFailed, processed, nil
     case PackingFailed: // DEPRECATED: remove this for the next reset
@@ -482,6 +493,9 @@ func planCommitting(events []statemachine.Event, state *SectorInfo) (uint64, err
         case SectorCommitted: // the normal case
             e.apply(state)
             state.State = SubmitCommit
+        case SectorProofReady: // early finalize
+            e.apply(state)
+            state.State = CommitFinalize
         case SectorSeedReady: // seed changed :/
             if e.SeedEpoch == state.SeedEpoch && bytes.Equal(e.SeedValue, state.SeedValue) {
                 log.Warnf("planCommitting: got SectorSeedReady, but the seed didn't change")
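The new planner entries add an early-finalize branch to the sealing state machine: from Committing, a SectorProofReady event now routes through CommitFinalize before SubmitCommit, with CommitFinalizeFailed as its failure state. A deliberately simplified, self-contained sketch of the added happy-path transitions; this is not the lotus statemachine package, just an illustration of the routing.

package main

import "fmt"

type state string

const (
    committing     state = "Committing"
    commitFinalize state = "CommitFinalize"
    submitCommit   state = "SubmitCommit"
)

// transitions mirrors the shape of the added fsmPlanners entries:
// per current state, event name -> next state.
var transitions = map[state]map[string]state{
    committing:     {"SectorProofReady": commitFinalize, "SectorCommitted": submitCommit},
    commitFinalize: {"SectorFinalized": submitCommit},
}

func step(cur state, event string) (state, error) {
    next, ok := transitions[cur][event]
    if !ok {
        return cur, fmt.Errorf("no transition from %s on %s", cur, event)
    }
    return next, nil
}

func main() {
    s := committing
    for _, ev := range []string{"SectorProofReady", "SectorFinalized"} {
        var err error
        if s, err = step(s, ev); err != nil {
            panic(err)
        }
        fmt.Println("now in", s)
    }
}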
extern/storage-sealing/fsm_events.go (vendored, 9 changes)
@@ -245,6 +245,15 @@ func (evt SectorCommitted) apply(state *SectorInfo) {
     state.Proof = evt.Proof
 }
 
+// like SectorCommitted, but finalizes before sending the proof to the chain
+type SectorProofReady struct {
+    Proof []byte
+}
+
+func (evt SectorProofReady) apply(state *SectorInfo) {
+    state.Proof = evt.Proof
+}
+
 type SectorSubmitCommitAggregate struct{}
 
 func (evt SectorSubmitCommitAggregate) apply(*SectorInfo) {}
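SectorProofReady follows the same event pattern as the other fsm_events types: a plain struct whose apply method folds its payload into the sector state. A generic sketch of that pattern with hypothetical names, not the lotus types.

package main

import "fmt"

type sectorInfo struct {
    Proof []byte
}

// event is the minimal contract each fsm event satisfies: it knows how to
// apply itself to the mutable sector state.
type event interface {
    apply(*sectorInfo)
}

type proofReady struct{ Proof []byte }

func (e proofReady) apply(s *sectorInfo) { s.Proof = e.Proof }

func main() {
    var s sectorInfo
    var e event = proofReady{Proof: []byte{0x01, 0x02}}
    e.apply(&s)
    fmt.Printf("proof now %x\n", s.Proof)
}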
extern/storage-sealing/fsm_test.go (vendored, 67 changes)
@@ -87,6 +87,73 @@ func TestHappyPath(t *testing.T) {
         }
     }
 }
 
+func TestHappyPathFinalizeEarly(t *testing.T) {
+    var notif []struct{ before, after SectorInfo }
+    ma, _ := address.NewIDAddress(55151)
+    m := test{
+        s: &Sealing{
+            maddr: ma,
+            stats: SectorStats{
+                bySector: map[abi.SectorID]statSectorState{},
+            },
+            notifee: func(before, after SectorInfo) {
+                notif = append(notif, struct{ before, after SectorInfo }{before, after})
+            },
+        },
+        t:     t,
+        state: &SectorInfo{State: Packing},
+    }
+
+    m.planSingle(SectorPacked{})
+    require.Equal(m.t, m.state.State, GetTicket)
+
+    m.planSingle(SectorTicket{})
+    require.Equal(m.t, m.state.State, PreCommit1)
+
+    m.planSingle(SectorPreCommit1{})
+    require.Equal(m.t, m.state.State, PreCommit2)
+
+    m.planSingle(SectorPreCommit2{})
+    require.Equal(m.t, m.state.State, PreCommitting)
+
+    m.planSingle(SectorPreCommitted{})
+    require.Equal(m.t, m.state.State, PreCommitWait)
+
+    m.planSingle(SectorPreCommitLanded{})
+    require.Equal(m.t, m.state.State, WaitSeed)
+
+    m.planSingle(SectorSeedReady{})
+    require.Equal(m.t, m.state.State, Committing)
+
+    m.planSingle(SectorProofReady{})
+    require.Equal(m.t, m.state.State, CommitFinalize)
+
+    m.planSingle(SectorFinalized{})
+    require.Equal(m.t, m.state.State, SubmitCommit)
+
+    m.planSingle(SectorSubmitCommitAggregate{})
+    require.Equal(m.t, m.state.State, SubmitCommitAggregate)
+
+    m.planSingle(SectorCommitAggregateSent{})
+    require.Equal(m.t, m.state.State, CommitWait)
+
+    m.planSingle(SectorProving{})
+    require.Equal(m.t, m.state.State, FinalizeSector)
+
+    m.planSingle(SectorFinalized{})
+    require.Equal(m.t, m.state.State, Proving)
+
+    expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, SubmitCommitAggregate, CommitWait, FinalizeSector, Proving}
+    for i, n := range notif {
+        if n.before.State != expected[i] {
+            t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State)
+        }
+        if n.after.State != expected[i+1] {
+            t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State)
+        }
+    }
+}
+
 func TestSeedRevert(t *testing.T) {
     ma, _ := address.NewIDAddress(55151)
     m := test{
extern/storage-sealing/sealiface/config.go (vendored, 2 changes)
@@ -18,6 +18,8 @@ type Config struct {
 
     AlwaysKeepUnsealedCopy bool
 
+    FinalizeEarly bool
+
     BatchPreCommits bool
     MaxPreCommitBatch int
     MinPreCommitBatch int
extern/storage-sealing/sector_state.go (vendored, 10 changes)
@@ -17,6 +17,8 @@ var ExistSectorStateList = map[SectorState]struct{}{
     PreCommitBatchWait:    {},
     WaitSeed:              {},
     Committing:            {},
+    CommitFinalize:        {},
+    CommitFinalizeFailed:  {},
     SubmitCommit:          {},
     CommitWait:            {},
     SubmitCommitAggregate: {},
@@ -63,8 +65,10 @@ const (
     SubmitPreCommitBatch SectorState = "SubmitPreCommitBatch"
     PreCommitBatchWait   SectorState = "PreCommitBatchWait"
 
-    WaitSeed   SectorState = "WaitSeed"   // waiting for seed
-    Committing SectorState = "Committing" // compute PoRep
+    WaitSeed             SectorState = "WaitSeed"       // waiting for seed
+    Committing           SectorState = "Committing"     // compute PoRep
+    CommitFinalize       SectorState = "CommitFinalize" // cleanup sector metadata before submitting the proof (early finalize)
+    CommitFinalizeFailed SectorState = "CommitFinalizeFailed"
 
     // single commit
     SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain
@@ -106,7 +110,7 @@ func toStatState(st SectorState) statSectorState {
     switch st {
     case UndefinedSectorState, Empty, WaitDeals, AddPiece:
         return sstStaging
-    case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector:
+    case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector:
         return sstSealing
     case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed:
         return sstProving
extern/storage-sealing/states_sealing.go (vendored, 25 changes)
@@ -478,6 +478,11 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
         }
     }
 
+    cfg, err := m.getConfig()
+    if err != nil {
+        return xerrors.Errorf("getting config: %w", err)
+    }
+
     log.Info("scheduling seal proof computation...")
 
     log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%x; d:%x", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD)
@@ -500,6 +505,24 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
         return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(2): %w", err)})
     }
 
+    {
+        tok, _, err := m.api.ChainHead(ctx.Context())
+        if err != nil {
+            log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
+            return nil
+        }
+
+        if err := m.checkCommit(ctx.Context(), sector, proof, tok); err != nil {
+            return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("commit check error: %w", err)})
+        }
+    }
+
+    if cfg.FinalizeEarly {
+        return ctx.Send(SectorProofReady{
+            Proof: proof,
+        })
+    }
+
     return ctx.Send(SectorCommitted{
         Proof: proof,
     })
@@ -524,7 +547,7 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
 
     tok, _, err := m.api.ChainHead(ctx.Context())
     if err != nil {
-        log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
+        log.Errorf("handleSubmitCommit: api error, not proceeding: %+v", err)
         return nil
     }
 
@@ -38,6 +38,7 @@ var (
     MessageTo, _ = tag.NewKey("message_to")
     MessageNonce, _ = tag.NewKey("message_nonce")
     ReceivedFrom, _ = tag.NewKey("received_from")
+    MsgValid, _ = tag.NewKey("message_valid")
     Endpoint, _ = tag.NewKey("endpoint")
     APIInterface, _ = tag.NewKey("api") // to distinguish between gateway api and full node api endpoint calls
 
@@ -61,6 +62,12 @@ var (
     MessageReceived = stats.Int64("message/received", "Counter for total received messages", stats.UnitDimensionless)
     MessageValidationFailure = stats.Int64("message/failure", "Counter for message validation failures", stats.UnitDimensionless)
     MessageValidationSuccess = stats.Int64("message/success", "Counter for message validation successes", stats.UnitDimensionless)
+    MessageValidationDuration = stats.Float64("message/validation_ms", "Duration of message validation", stats.UnitMilliseconds)
+    MpoolGetNonceDuration = stats.Float64("mpool/getnonce_ms", "Duration of getStateNonce in mpool", stats.UnitMilliseconds)
+    MpoolGetBalanceDuration = stats.Float64("mpool/getbalance_ms", "Duration of getStateBalance in mpool", stats.UnitMilliseconds)
+    MpoolAddTsDuration = stats.Float64("mpool/addts_ms", "Duration of addTs in mpool", stats.UnitMilliseconds)
+    MpoolAddDuration = stats.Float64("mpool/add_ms", "Duration of Add in mpool", stats.UnitMilliseconds)
+    MpoolPushDuration = stats.Float64("mpool/push_ms", "Duration of Push in mpool", stats.UnitMilliseconds)
     BlockPublished = stats.Int64("block/published", "Counter for total locally published blocks", stats.UnitDimensionless)
     BlockReceived = stats.Int64("block/received", "Counter for total received blocks", stats.UnitDimensionless)
     BlockValidationFailure = stats.Int64("block/failure", "Counter for block validation failures", stats.UnitDimensionless)
@@ -170,6 +177,31 @@ var (
         Measure: MessageValidationSuccess,
         Aggregation: view.Count(),
     }
+    MessageValidationDurationView = &view.View{
+        Measure: MessageValidationDuration,
+        Aggregation: defaultMillisecondsDistribution,
+        TagKeys: []tag.Key{MsgValid, Local},
+    }
+    MpoolGetNonceDurationView = &view.View{
+        Measure: MpoolGetNonceDuration,
+        Aggregation: defaultMillisecondsDistribution,
+    }
+    MpoolGetBalanceDurationView = &view.View{
+        Measure: MpoolGetBalanceDuration,
+        Aggregation: defaultMillisecondsDistribution,
+    }
+    MpoolAddTsDurationView = &view.View{
+        Measure: MpoolAddTsDuration,
+        Aggregation: defaultMillisecondsDistribution,
+    }
+    MpoolAddDurationView = &view.View{
+        Measure: MpoolAddDuration,
+        Aggregation: defaultMillisecondsDistribution,
+    }
+    MpoolPushDurationView = &view.View{
+        Measure: MpoolPushDuration,
+        Aggregation: defaultMillisecondsDistribution,
+    }
     PeerCountView = &view.View{
         Measure: PeerCount,
         Aggregation: view.LastValue(),
@@ -313,6 +345,12 @@ var ChainNodeViews = append([]*view.View{
     MessageReceivedView,
     MessageValidationFailureView,
    MessageValidationSuccessView,
+    MessageValidationDurationView,
+    MpoolGetNonceDurationView,
+    MpoolGetBalanceDurationView,
+    MpoolAddTsDurationView,
+    MpoolAddDurationView,
+    MpoolPushDurationView,
     PubsubPublishMessageView,
     PubsubDeliverMessageView,
     PubsubRejectMessageView,
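For the new duration measures to be exported they must be backed by registered views, which is what appending them to ChainNodeViews accomplishes. A self-contained OpenCensus sketch of the same define, register, record flow; the measure and view names and the explicit distribution bounds here are illustrative stand-ins for the lotus ones (lotus uses its internal defaultMillisecondsDistribution).

package main

import (
    "context"
    "log"

    "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
)

var (
    pushMs = stats.Float64("example/push_ms", "Duration of Push", stats.UnitMilliseconds)

    pushMsView = &view.View{
        Measure:     pushMs,
        Aggregation: view.Distribution(1, 5, 10, 50, 100, 500, 1000),
    }
)

func main() {
    // A view must be registered before recordings against its measure are kept.
    if err := view.Register(pushMsView); err != nil {
        log.Fatal(err)
    }
    stats.Record(context.Background(), pushMs.M(12.5))
}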
@@ -86,6 +86,9 @@ type SealingConfig struct {
 
     AlwaysKeepUnsealedCopy bool
 
+    // Run sector finalization before submitting sector proof to the chain
+    FinalizeEarly bool
+
     // enable / disable precommit batching (takes effect after nv13)
     BatchPreCommits bool
     // maximum precommit batch size - batches will be sent immediately above this size
@@ -279,6 +282,7 @@ func DefaultStorageMiner() *StorageMiner {
         MaxSealingSectorsForDeals: 0,
         WaitDealsDelay: Duration(time.Hour * 6),
         AlwaysKeepUnsealedCopy: true,
+        FinalizeEarly: false,
 
         BatchPreCommits: true,
         MinPreCommitBatch: 1, // we must have at least one precommit to batch
@@ -226,15 +226,15 @@ func (a *MpoolAPI) MpoolBatchPushMessage(ctx context.Context, msgs []*types.Mess
 }
 
 func (a *MpoolAPI) MpoolCheckMessages(ctx context.Context, protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
-    return a.Mpool.CheckMessages(protos)
+    return a.Mpool.CheckMessages(ctx, protos)
 }
 
 func (a *MpoolAPI) MpoolCheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) {
-    return a.Mpool.CheckPendingMessages(from)
+    return a.Mpool.CheckPendingMessages(ctx, from)
 }
 
 func (a *MpoolAPI) MpoolCheckReplaceMessages(ctx context.Context, msgs []*types.Message) ([][]api.MessageCheckStatus, error) {
-    return a.Mpool.CheckReplaceMessages(msgs)
+    return a.Mpool.CheckReplaceMessages(ctx, msgs)
 }
 
 func (a *MpoolAPI) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) {
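With the MpoolAPI wrappers forwarding ctx, API consumers call these endpoints as before; the context they already pass now reaches the pool's nonce and balance lookups. A sketch of a consumer, assuming an already connected api.FullNode client named node and that the v1 FullNode interface exposes these Mpool check endpoints as the wrappers above suggest; pendingOK is a hypothetical helper.

package example

import (
    "context"
    "fmt"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/lotus/api"
)

// pendingOK asks the node to check every message pending from addr and
// reports whether all checks passed.
func pendingOK(ctx context.Context, node api.FullNode, addr address.Address) (bool, error) {
    results, err := node.MpoolCheckPendingMessages(ctx, addr)
    if err != nil {
        return false, err
    }
    for _, checks := range results {
        for _, c := range checks {
            if !c.OK {
                return false, fmt.Errorf("check failed: %s", c.Err)
            }
        }
    }
    return true, nil
}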
@@ -834,6 +834,7 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error
         MaxSealingSectorsForDeals: cfg.MaxSealingSectorsForDeals,
         WaitDealsDelay: config.Duration(cfg.WaitDealsDelay),
         AlwaysKeepUnsealedCopy: cfg.AlwaysKeepUnsealedCopy,
+        FinalizeEarly: cfg.FinalizeEarly,
 
         BatchPreCommits: cfg.BatchPreCommits,
         MinPreCommitBatch: cfg.MinPreCommitBatch,
@@ -865,6 +866,7 @@ func NewGetSealConfigFunc(r repo.LockedRepo) (dtypes.GetSealingConfigFunc, error
         MaxSealingSectorsForDeals: cfg.Sealing.MaxSealingSectorsForDeals,
         WaitDealsDelay: time.Duration(cfg.Sealing.WaitDealsDelay),
         AlwaysKeepUnsealedCopy: cfg.Sealing.AlwaysKeepUnsealedCopy,
+        FinalizeEarly: cfg.Sealing.FinalizeEarly,
 
         BatchPreCommits: cfg.Sealing.BatchPreCommits,
         MinPreCommitBatch: cfg.Sealing.MinPreCommitBatch,