Merge pull request #6466 from filecoin-project/chore/merge-1.10

Merge 1.10 changes into master
Łukasz Magiera 2021-06-14 17:58:10 +02:00 committed by GitHub
commit 5008914846
15 changed files with 230 additions and 25 deletions


@@ -19,18 +19,18 @@ import (
var baseFeeUpperBoundFactor = types.NewInt(10)
// CheckMessages performs a set of logic checks for a list of messages, prior to submitting it to the mpool
-func (mp *MessagePool) CheckMessages(protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
+func (mp *MessagePool) CheckMessages(ctx context.Context, protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
flex := make([]bool, len(protos))
msgs := make([]*types.Message, len(protos))
for i, p := range protos {
flex[i] = !p.ValidNonce
msgs[i] = &p.Message
}
-return mp.checkMessages(msgs, false, flex)
+return mp.checkMessages(ctx, msgs, false, flex)
}
// CheckPendingMessages performs a set of logic checks for all messages pending from a given actor
-func (mp *MessagePool) CheckPendingMessages(from address.Address) ([][]api.MessageCheckStatus, error) {
+func (mp *MessagePool) CheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) {
var msgs []*types.Message
mp.lk.Lock()
mset, ok := mp.pending[from]
@@ -49,12 +49,12 @@ func (mp *MessagePool) CheckPendingMessages(from address.Address) ([][]api.Messa
return msgs[i].Nonce < msgs[j].Nonce
})
-return mp.checkMessages(msgs, true, nil)
+return mp.checkMessages(ctx, msgs, true, nil)
}
// CheckReplaceMessages performs a set of logical checks for related messages while performing a
// replacement.
-func (mp *MessagePool) CheckReplaceMessages(replace []*types.Message) ([][]api.MessageCheckStatus, error) {
+func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*types.Message) ([][]api.MessageCheckStatus, error) {
msgMap := make(map[address.Address]map[uint64]*types.Message)
count := 0
@@ -94,12 +94,12 @@ func (mp *MessagePool) CheckReplaceMessages(replace []*types.Message) ([][]api.M
start = end
}
-return mp.checkMessages(msgs, true, nil)
+return mp.checkMessages(ctx, msgs, true, nil)
}
// flexibleNonces should be either nil or of len(msgs), it signifies that message at given index
// has non-determined nonce at this point
-func (mp *MessagePool) checkMessages(msgs []*types.Message, interned bool, flexibleNonces []bool) (result [][]api.MessageCheckStatus, err error) {
+func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message, interned bool, flexibleNonces []bool) (result [][]api.MessageCheckStatus, err error) {
if mp.api.IsLite() {
return nil, nil
}
@@ -160,7 +160,7 @@ func (mp *MessagePool) checkMessages(msgs []*types.Message, interned bool, flexi
} else {
mp.lk.Unlock()
-stateNonce, err := mp.getStateNonce(m.From, curTs)
+stateNonce, err := mp.getStateNonce(ctx, m.From, curTs)
if err != nil {
check.OK = false
check.Err = fmt.Sprintf("error retrieving state nonce: %s", err.Error())
@@ -193,7 +193,7 @@ func (mp *MessagePool) checkMessages(msgs []*types.Message, interned bool, flexi
balance, ok := balances[m.From]
if !ok {
-balance, err = mp.getStateBalance(m.From, curTs)
+balance, err = mp.getStateBalance(ctx, m.From, curTs)
if err != nil {
check.OK = false
check.Err = fmt.Sprintf("error retrieving state balance: %s", err)
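The signature changes above mean callers now thread a context into the check API. A minimal sketch of driving it, with a hypothetical helper name and only the OK/Err fields that this diff itself touches; not code from this commit:

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/messagepool"
)

// reportFailedChecks runs the context-aware mpool checks and prints anything
// that did not pass. mp and protos are assumed to be set up by the caller.
func reportFailedChecks(ctx context.Context, mp *messagepool.MessagePool, protos []*api.MessagePrototype) error {
	checks, err := mp.CheckMessages(ctx, protos)
	if err != nil {
		return err
	}
	for i, msgChecks := range checks {
		for _, c := range msgChecks {
			if !c.OK {
				fmt.Printf("message %d failed a check: %s\n", i, c.Err)
			}
		}
	}
	return nil
}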


@@ -34,6 +34,7 @@ import (
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/lib/sigs"
+"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/raulk/clock"
@@ -577,7 +578,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
return nil
}
-// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusio
+// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusion
// and whether the message has enough funds to be included in the next 20 blocks.
// If the message is not valid for block inclusion, it returns an error.
// For local messages, if the message can be included in the next 20 blocks, it returns true to
@@ -631,6 +632,9 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
}
func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) {
+done := metrics.Timer(ctx, metrics.MpoolPushDuration)
+defer done()
err := mp.checkMessage(m)
if err != nil {
return cid.Undef, err
@@ -697,6 +701,9 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
}
func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
+done := metrics.Timer(ctx, metrics.MpoolAddDuration)
+defer done()
err := mp.checkMessage(m)
if err != nil {
return err
@@ -752,7 +759,7 @@ func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error {
}
func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet) error {
-balance, err := mp.getStateBalance(m.Message.From, curTs)
+balance, err := mp.getStateBalance(ctx, m.Message.From, curTs)
if err != nil {
return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure)
}
@@ -785,7 +792,10 @@ func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage,
}
func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) {
-snonce, err := mp.getStateNonce(m.Message.From, curTs)
+done := metrics.Timer(ctx, metrics.MpoolAddTsDuration)
+defer done()
+snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs)
if err != nil {
return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
}
@@ -833,7 +843,7 @@ func (mp *MessagePool) addLoaded(ctx context.Context, m *types.SignedMessage) er
return xerrors.Errorf("current tipset not loaded")
}
-snonce, err := mp.getStateNonce(m.Message.From, curTs)
+snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs)
if err != nil {
return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
}
@@ -885,7 +895,7 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st
}
if !ok {
-nonce, err := mp.getStateNonce(m.Message.From, mp.curTs)
+nonce, err := mp.getStateNonce(ctx, m.Message.From, mp.curTs)
if err != nil {
return xerrors.Errorf("failed to get initial actor nonce: %w", err)
}
@@ -946,7 +956,7 @@ func (mp *MessagePool) GetActor(_ context.Context, addr address.Address, _ types
}
func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address, curTs *types.TipSet) (uint64, error) {
-stateNonce, err := mp.getStateNonce(addr, curTs) // sanity check
+stateNonce, err := mp.getStateNonce(ctx, addr, curTs) // sanity check
if err != nil {
return 0, err
}
@@ -970,7 +980,10 @@ func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address,
return stateNonce, nil
}
-func (mp *MessagePool) getStateNonce(addr address.Address, ts *types.TipSet) (uint64, error) {
+func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, ts *types.TipSet) (uint64, error) {
+done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration)
+defer done()
act, err := mp.api.GetActorAfter(addr, ts)
if err != nil {
return 0, err
@@ -979,7 +992,10 @@ func (mp *MessagePool) getStateNonce(addr address.Address, ts *types.TipSet) (ui
return act.Nonce, nil
}
-func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) {
+func (mp *MessagePool) getStateBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (types.BigInt, error) {
+done := metrics.Timer(ctx, metrics.MpoolGetBalanceDuration)
+defer done()
act, err := mp.api.GetActorAfter(addr, ts)
if err != nil {
return types.EmptyInt, err
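The Push/Add/addTs/getStateNonce/getStateBalance instrumentation above leans on a metrics.Timer helper that starts a clock and records on the deferred call. A minimal sketch of such a helper with OpenCensus, shown only to illustrate the pattern; the actual helper in the lotus metrics package may differ in detail:

package metrics // sketch of the helper's shape, not the actual lotus source

import (
	"context"
	"time"

	"go.opencensus.io/stats"
)

// Timer starts timing when called; the returned func, typically deferred,
// records the elapsed wall-clock time in milliseconds into m.
func Timer(ctx context.Context, m *stats.Float64Measure) func() {
	start := time.Now()
	return func() {
		stats.Record(ctx, m.M(float64(time.Since(start).Nanoseconds())/1e6))
	}
}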


@@ -507,6 +507,12 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
return mv.validateLocalMessage(ctx, msg)
}
+start := time.Now()
+defer func() {
+ms := time.Now().Sub(start).Microseconds()
+stats.Record(ctx, metrics.MessageValidationDuration.M(float64(ms)/1000))
+}()
stats.Record(ctx, metrics.MessageReceived.M(1))
m, err := types.DecodeSignedMessage(msg.Message.GetData())
if err != nil {
@@ -538,6 +544,12 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
return pubsub.ValidationReject
}
}
+ctx, _ = tag.New(
+ctx,
+tag.Upsert(metrics.MsgValid, "true"),
+)
stats.Record(ctx, metrics.MessageValidationSuccess.M(1))
return pubsub.ValidationAccept
}
@@ -547,6 +559,13 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu
ctx,
tag.Upsert(metrics.Local, "true"),
)
+start := time.Now()
+defer func() {
+ms := time.Now().Sub(start).Microseconds()
+stats.Record(ctx, metrics.MessageValidationDuration.M(float64(ms)/1000))
+}()
// do some lightweight validation
stats.Record(ctx, metrics.MessagePublished.M(1))
@@ -581,6 +600,11 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu
return pubsub.ValidationIgnore
}
+ctx, _ = tag.New(
+ctx,
+tag.Upsert(metrics.MsgValid, "true"),
+)
stats.Record(ctx, metrics.MessageValidationSuccess.M(1))
return pubsub.ValidationAccept
}
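Because the deferred closure re-reads the ctx variable when the validator returns, the MsgValid tag upserted on the accept path is also attached to the recorded duration, which is what lets the MsgValid/Local tag keys split the histogram. A standalone sketch of that capture behaviour, with hypothetical names (validKey, durMs); not code from this diff:

package example

import (
	"context"
	"time"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

var (
	validKey, _ = tag.NewKey("valid")
	durMs       = stats.Float64("example/duration_ms", "example duration", stats.UnitMilliseconds)
)

// timedValidate shows why the deferred record above picks up the MsgValid tag:
// the closure reads ctx when it runs, after the tag has been upserted.
func timedValidate(ctx context.Context) {
	start := time.Now()
	defer func() {
		stats.Record(ctx, durMs.M(float64(time.Since(start).Microseconds())/1000))
	}()

	// ... validation work elided ...

	ctx, _ = tag.New(ctx, tag.Upsert(validKey, "true"))
}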


@@ -295,6 +295,7 @@ var stateList = []stateMeta{
{col: color.FgYellow, state: sealing.PreCommitBatchWait},
{col: color.FgYellow, state: sealing.WaitSeed},
{col: color.FgYellow, state: sealing.Committing},
+{col: color.FgYellow, state: sealing.CommitFinalize},
{col: color.FgYellow, state: sealing.SubmitCommit},
{col: color.FgYellow, state: sealing.CommitWait},
{col: color.FgYellow, state: sealing.SubmitCommitAggregate},
@@ -315,6 +316,7 @@ var stateList = []stateMeta{
{col: color.FgRed, state: sealing.PreCommitFailed},
{col: color.FgRed, state: sealing.ComputeProofFailed},
{col: color.FgRed, state: sealing.CommitFailed},
+{col: color.FgRed, state: sealing.CommitFinalizeFailed},
{col: color.FgRed, state: sealing.PackingFailed},
{col: color.FgRed, state: sealing.FinalizeFailed},
{col: color.FgRed, state: sealing.Faulty},

extern/filecoin-ffi vendored

@@ -1 +1 @@
-Subproject commit 1c7190dcc5bdef8042ca091129d6d3c10898dbdb
+Subproject commit 57a91e861d4858379b509db42603a9cbaf0421aa


@@ -103,6 +103,10 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorChainPreCommitFailed{}, PreCommitFailed),
),
Committing: planCommitting,
+CommitFinalize: planOne(
+on(SectorFinalized{}, SubmitCommit),
+on(SectorFinalizeFailed{}, CommitFinalizeFailed),
+),
SubmitCommit: planOne(
on(SectorCommitSubmitted{}, CommitWait),
on(SectorSubmitCommitAggregate{}, SubmitCommitAggregate),
@@ -151,6 +155,9 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorRetryComputeProof{}, Committing),
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
),
+CommitFinalizeFailed: planOne(
+on(SectorRetryFinalize{}, CommitFinalizeFailed),
+),
CommitFailed: planOne(
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
on(SectorRetryWaitSeed{}, WaitSeed),
@@ -379,6 +386,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
fallthrough
case CommitWait:
return m.handleCommitWait, processed, nil
+case CommitFinalize:
+fallthrough
case FinalizeSector:
return m.handleFinalizeSector, processed, nil
@@ -393,6 +402,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
return m.handleComputeProofFailed, processed, nil
case CommitFailed:
return m.handleCommitFailed, processed, nil
+case CommitFinalizeFailed:
+fallthrough
case FinalizeFailed:
return m.handleFinalizeFailed, processed, nil
case PackingFailed: // DEPRECATED: remove this for the next reset
@@ -482,6 +493,9 @@ func planCommitting(events []statemachine.Event, state *SectorInfo) (uint64, err
case SectorCommitted: // the normal case
e.apply(state)
state.State = SubmitCommit
+case SectorProofReady: // early finalize
+e.apply(state)
+state.State = CommitFinalize
case SectorSeedReady: // seed changed :/
if e.SeedEpoch == state.SeedEpoch && bytes.Equal(e.SeedValue, state.SeedValue) {
log.Warnf("planCommitting: got SectorSeedReady, but the seed didn't change")
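Read together, the planners above insert an early-finalize detour into the commit path: Committing can now emit SectorProofReady, the sector is finalized under CommitFinalize, and only then does it move on to SubmitCommit. A self-contained, deliberately simplified illustration of that sequence (plain Go, not lotus code):

package main

import "fmt"

func main() {
	// Happy path with early finalize, as encoded by the planners above.
	transitions := map[string]map[string]string{
		"Committing":     {"SectorProofReady": "CommitFinalize"},
		"CommitFinalize": {"SectorFinalized": "SubmitCommit", "SectorFinalizeFailed": "CommitFinalizeFailed"},
		"SubmitCommit":   {"SectorCommitSubmitted": "CommitWait"},
	}
	state := "Committing"
	for _, ev := range []string{"SectorProofReady", "SectorFinalized", "SectorCommitSubmitted"} {
		state = transitions[state][ev]
		fmt.Printf("%s -> %s\n", ev, state)
	}
}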


@@ -245,6 +245,15 @@ func (evt SectorCommitted) apply(state *SectorInfo) {
state.Proof = evt.Proof
}
+// like SectorCommitted, but finalizes before sending the proof to the chain
+type SectorProofReady struct {
+Proof []byte
+}
+func (evt SectorProofReady) apply(state *SectorInfo) {
+state.Proof = evt.Proof
+}
type SectorSubmitCommitAggregate struct{}
func (evt SectorSubmitCommitAggregate) apply(*SectorInfo) {}


@@ -87,6 +87,73 @@ func TestHappyPath(t *testing.T) {
}
}
+func TestHappyPathFinalizeEarly(t *testing.T) {
+var notif []struct{ before, after SectorInfo }
+ma, _ := address.NewIDAddress(55151)
+m := test{
+s: &Sealing{
+maddr: ma,
+stats: SectorStats{
+bySector: map[abi.SectorID]statSectorState{},
+},
+notifee: func(before, after SectorInfo) {
+notif = append(notif, struct{ before, after SectorInfo }{before, after})
+},
+},
+t: t,
+state: &SectorInfo{State: Packing},
+}
+m.planSingle(SectorPacked{})
+require.Equal(m.t, m.state.State, GetTicket)
+m.planSingle(SectorTicket{})
+require.Equal(m.t, m.state.State, PreCommit1)
+m.planSingle(SectorPreCommit1{})
+require.Equal(m.t, m.state.State, PreCommit2)
+m.planSingle(SectorPreCommit2{})
+require.Equal(m.t, m.state.State, PreCommitting)
+m.planSingle(SectorPreCommitted{})
+require.Equal(m.t, m.state.State, PreCommitWait)
+m.planSingle(SectorPreCommitLanded{})
+require.Equal(m.t, m.state.State, WaitSeed)
+m.planSingle(SectorSeedReady{})
+require.Equal(m.t, m.state.State, Committing)
+m.planSingle(SectorProofReady{})
+require.Equal(m.t, m.state.State, CommitFinalize)
+m.planSingle(SectorFinalized{})
+require.Equal(m.t, m.state.State, SubmitCommit)
+m.planSingle(SectorSubmitCommitAggregate{})
+require.Equal(m.t, m.state.State, SubmitCommitAggregate)
+m.planSingle(SectorCommitAggregateSent{})
+require.Equal(m.t, m.state.State, CommitWait)
+m.planSingle(SectorProving{})
+require.Equal(m.t, m.state.State, FinalizeSector)
+m.planSingle(SectorFinalized{})
+require.Equal(m.t, m.state.State, Proving)
+expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, SubmitCommitAggregate, CommitWait, FinalizeSector, Proving}
+for i, n := range notif {
+if n.before.State != expected[i] {
+t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State)
+}
+if n.after.State != expected[i+1] {
+t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State)
+}
+}
+}
func TestSeedRevert(t *testing.T) {
ma, _ := address.NewIDAddress(55151)
m := test{


@@ -18,6 +18,8 @@ type Config struct {
AlwaysKeepUnsealedCopy bool
+FinalizeEarly bool
BatchPreCommits bool
MaxPreCommitBatch int
MinPreCommitBatch int


@@ -17,6 +17,8 @@ var ExistSectorStateList = map[SectorState]struct{}{
PreCommitBatchWait: {},
WaitSeed: {},
Committing: {},
+CommitFinalize: {},
+CommitFinalizeFailed: {},
SubmitCommit: {},
CommitWait: {},
SubmitCommitAggregate: {},
@@ -63,8 +65,10 @@ const (
SubmitPreCommitBatch SectorState = "SubmitPreCommitBatch"
PreCommitBatchWait SectorState = "PreCommitBatchWait"
WaitSeed SectorState = "WaitSeed" // waiting for seed
Committing SectorState = "Committing" // compute PoRep
+CommitFinalize SectorState = "CommitFinalize" // cleanup sector metadata before submitting the proof (early finalize)
+CommitFinalizeFailed SectorState = "CommitFinalizeFailed"
// single commit
SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain
@@ -106,7 +110,7 @@ func toStatState(st SectorState) statSectorState {
switch st {
case UndefinedSectorState, Empty, WaitDeals, AddPiece:
return sstStaging
-case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector:
+case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector:
return sstSealing
case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed:
return sstProving


@@ -478,6 +478,11 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
}
}
+cfg, err := m.getConfig()
+if err != nil {
+return xerrors.Errorf("getting config: %w", err)
+}
log.Info("scheduling seal proof computation...")
log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%x; d:%x", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD)
@@ -500,6 +505,24 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(2): %w", err)})
}
+{
+tok, _, err := m.api.ChainHead(ctx.Context())
+if err != nil {
+log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
+return nil
+}
+if err := m.checkCommit(ctx.Context(), sector, proof, tok); err != nil {
+return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("commit check error: %w", err)})
+}
+}
+if cfg.FinalizeEarly {
+return ctx.Send(SectorProofReady{
+Proof: proof,
+})
+}
return ctx.Send(SectorCommitted{
Proof: proof,
})
@@ -524,7 +547,7 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
tok, _, err := m.api.ChainHead(ctx.Context())
if err != nil {
-log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
+log.Errorf("handleSubmitCommit: api error, not proceeding: %+v", err)
return nil
}


@@ -38,6 +38,7 @@ var (
MessageTo, _ = tag.NewKey("message_to")
MessageNonce, _ = tag.NewKey("message_nonce")
ReceivedFrom, _ = tag.NewKey("received_from")
+MsgValid, _ = tag.NewKey("message_valid")
Endpoint, _ = tag.NewKey("endpoint")
APIInterface, _ = tag.NewKey("api") // to distinguish between gateway api and full node api endpoint calls
@@ -61,6 +62,12 @@ var (
MessageReceived = stats.Int64("message/received", "Counter for total received messages", stats.UnitDimensionless)
MessageValidationFailure = stats.Int64("message/failure", "Counter for message validation failures", stats.UnitDimensionless)
MessageValidationSuccess = stats.Int64("message/success", "Counter for message validation successes", stats.UnitDimensionless)
+MessageValidationDuration = stats.Float64("message/validation_ms", "Duration of message validation", stats.UnitMilliseconds)
+MpoolGetNonceDuration = stats.Float64("mpool/getnonce_ms", "Duration of getStateNonce in mpool", stats.UnitMilliseconds)
+MpoolGetBalanceDuration = stats.Float64("mpool/getbalance_ms", "Duration of getStateBalance in mpool", stats.UnitMilliseconds)
+MpoolAddTsDuration = stats.Float64("mpool/addts_ms", "Duration of addTs in mpool", stats.UnitMilliseconds)
+MpoolAddDuration = stats.Float64("mpool/add_ms", "Duration of Add in mpool", stats.UnitMilliseconds)
+MpoolPushDuration = stats.Float64("mpool/push_ms", "Duration of Push in mpool", stats.UnitMilliseconds)
BlockPublished = stats.Int64("block/published", "Counter for total locally published blocks", stats.UnitDimensionless)
BlockReceived = stats.Int64("block/received", "Counter for total received blocks", stats.UnitDimensionless)
BlockValidationFailure = stats.Int64("block/failure", "Counter for block validation failures", stats.UnitDimensionless)
@@ -170,6 +177,31 @@ var (
Measure: MessageValidationSuccess,
Aggregation: view.Count(),
}
+MessageValidationDurationView = &view.View{
+Measure: MessageValidationDuration,
+Aggregation: defaultMillisecondsDistribution,
+TagKeys: []tag.Key{MsgValid, Local},
+}
+MpoolGetNonceDurationView = &view.View{
+Measure: MpoolGetNonceDuration,
+Aggregation: defaultMillisecondsDistribution,
+}
+MpoolGetBalanceDurationView = &view.View{
+Measure: MpoolGetBalanceDuration,
+Aggregation: defaultMillisecondsDistribution,
+}
+MpoolAddTsDurationView = &view.View{
+Measure: MpoolAddTsDuration,
+Aggregation: defaultMillisecondsDistribution,
+}
+MpoolAddDurationView = &view.View{
+Measure: MpoolAddDuration,
+Aggregation: defaultMillisecondsDistribution,
+}
+MpoolPushDurationView = &view.View{
+Measure: MpoolPushDuration,
+Aggregation: defaultMillisecondsDistribution,
+}
PeerCountView = &view.View{
Measure: PeerCount,
Aggregation: view.LastValue(),
@@ -313,6 +345,12 @@ var ChainNodeViews = append([]*view.View{
MessageReceivedView,
MessageValidationFailureView,
MessageValidationSuccessView,
+MessageValidationDurationView,
+MpoolGetNonceDurationView,
+MpoolGetBalanceDurationView,
+MpoolAddTsDurationView,
+MpoolAddDurationView,
+MpoolPushDurationView,
PubsubPublishMessageView,
PubsubDeliverMessageView,
PubsubRejectMessageView,
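Measures only become visible to an exporter once their views are registered. Because the new views are appended to ChainNodeViews, they ride along with the node's existing registration; a minimal sketch of that step using the OpenCensus view package (where lotus actually performs this registration is not shown in this diff):

package example

import (
	"go.opencensus.io/stats/view"

	"github.com/filecoin-project/lotus/metrics"
)

// registerChainNodeViews exposes the chain-node view set, including the new
// mpool and message-validation duration histograms, to whichever stats
// exporter (e.g. Prometheus) the process has configured.
func registerChainNodeViews() error {
	return view.Register(metrics.ChainNodeViews...)
}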


@@ -86,6 +86,9 @@ type SealingConfig struct {
AlwaysKeepUnsealedCopy bool
+// Run sector finalization before submitting sector proof to the chain
+FinalizeEarly bool
// enable / disable precommit batching (takes effect after nv13)
BatchPreCommits bool
// maximum precommit batch size - batches will be sent immediately above this size
@@ -279,6 +282,7 @@ func DefaultStorageMiner() *StorageMiner {
MaxSealingSectorsForDeals: 0,
WaitDealsDelay: Duration(time.Hour * 6),
AlwaysKeepUnsealedCopy: true,
+FinalizeEarly: false,
BatchPreCommits: true,
MinPreCommitBatch: 1, // we must have at least one precommit to batch


@@ -226,15 +226,15 @@ func (a *MpoolAPI) MpoolBatchPushMessage(ctx context.Context, msgs []*types.Mess
}
func (a *MpoolAPI) MpoolCheckMessages(ctx context.Context, protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
-return a.Mpool.CheckMessages(protos)
+return a.Mpool.CheckMessages(ctx, protos)
}
func (a *MpoolAPI) MpoolCheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) {
-return a.Mpool.CheckPendingMessages(from)
+return a.Mpool.CheckPendingMessages(ctx, from)
}
func (a *MpoolAPI) MpoolCheckReplaceMessages(ctx context.Context, msgs []*types.Message) ([][]api.MessageCheckStatus, error) {
-return a.Mpool.CheckReplaceMessages(msgs)
+return a.Mpool.CheckReplaceMessages(ctx, msgs)
}
func (a *MpoolAPI) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) {


@@ -834,6 +834,7 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error
MaxSealingSectorsForDeals: cfg.MaxSealingSectorsForDeals,
WaitDealsDelay: config.Duration(cfg.WaitDealsDelay),
AlwaysKeepUnsealedCopy: cfg.AlwaysKeepUnsealedCopy,
+FinalizeEarly: cfg.FinalizeEarly,
BatchPreCommits: cfg.BatchPreCommits,
MinPreCommitBatch: cfg.MinPreCommitBatch,
@@ -865,6 +866,7 @@ func NewGetSealConfigFunc(r repo.LockedRepo) (dtypes.GetSealingConfigFunc, error
MaxSealingSectorsForDeals: cfg.Sealing.MaxSealingSectorsForDeals,
WaitDealsDelay: time.Duration(cfg.Sealing.WaitDealsDelay),
AlwaysKeepUnsealedCopy: cfg.Sealing.AlwaysKeepUnsealedCopy,
+FinalizeEarly: cfg.Sealing.FinalizeEarly,
BatchPreCommits: cfg.Sealing.BatchPreCommits,
MinPreCommitBatch: cfg.Sealing.MinPreCommitBatch,
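The Set/Get pair above is also how the new flag can be flipped programmatically. A hedged sketch, assuming the dtypes.GetSealingConfigFunc / dtypes.SetSealingConfigFunc function types wired up here; operators would normally set FinalizeEarly in the miner's config file instead:

package example

import (
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

// enableEarlyFinalize reads the current sealing config, flips FinalizeEarly,
// and writes it back through the same plumbing shown above.
func enableEarlyFinalize(get dtypes.GetSealingConfigFunc, set dtypes.SetSealingConfigFunc) error {
	cfg, err := get()
	if err != nil {
		return err
	}
	cfg.FinalizeEarly = true
	return set(cfg)
}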