Merge branch 'master' into sbansal/nonce-coordination-and-consensus-for-chain-nodes

This commit is contained in:
Shrenuj Bansal 2022-09-12 16:23:24 -04:00
commit a1f2fdb706
197 changed files with 1195 additions and 406 deletions

View File

@ -314,6 +314,11 @@ type StorageMiner interface {
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
// RecoverFault can be used to declare recoveries manually. It sends messages
// to the miner actor with details of recovered sectors and returns the CIDs of
// those messages. It honors the maxPartitionsPerRecoveryMessage from the config.
RecoverFault(ctx context.Context, sectors []abi.SectorNumber) ([]cid.Cid, error) //perm:admin
}
var _ storiface.WorkerReturn = *new(StorageMiner)

View File

@ -775,6 +775,8 @@ type StorageMinerStruct struct {
PledgeSector func(p0 context.Context) (abi.SectorID, error) `perm:"write"`
RecoverFault func(p0 context.Context, p1 []abi.SectorNumber) ([]cid.Cid, error) `perm:"admin"`
ReturnAddPiece func(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error `perm:"admin"`
ReturnDataCid func(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error `perm:"admin"`
@ -4644,6 +4646,17 @@ func (s *StorageMinerStub) PledgeSector(p0 context.Context) (abi.SectorID, error
return *new(abi.SectorID), ErrNotSupported
}
func (s *StorageMinerStruct) RecoverFault(p0 context.Context, p1 []abi.SectorNumber) ([]cid.Cid, error) {
if s.Internal.RecoverFault == nil {
return *new([]cid.Cid), ErrNotSupported
}
return s.Internal.RecoverFault(p0, p1)
}
func (s *StorageMinerStub) RecoverFault(p0 context.Context, p1 []abi.SectorNumber) ([]cid.Cid, error) {
return *new([]cid.Cid), ErrNotSupported
}
func (s *StorageMinerStruct) ReturnAddPiece(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error {
if s.Internal.ReturnAddPiece == nil {
return ErrNotSupported

View File

@ -519,6 +519,7 @@ func (s *SplitStore) applyProtectors() error {
// - At this point we are ready to begin purging:
// - We sort cold objects heaviest first, so as to never delete the constituents of a DAG before the DAG itself (which would leave dangling references)
// - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live
//
// - We then end the transaction and compact/gc the hotstore.
func (s *SplitStore) compact(curTs *types.TipSet) {
log.Info("waiting for active views to complete")

View File

@ -12,10 +12,9 @@ type unionBlockstore []Blockstore
// Union returns an unioned blockstore.
//
// - Reads return from the first blockstore that has the value, querying in the
// supplied order.
// - Writes (puts and deletes) are broadcast to all stores.
//
func Union(stores ...Blockstore) Blockstore {
return unionBlockstore(stores)
}
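As a toy model of those two rules (the interface here is deliberately simplified and is not the real lotus Blockstore):
```go
package union

import (
	"context"
	"errors"
)

// Store is a stand-in for the real blockstore interface (assumption).
type Store interface {
	Get(ctx context.Context, key string) ([]byte, error)
	Put(ctx context.Context, key string, data []byte) error
}

type union []Store

// Get returns from the first store that has the value, in the supplied order.
func (u union) Get(ctx context.Context, key string) ([]byte, error) {
	for _, s := range u {
		if b, err := s.Get(ctx, key); err == nil {
			return b, nil
		}
	}
	return nil, errors.New("block not found in any store")
}

// Put broadcasts the write to every store.
func (u union) Put(ctx context.Context, key string, data []byte) error {
	for _, s := range u {
		if err := s.Put(ctx, key, data); err != nil {
			return err
		}
	}
	return nil
}
```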

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -5,7 +5,6 @@
//
// Its purpose is to unlock various degrees of flexibility and parametrization
// when writing Testground plans for Lotus.
//
package build
import (

View File

@ -23,6 +23,7 @@ type triggerID = uint64
type msgH = abi.ChainEpoch
// triggerH is the block height at which the listener will be notified about the
//
// message (msgH+confidence)
type triggerH = abi.ChainEpoch
@ -39,6 +40,7 @@ type EventHandler func(ctx context.Context, data eventData, prevTs, ts *types.Ti
//
// If `done` is true, timeout won't be triggered
// If `more` is false, no messages will be sent to EventHandler (RevertHandler
//
// may still be called)
type CheckFunc func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error)
@ -375,29 +377,29 @@ type StateMatchFunc func(oldTs, newTs *types.TipSet) (bool, StateChange, error)
// StateChanged registers a callback which is triggered when a specified state
// change occurs or a timeout is reached.
//
// - `CheckFunc` callback is invoked immediately with a recent tipset, it
// returns two booleans - `done`, and `more`.
//
// - `done` should be true when some on-chain state change we are waiting
// for has happened. When `done` is set to true, timeout trigger is disabled.
//
// - `more` should be false when we don't want to receive new notifications
// through StateChangeHandler. Note that notifications may still be delivered to
// RevertHandler
//
// - `StateChangeHandler` is called when the specified state change was observed
// on-chain, and a confidence threshold was reached, or the specified `timeout`
// height was reached with no state change observed. When this callback is
// invoked on a timeout, `oldTs` and `states` are set to nil.
// This callback returns a boolean specifying whether further notifications
// should be sent, like `more` return param from `CheckFunc` above.
//
// - `RevertHandler` is called after apply handler, when we drop the tipset
// containing the message. The tipset passed as the argument is the tipset
// that is being dropped. Note that the event dropped may be re-applied
// in a different tipset in a small amount of time.
//
// - `StateMatchFunc` is called against each tipset state. If there is a match,
// the state change is queued up until the confidence interval has elapsed (and
// `StateChangeHandler` is called)
func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf StateMatchFunc) error {
@ -503,31 +505,32 @@ type MsgHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.Ti
type MsgMatchFunc func(msg *types.Message) (matched bool, err error)
// Called registers a callback which is triggered when a specified method is
//
// called on an actor, or a timeout is reached.
//
// - `CheckFunc` callback is invoked immediately with a recent tipset, it
// returns two booleans - `done`, and `more`.
//
// - `done` should be true when some on-chain action we are waiting for has
// happened. When `done` is set to true, timeout trigger is disabled.
//
// - `more` should be false when we don't want to receive new notifications
// through MsgHandler. Note that notifications may still be delivered to
// RevertHandler
//
// - `MsgHandler` is called when the specified event was observed on-chain,
// and a confidence threshold was reached, or the specified `timeout` height
// was reached with no events observed. When this callback is invoked on a
// timeout, `msg` is set to nil. This callback returns a boolean specifying
// whether further notifications should be sent, like `more` return param
// from `CheckFunc` above.
//
// - `RevertHandler` is called after apply handler, when we drop the tipset
// containing the message. The tipset passed as the argument is the tipset
// that is being dropped. Note that the message dropped may be re-applied
// in a different tipset in a small amount of time.
//
// - `MsgMatchFunc` is called against each message. If there is a match, the
// message is queued up until the confidence interval has elapsed (and
// `MsgHandler` is called)
func (me *messageEvents) Called(ctx context.Context, check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error {
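A hedged sketch of wiring these callbacks together; `evts`, `minerAddr`, and the surrounding setup are assumptions for illustration, and the handler signatures are inferred from this snippet:
```go
err := evts.Called(ctx,
	// CheckFunc: the call hasn't happened yet; keep listening.
	func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) {
		return false, true, nil
	},
	// MsgHandler: fires once the matched call is `confidence` epochs deep.
	func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) {
		fmt.Println("observed call at epoch", curH)
		return true, nil // keep receiving notifications
	},
	// RevertHandler: the tipset containing the message was dropped.
	func(ctx context.Context, ts *types.TipSet) error { return nil },
	5,                     // confidence, in epochs
	abi.ChainEpoch(10000), // timeout height
	// MsgMatchFunc: only queue messages addressed to our actor.
	func(msg *types.Message) (bool, error) { return msg.To == minerAddr, nil },
)
```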

View File

@ -21,6 +21,7 @@ const (
)
// FIXME: Bumped from original 800 to this to accommodate `syncFork()`
//
// use of `GetBlocks()`. It seems the expectation of that API is to
// fetch any amount of blocks leaving it to the internal logic here
// to partition and reassemble the requests if they go above the maximum.
@ -147,10 +148,11 @@ type BSTipSet struct {
// `BlsIncludes`/`SecpkIncludes` matches `Bls`/`Secpk` messages
// to blocks in the tipsets with the format:
// `BlsIncludes[BI][MI]`
// - BI: block index in the tipset.
// - MI: message index in `Bls` list
//
// FIXME: The logic to decompress this structure should belong
//
// to itself, not to the consumer.
type CompactedMessages struct {
Bls []*types.Message

View File

@ -9,10 +9,15 @@ import (
// WrapHeadChangeCoalescer wraps a ReorgNotifee with a head change coalescer.
// minDelay is the minimum coalesce delay; when a head change is first received, the coalescer will
//
// wait for that long to coalesce more head changes.
//
// maxDelay is the maximum coalesce delay; the coalescer will not delay delivery of a head change
//
// more than that.
//
// mergeInterval is the interval that triggers additional coalesce delay; if the last head change was
//
// within the merge interval when the coalesce timer fires, then the coalesce time is extended
// by min delay and up to max delay total.
func WrapHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) ReorgNotifee {
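As a usage sketch (delay values purely illustrative):
```go
// Wait at least 2s to batch head changes, never delay more than 10s total,
// and keep extending while changes keep arriving less than 1s apart.
notifee = WrapHeadChangeCoalescer(notifee, 2*time.Second, 10*time.Second, time.Second)
```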

View File

@ -453,6 +453,7 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
// The "fast forward" case is covered in this logic as a valid fork of length 0. // The "fast forward" case is covered in this logic as a valid fork of length 0.
// //
// FIXME: We may want to replace some of the logic in `syncFork()` with this. // FIXME: We may want to replace some of the logic in `syncFork()` with this.
//
// `syncFork()` counts the length on both sides of the fork at the moment (we
// need to settle on that) but here we just enforce it on the `synced` side.
func (cs *ChainStore) exceedsForkLength(ctx context.Context, synced, external *types.TipSet) (bool, error) {

View File

@ -159,8 +159,11 @@ func FetchSignedMessagesByCids(
}
// Fetch `cids` from the block service, apply `cb` on each of them. Used
//
// by the fetch message functions above.
//
// We check that each block is received only once and that we do not receive
//
// blocks we did not request.
func fetchCids(
ctx context.Context,
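A hedged sketch of the receive-side bookkeeping those two guarantees imply (`incoming` and the callback wiring are assumed, not the actual implementation):
```go
// Track outstanding requests; accept each requested CID exactly once.
idx := make(map[cid.Cid]int, len(cids))
for i, c := range cids {
	idx[c] = i
}
for blk := range incoming { // assumed channel of fetched blocks
	i, ok := idx[blk.Cid()]
	if !ok {
		return xerrors.Errorf("received duplicate or unrequested block: %s", blk.Cid())
	}
	delete(idx, blk.Cid()) // a second copy would now fail the lookup above
	if err := cb(i, blk); err != nil {
		return err
	}
}
```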

View File

@ -60,14 +60,14 @@ var (
// Syncer is in charge of running the chain synchronization logic. As such, it
// is tasked with these functions, amongst others:
//
// - Fast-forwards the chain as it learns of new TipSets from the network via
// the SyncManager.
// - Applies the fork choice rule to select the correct side when confronted
// with a fork in the network.
// - Requests block headers and messages from other peers when not available
// in our BlockStore.
// - Tracks blocks marked as bad in a cache.
// - Keeps the BlockStore and ChainStore consistent with our view of the world,
// the latter of which in turn informs other components when a reorg has been
// committed.
//

View File

@ -99,11 +99,11 @@ func tipsetSortFunc(blks []*BlockHeader) func(i, j int) bool {
}
// Checks:
// - A tipset is composed of at least one block. (Because of our variable
// number of blocks per tipset, determined by randomness, we do not impose
// an upper limit.)
// - All blocks have the same height.
// - All blocks have the same parents (same number of them and matching CIDs).
func NewTipSet(blks []*BlockHeader) (*TipSet, error) {
if len(blks) == 0 {
return nil, xerrors.Errorf("NewTipSet called with zero length array of blocks")

View File

@ -69,6 +69,7 @@ var CommonCommands = []*cli.Command{
var Commands = []*cli.Command{
WithCategory("basic", sendCmd),
WithCategory("basic", walletCmd),
WithCategory("basic", infoCmd),
WithCategory("basic", clientCmd), WithCategory("basic", clientCmd),
WithCategory("basic", multisigCmd), WithCategory("basic", multisigCmd),
WithCategory("basic", filplusCmd), WithCategory("basic", filplusCmd),

cli/info.go Normal file (230 lines added)
View File

@ -0,0 +1,230 @@
package cli
import (
"context"
"fmt"
"math"
"os"
"sort"
"strings"
"text/tabwriter"
"time"
"github.com/dustin/go-humanize"
"github.com/fatih/color"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
)
var infoCmd = &cli.Command{
Name: "info",
Usage: "Print node info",
Action: infoCmdAct,
}
func infoCmdAct(cctx *cli.Context) error {
fullapi, acloser, err := GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer acloser()
ctx := ReqContext(cctx)
network, err := fullapi.StateGetNetworkParams(ctx)
if err != nil {
return err
}
fmt.Printf("Network: %s\n", network.NetworkName)
fmt.Print("Chain: ")
err = SyncBasefeeCheck(ctx, fullapi)
if err != nil {
return err
}
status, err := fullapi.NodeStatus(ctx, true)
if err != nil {
return err
}
fmt.Printf(" [epoch %s]\n", color.MagentaString(("%d"), status.SyncStatus.Epoch))
fmt.Printf("Peers to: [publish messages %d] [publish blocks %d]\n", status.PeerStatus.PeersToPublishMsgs, status.PeerStatus.PeersToPublishBlocks)
// Chain health calculated as percentage: amount of blocks in last finality / very healthy amount of blocks in a finality (900 epochs * 5 blocks per tipset)
health := (100 * (900 * status.ChainStatus.BlocksPerTipsetLastFinality) / (900 * 5))
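// e.g. an average of 4.5 blocks per tipset over the last finality: 100*(900*4.5)/(900*5) = 90 (healthy)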
switch {
case health > 85:
fmt.Printf("Chain health: %.f%% [%s]\n", health, color.GreenString("healthy"))
default:
fmt.Printf("Chain health: %.f%% [%s]\n", health, color.RedString("unhealthy"))
}
fmt.Println()
addr, err := fullapi.WalletDefaultAddress(ctx)
if err == nil {
fmt.Printf("Default address: \n")
balance, err := fullapi.WalletBalance(ctx, addr)
if err != nil {
return err
}
fmt.Printf(" %s [%s]\n", addr.String(), types.FIL(balance).Short())
} else {
fmt.Printf("Default address: address not set\n")
}
fmt.Println()
addrs, err := fullapi.WalletList(ctx)
if err != nil {
return err
}
totalBalance := big.Zero()
for _, addr := range addrs {
totbal, err := fullapi.WalletBalance(ctx, addr)
if err != nil {
return err
}
totalBalance = big.Add(totalBalance, totbal)
}
switch {
case len(addrs) <= 1:
fmt.Printf("Wallet: %v address\n", len(addrs))
case len(addrs) > 1:
fmt.Printf("Wallet: %v addresses\n", len(addrs))
}
fmt.Printf(" Total balance: %s\n", types.FIL(totalBalance).Short())
mbLockedSum := big.Zero()
mbAvailableSum := big.Zero()
for _, addr := range addrs {
mbal, err := fullapi.StateMarketBalance(ctx, addr, types.EmptyTSK)
if err != nil {
if strings.Contains(err.Error(), "actor not found") {
continue
} else {
return err
}
}
mbLockedSum = big.Add(mbLockedSum, mbal.Locked)
mbAvailableSum = big.Add(mbAvailableSum, mbal.Escrow)
}
fmt.Printf(" Market locked: %s\n", types.FIL(mbLockedSum).Short())
fmt.Printf(" Market available: %s\n", types.FIL(mbAvailableSum).Short())
fmt.Println()
chs, err := fullapi.PaychList(ctx)
if err != nil {
return err
}
switch {
case len(chs) <= 1:
fmt.Printf("Payment Channels: %v channel\n", len(chs))
case len(chs) > 1:
fmt.Printf("Payment Channels: %v channels\n", len(chs))
}
fmt.Println()
localDeals, err := fullapi.ClientListDeals(ctx)
if err != nil {
return err
}
var totalSize uint64
byState := map[storagemarket.StorageDealStatus][]uint64{}
for _, deal := range localDeals {
totalSize += deal.Size
byState[deal.State] = append(byState[deal.State], deal.Size)
}
fmt.Printf("Deals: %d, %s\n", len(localDeals), types.SizeStr(types.NewInt(totalSize)))
type stateStat struct {
state storagemarket.StorageDealStatus
count int
bytes uint64
}
stateStats := make([]stateStat, 0, len(byState))
for state, deals := range byState {
if state == storagemarket.StorageDealActive {
state = math.MaxUint64 // for sort
}
st := stateStat{
state: state,
count: len(deals),
}
for _, b := range deals {
st.bytes += b
}
stateStats = append(stateStats, st)
}
sort.Slice(stateStats, func(i, j int) bool {
return int64(stateStats[i].state) < int64(stateStats[j].state)
})
for _, st := range stateStats {
if st.state == math.MaxUint64 {
st.state = storagemarket.StorageDealActive
}
fmt.Printf(" %s: %d deals, %s\n", storagemarket.DealStates[st.state], st.count, types.SizeStr(types.NewInt(st.bytes)))
}
fmt.Println()
tw := tabwriter.NewWriter(os.Stdout, 6, 6, 2, ' ', 0)
s, err := fullapi.NetBandwidthStats(ctx)
if err != nil {
return err
}
fmt.Printf("Bandwidth:\n")
fmt.Fprintf(tw, "\tTotalIn\tTotalOut\tRateIn\tRateOut\n")
fmt.Fprintf(tw, "\t%s\t%s\t%s/s\t%s/s\n", humanize.Bytes(uint64(s.TotalIn)), humanize.Bytes(uint64(s.TotalOut)), humanize.Bytes(uint64(s.RateIn)), humanize.Bytes(uint64(s.RateOut)))
return tw.Flush()
}
func SyncBasefeeCheck(ctx context.Context, fullapi v1api.FullNode) error {
head, err := fullapi.ChainHead(ctx)
if err != nil {
return err
}
switch {
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs
fmt.Printf("[%s]", color.GreenString("sync ok"))
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs
fmt.Printf("[%s]", color.YellowString("sync slow (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)))
default:
fmt.Printf("[%s]", color.RedString("sync behind! (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)))
}
basefee := head.MinTicketBlock().ParentBaseFee
gasCol := []color.Attribute{color.FgBlue}
switch {
case basefee.GreaterThan(big.NewInt(7000_000_000)): // 7 nFIL
gasCol = []color.Attribute{color.BgRed, color.FgBlack}
case basefee.GreaterThan(big.NewInt(3000_000_000)): // 3 nFIL
gasCol = []color.Attribute{color.FgRed}
case basefee.GreaterThan(big.NewInt(750_000_000)): // 0.75 nFIL
gasCol = []color.Attribute{color.FgYellow}
case basefee.GreaterThan(big.NewInt(100_000_000)): // 0.1 nFIL
gasCol = []color.Attribute{color.FgGreen}
}
fmt.Printf(" [basefee %s]", color.New(gasCol...).Sprint(types.FIL(basefee).Short()))
return nil
}

View File

@ -3,6 +3,7 @@ package cli
import (
"encoding/hex"
"fmt"
"strings"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -152,6 +153,9 @@ var sendCmd = &cli.Command{
sm, err := InteractiveSend(ctx, cctx, srv, proto)
if err != nil {
if strings.Contains(err.Error(), "no current EF") {
return xerrors.Errorf("transaction rejected on ledger: %w", err)
}
return err
}

View File

@ -55,6 +55,10 @@ var actorSetAddrsCmd = &cli.Command{
Aliases: []string{"set-addrs"},
Usage: "set addresses that your miner can be publicly dialed on",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "from",
Usage: "optionally specify the account to send the message from",
},
&cli.Int64Flag{
Name: "gas-limit",
Usage: "set gas limit",
@ -117,6 +121,25 @@ var actorSetAddrsCmd = &cli.Command{
return err
}
fromAddr := minfo.Worker
if from := cctx.String("from"); from != "" {
addr, err := address.NewFromString(from)
if err != nil {
return err
}
fromAddr = addr
}
fromId, err := api.StateLookupID(ctx, fromAddr, types.EmptyTSK)
if err != nil {
return err
}
if !isController(minfo, fromId) {
return xerrors.Errorf("sender isn't a controller of miner: %s", fromId)
}
params, err := actors.SerializeParams(&miner.ChangeMultiaddrsParams{NewMultiaddrs: addrs})
if err != nil {
return err
@ -126,7 +149,7 @@ var actorSetAddrsCmd = &cli.Command{
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
To: maddr,
From: fromId,
Value: types.NewInt(0),
GasLimit: gasLimit,
Method: builtin.MethodsMiner.ChangeMultiaddrs,

View File

@ -25,7 +25,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
@ -70,7 +70,7 @@ func infoCmdAct(cctx *cli.Context) error {
}
defer closer()
fullapi, acloser, err := lcli.GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
@ -94,34 +94,11 @@ func infoCmdAct(cctx *cli.Context) error {
fmt.Print("Chain: ") fmt.Print("Chain: ")
head, err := fullapi.ChainHead(ctx) err = lcli.SyncBasefeeCheck(ctx, fullapi)
if err != nil { if err != nil {
return err return err
} }
switch {
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs
fmt.Printf("[%s]", color.GreenString("sync ok"))
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs
fmt.Printf("[%s]", color.YellowString("sync slow (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)))
default:
fmt.Printf("[%s]", color.RedString("sync behind! (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)))
}
basefee := head.MinTicketBlock().ParentBaseFee
gasCol := []color.Attribute{color.FgBlue}
switch {
case basefee.GreaterThan(big.NewInt(7000_000_000)): // 7 nFIL
gasCol = []color.Attribute{color.BgRed, color.FgBlack}
case basefee.GreaterThan(big.NewInt(3000_000_000)): // 3 nFIL
gasCol = []color.Attribute{color.FgRed}
case basefee.GreaterThan(big.NewInt(750_000_000)): // 750 uFIL
gasCol = []color.Attribute{color.FgYellow}
case basefee.GreaterThan(big.NewInt(100_000_000)): // 100 uFIL
gasCol = []color.Attribute{color.FgGreen}
}
fmt.Printf(" [basefee %s]", color.New(gasCol...).Sprint(types.FIL(basefee).Short()))
fmt.Println()
alerts, err := minerApi.LogAlerts(ctx)
@ -152,7 +129,7 @@ func infoCmdAct(cctx *cli.Context) error {
return nil
}
func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v1api.FullNode, nodeApi api.StorageMiner) error {
maddr, err := getActorAddress(ctx, cctx)
if err != nil {
return err
@ -615,7 +592,7 @@ func colorTokenAmount(format string, amount abi.TokenAmount) {
}
}
func producedBlocks(ctx context.Context, count int, maddr address.Address, napi v1api.FullNode) error {
var err error
head, err := napi.ChainHead(ctx)
if err != nil {

View File

@ -7,10 +7,12 @@ import (
"os" "os"
"strconv" "strconv"
"strings" "strings"
"sync"
"text/tabwriter" "text/tabwriter"
"time" "time"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -19,6 +21,7 @@ import (
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
@ -37,6 +40,7 @@ var provingCmd = &cli.Command{
provingCheckProvableCmd,
workersCmd(false),
provingComputeCmd,
provingRecoverFaultsCmd,
},
}
@ -644,3 +648,82 @@ It will not send any messages to the chain.`,
return nil
},
}
var provingRecoverFaultsCmd = &cli.Command{
Name: "recover-faults",
Usage: "Manually recovers faulty sectors on chain",
ArgsUsage: "<faulty sectors>",
Flags: []cli.Flag{
&cli.IntFlag{
Name: "confidence",
Usage: "number of block confirmations to wait for",
Value: int(build.MessageConfidence),
},
},
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() < 1 {
return xerrors.Errorf("must pass at least 1 sector number")
}
arglist := cctx.Args().Slice()
var sectors []abi.SectorNumber
for _, v := range arglist {
s, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return xerrors.Errorf("failed to convert sectors, please check the arguments: %w", err)
}
sectors = append(sectors, abi.SectorNumber(s))
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(cctx)
msgs, err := nodeApi.RecoverFault(ctx, sectors)
if err != nil {
return err
}
// wait for msgs to get mined into a block
var wg sync.WaitGroup
wg.Add(len(msgs))
results := make(chan error, len(msgs))
for _, msg := range msgs {
go func(m cid.Cid) {
defer wg.Done()
wait, err := api.StateWaitMsg(ctx, m, uint64(cctx.Int("confidence")))
if err != nil {
results <- xerrors.Errorf("error waiting for message %s to land on chain: %w", m, err)
return
}
if wait.Receipt.ExitCode != 0 {
results <- xerrors.Errorf("failed to execute message %s: %w", wait.Message, wait.Receipt.ExitCode)
return
}
results <- nil
}(msg)
}
wg.Wait()
close(results)
for v := range results {
if v != nil {
fmt.Println("Failed to execute the message %w", v)
}
}
return nil
},
}

View File

@ -276,6 +276,7 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP
// messages that originate from secp256k1 senders, leaving all
// others untouched.
// TODO: generate a signature in the DSL so that it's encoded in
//
// the test vector.
func toChainMsg(msg *types.Message) (ret types.ChainMsg) {
ret = msg

View File

@ -106,6 +106,8 @@
* [PiecesListPieces](#PiecesListPieces)
* [Pledge](#Pledge)
* [PledgeSector](#PledgeSector)
* [Recover](#Recover)
* [RecoverFault](#RecoverFault)
* [Return](#Return)
* [ReturnAddPiece](#ReturnAddPiece)
* [ReturnDataCid](#ReturnDataCid)
@ -2265,6 +2267,36 @@ Response:
}
```
## Recover
### RecoverFault
RecoverFault can be used to declare recoveries manually. It sends messages
to the miner actor with details of recovered sectors and returns the CIDs of
those messages. It honors the maxPartitionsPerRecoveryMessage from the config.
Perms: admin
Inputs:
```json
[
[
123,
124
]
]
```
Response:
```json
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
]
```
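For orientation, a hedged Go sketch of invoking this method through a `StorageMiner` API client; `minerAPI`, `ctx`, and the surrounding setup are assumed scaffolding, not part of the documented API surface:

```go
sectors := []abi.SectorNumber{123, 124}
msgs, err := minerAPI.RecoverFault(ctx, sectors) // requires an admin token
if err != nil {
	return err
}
for _, c := range msgs {
	fmt.Println("recovery message CID:", c)
}
```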
## Return
@ -4397,26 +4429,26 @@ Response:
},
"seal/v0/datacid": {
"0": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"1": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"2": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@ -4433,8 +4465,8 @@ Response:
"MaxConcurrent": 0
},
"4": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@ -4442,26 +4474,26 @@ Response:
"MaxConcurrent": 0
},
"5": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"6": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"7": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@ -4478,8 +4510,8 @@ Response:
"MaxConcurrent": 0
},
"9": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,

View File

@ -579,26 +579,26 @@ Response:
},
"seal/v0/datacid": {
"0": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"1": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"2": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@ -615,8 +615,8 @@ Response:
"MaxConcurrent": 0
},
"4": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@ -624,26 +624,26 @@ Response:
"MaxConcurrent": 0
},
"5": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"6": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"7": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@ -660,8 +660,8 @@ Response:
"MaxConcurrent": 0
},
"9": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,

View File

@ -2100,6 +2100,7 @@ COMMANDS:
check Check sectors provable
workers list workers
compute Compute simulated proving tasks
recover-faults Manually recovers faulty sectors on chain
help, h Shows a list of commands or help for one command
OPTIONS:
@ -2210,6 +2211,19 @@ OPTIONS:
```
```
### lotus-miner proving recover-faults
```
NAME:
lotus-miner proving recover-faults - Manually recovers faulty sectors on chain
USAGE:
lotus-miner proving recover-faults [command options] <faulty sectors>
OPTIONS:
--confidence value number of block confirmations to wait for (default: 5)
```
## lotus-miner storage
```
NAME:

View File

@ -18,6 +18,7 @@ COMMANDS:
BASIC:
send Send funds between accounts
wallet Manage wallet
info Print node info
client Make deals, store data, retrieve data
msig Interact with a multisig wallet
filplus Interact with the verified registry actor used by Filplus
@ -398,6 +399,22 @@ OPTIONS:
```
## lotus info
```
NAME:
lotus info - Print node info
USAGE:
lotus info [command options] [arguments...]
CATEGORY:
BASIC
OPTIONS:
--help, -h show help (default: false)
```
## lotus client
```
NAME:

View File

@ -109,7 +109,6 @@ func init() {
// kit.EnsembleMinimal()
// kit.EnsembleOneTwo()
// kit.EnsembleTwoOne()
//
type Ensemble struct {
t *testing.T
bootstrapped bool

View File

@ -300,3 +300,170 @@ func TestWindowPostMaxSectorsRecoveryConfig(t *testing.T) {
sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-1, int(sectors)) // -1 not recovered sector
}
func TestWindowPostManualSectorsRecovery(t *testing.T) {
oldVal := wdpost.RecoveringSectorLimit
defer func() {
wdpost.RecoveringSectorLimit = oldVal
}()
wdpost.RecoveringSectorLimit = 1
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
client, miner, ens := kit.EnsembleMinimal(t,
kit.LatestActorsAt(-1),
kit.MockProofs())
ens.InterconnectAll().BeginMining(2 * time.Millisecond)
nSectors := 10
miner.PledgeSectors(ctx, nSectors, 0, nil)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
mid, err := address.IDFromAddress(maddr)
require.NoError(t, err)
t.Log("Running one proving period")
waitUntil := di.Open + di.WPoStProvingPeriod
t.Logf("End for head.Height > %d", waitUntil)
ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
ssz, err := miner.ActorSectorSize(ctx, maddr)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit.DefaultPresealsPerBootstrapMiner)))
failed, err := client.StateMinerFaults(ctx, maddr, types.TipSetKey{})
require.NoError(t, err)
failedCount, err := failed.Count()
require.NoError(t, err)
require.Equal(t, failedCount, uint64(0))
t.Log("Drop some sectors")
// Drop 2 sectors from deadline 2 partition 0 (full partition / deadline)
parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK)
require.NoError(t, err)
require.Greater(t, len(parts), 0)
secs := parts[0].AllSectors
n, err := secs.Count()
require.NoError(t, err)
require.Equal(t, uint64(2), n)
var failedSectors []abi.SectorNumber
// Drop the partition
err = secs.ForEach(func(sid uint64) error {
failedSectors = append(failedSectors, abi.SectorNumber(sid))
return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(storiface.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sid),
},
}, true)
})
require.NoError(t, err)
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
t.Log("Go through another PP, wait for sectors to become faulty")
waitUntil = di.Open + di.WPoStProvingPeriod
t.Logf("End for head.Height > %d", waitUntil)
ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
failed, err = client.StateMinerFaults(ctx, maddr, types.TipSetKey{})
require.NoError(t, err)
failedCount, err = failed.Count()
require.NoError(t, err)
require.Equal(t, failedCount, uint64(2))
recovered, err := client.StateMinerRecoveries(ctx, maddr, types.TipSetKey{})
require.NoError(t, err)
recoveredCount, err := recovered.Count()
require.NoError(t, err)
require.Equal(t, recoveredCount, uint64(0))
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
t.Log("Make the sectors recoverable")
err = secs.ForEach(func(sid uint64) error {
return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(storiface.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sid),
},
}, false)
})
require.NoError(t, err)
// Try to manually recover the sectors
t.Log("Send recovery message")
_, err = miner.RecoverFault(ctx, failedSectors)
require.NoError(t, err)
currentHeight, err := client.ChainHead(ctx)
require.NoError(t, err)
ts = client.WaitTillChain(ctx, kit.HeightAtLeast(currentHeight.Height()+abi.ChainEpoch(10)))
t.Logf("Now head.Height = %d", ts.Height())
failed, err = client.StateMinerFaults(ctx, maddr, types.TipSetKey{})
require.NoError(t, err)
failedCount, err = failed.Count()
require.NoError(t, err)
require.Equal(t, failedCount, uint64(2))
recovered, err = client.StateMinerRecoveries(ctx, maddr, types.TipSetKey{})
require.NoError(t, err)
recoveredCount, err = recovered.Count()
require.NoError(t, err)
require.Equal(t, recoveredCount, uint64(2))
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
t.Log("Go through another PP, wait for sectors to become faulty")
waitUntil = di.Open + di.WPoStProvingPeriod
t.Logf("End for head.Height > %d", waitUntil)
ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
failed, err = client.StateMinerFaults(ctx, maddr, types.TipSetKey{})
require.NoError(t, err)
failedCount, err = failed.Count()
require.NoError(t, err)
require.Equal(t, failedCount, uint64(0))
recovered, err = client.StateMinerRecoveries(ctx, maddr, types.TipSetKey{})
require.NoError(t, err)
recoveredCount, err = recovered.Count()
require.NoError(t, err)
require.Equal(t, recoveredCount, uint64(0))
}

View File

@ -39,6 +39,7 @@ type ResolveOnce func(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, nam
// Resolver provides path resolution to IPFS
// It has a pointer to a DAGService, which it uses to resolve nodes.
// TODO: now that this is more modular, try to unify this code with the
//
// resolvers in namesys
type Resolver struct {
DAG ipld.NodeGetter

View File

@ -27,10 +27,10 @@ func (e *pathError) Path() string {
}
// A Path represents an ipfs content path:
// - /<cid>/path/to/file
// - /ipfs/<cid>
// - /ipns/<cid>/path/to/folder
// - etc
type Path string
// ^^^ // ^^^

View File

@ -35,6 +35,7 @@ func NewLineCol(name string) Column {
}
// Unlike text/tabwriter, this works with CLI escape codes, and allows for info
//
// in separate lines
func New(cols ...Column) *TableWriter {
return &TableWriter{
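A sketch of how this writer might be used, based only on the constructors visible above; `Col`, `Write`, and `Flush` are assumptions about the package surface:
```go
tw := New(Col("Sector"), Col("State"), NewLineCol("Error"))
tw.Write(map[string]interface{}{
	"Sector": 12,
	"State":  "Proving",
	"Error":  "", // NewLineCol content is rendered on its own line
})
_ = tw.Flush(os.Stdout)
```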

View File

@ -49,6 +49,7 @@ import (
var log = logging.Logger("builder") var log = logging.Logger("builder")
// special is a type used to give keys to modules which // special is a type used to give keys to modules which
//
// can't really be identified by the returned type
type special struct{ id int }
@ -73,6 +74,7 @@ var (
type invoke int
// Invokes are called in the order they are defined.
//
//nolint:golint
const (
// InitJournal at position 0 initializes the journal global var as soon as

View File

@ -1323,6 +1323,27 @@ func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.Exten
return sm.Epp.ComputeProof(ctx, ssi, rand, poStEpoch, nv)
}
func (sm *StorageMinerAPI) RecoverFault(ctx context.Context, sectors []abi.SectorNumber) ([]cid.Cid, error) {
allsectors, err := sm.Miner.ListSectors()
if err != nil {
return nil, xerrors.Errorf("could not get a list of all sectors from the miner: %w", err)
}
var found bool
for _, v := range sectors {
found = false
for _, s := range allsectors {
if v == s.SectorNumber {
found = true
break
}
}
if !found {
return nil, xerrors.Errorf("sectors %d not found in the sector list for miner", v)
}
}
return sm.WdPoSt.ManualFaultRecovery(ctx, sm.Miner.Address(), sectors)
}
func (sm *StorageMinerAPI) RuntimeSubsystems(context.Context) (res api.MinerSubsystems, err error) {
return sm.EnabledSubsystems, nil
}

View File

@ -18,6 +18,7 @@ import (
)
// TODO: For now we handle this by halting state execution, when we get jsonrpc reconnecting
//
// We should implement some wait-for-api logic
type ErrApi struct{ error }
@ -91,6 +92,7 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api
}
// checkPrecommit checks that data commitment generated in the sealing process
//
// matches pieces, and that the seal ticket isn't expired
func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, tsk types.TipSetKey, height abi.ChainEpoch, api SealingAPI) (err error) {
if err := checkPieces(ctx, maddr, si, api, false); err != nil {

View File

@ -261,7 +261,6 @@ func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) {
// those parameters and keys. To do this, run the following command:
//
// go test -run=^TestDownloadParams
//
func TestDownloadParams(t *testing.T) {
// defer requireFDsClosed(t, openFDs(t)) flaky likely cause of how go-embed works with param files

View File

@ -11,6 +11,7 @@ import (
)
// merge gaps between ranges which are close to each other
//
// TODO: more benchmarking to come up with more optimal number
const mergeGaps = 32 << 20
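// note: 32 << 20 bytes = 32 MiB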

View File

@ -96,6 +96,11 @@ func (a TaskType) WorkerType() string {
}
}
// SectorSized returns true if the task operates on a specific sector size
func (a TaskType) SectorSized() bool {
return a != TTDataCid
}
func (a TaskType) MuchLess(b TaskType) (bool, bool) {
oa, ob := order[a], order[b]
oneNegative := oa^ob < 0

View File

@ -9,6 +9,7 @@ import (
)
// ID identifies sector storage by UUID. One sector storage should map to one
//
// filesystem, local or networked / shared by multiple machines
type ID string

View File

@ -6,6 +6,7 @@ import (
"strconv" "strconv"
"strings" "strings"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
@ -13,6 +14,8 @@ import (
"github.com/filecoin-project/lotus/storage/sealer/sealtasks" "github.com/filecoin-project/lotus/storage/sealer/sealtasks"
) )
var log = logging.Logger("resources")
type Resources struct {
MinMemory uint64 `envname:"MIN_MEMORY"` // What Must be in RAM for decent perf
MaxMemory uint64 `envname:"MAX_MEMORY"` // Memory required (swap + ram; peak memory usage during task execution)
@ -32,7 +35,6 @@ type Resources struct {
}
/*
Percent of threads to allocate to parallel tasks
12 * 0.92 = 11 12 * 0.92 = 11
@ -41,7 +43,6 @@ type Resources struct {
32 * 0.92 = 29 32 * 0.92 = 29
64 * 0.92 = 58 64 * 0.92 = 58
128 * 0.92 = 117 128 * 0.92 = 117
*/
var ParallelNum uint64 = 92
var ParallelDenom uint64 = 100
@ -572,7 +573,12 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
func init() {
ResourceTable[sealtasks.TTUnseal] = ResourceTable[sealtasks.TTPreCommit1] // TODO: measure accurately
ResourceTable[sealtasks.TTRegenSectorKey] = ResourceTable[sealtasks.TTReplicaUpdate]
ResourceTable[sealtasks.TTDataCid] = ResourceTable[sealtasks.TTAddPiece]
// DataCid doesn't care about sector proof type; Use 32G AddPiece resource definition
ResourceTable[sealtasks.TTDataCid] = map[abi.RegisteredSealProof]Resources{}
for proof := range ResourceTable[sealtasks.TTAddPiece] {
ResourceTable[sealtasks.TTDataCid][proof] = ResourceTable[sealtasks.TTAddPiece][abi.RegisteredSealProof_StackedDrg32GiBV1]
}
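The per-key copy above matters because assigning the AddPiece map directly would alias it, so later per-task overrides would leak between AddPiece and DataCid. A minimal illustration of the difference, using plain maps (the resources type here is a stand-in):

package main

import "fmt"

type resources struct{ MaxParallelism int }

func main() {
	addPiece := map[string]resources{"2K": {1}, "32G": {1}}

	// Aliasing: both names point at the same map, so edits leak across tasks.
	dataCidAliased := addPiece
	dataCidAliased["2K"] = resources{8}
	fmt.Println(addPiece["2K"]) // {8} -- AddPiece was mutated too

	// Copying per key (as the init loop does) keeps the tables independent.
	addPiece = map[string]resources{"2K": {1}, "32G": {1}}
	dataCid := map[string]resources{}
	for k := range addPiece {
		dataCid[k] = addPiece["32G"] // every size uses the 32G definition
	}
	dataCid["2K"] = resources{8}
	fmt.Println(addPiece["2K"]) // {1} -- unchanged
}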
// V1_1 is the same as V1 // V1_1 is the same as V1
for _, m := range ResourceTable { for _, m := range ResourceTable {
@ -609,6 +615,9 @@ func ParseResourceEnv(lookup func(key, def string) (string, bool)) (map[sealtask
} }
envval, found := lookup(taskType.Short()+"_"+shortSize+"_"+envname, fmt.Sprint(rr.Elem().Field(i).Interface())) envval, found := lookup(taskType.Short()+"_"+shortSize+"_"+envname, fmt.Sprint(rr.Elem().Field(i).Interface()))
if !found {
// see if a non-size-specific envvar is set
envval, found = lookup(taskType.Short()+"_"+envname, fmt.Sprint(rr.Elem().Field(i).Interface()))
if !found { if !found {
// special multicore SDR handling // special multicore SDR handling
if (taskType == sealtasks.TTPreCommit1 || taskType == sealtasks.TTUnseal) && envname == "MAX_PARALLELISM" { if (taskType == sealtasks.TTPreCommit1 || taskType == sealtasks.TTUnseal) && envname == "MAX_PARALLELISM" {
@ -626,6 +635,12 @@ func ParseResourceEnv(lookup func(key, def string) (string, bool)) (map[sealtask
continue continue
} }
} else if !taskType.SectorSized() {
	log.Errorw("sector-size-specific envvar set for a task that is not sector-size dependent", "env", taskType.Short()+"_"+shortSize+"_"+envname, "use", taskType.Short()+"_"+envname)
}
v := rr.Elem().Field(i).Addr().Interface() v := rr.Elem().Field(i).Addr().Interface()
switch fv := v.(type) { switch fv := v.(type) {
case *uint64: case *uint64:

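Putting the two lookups together, the resolution order is: the sector-sized variable wins, and the unsized variable is the fallback for every size. A self-contained sketch of that probe order, using the naming scheme exercised by the tests below:

package main

import (
	"fmt"
	"os"
)

// lookup mirrors the parser's two-step probe: prefer the sector-sized name,
// then fall back to the size-independent one.
func lookup(task, size, field string) (string, bool) {
	if v, ok := os.LookupEnv(task + "_" + size + "_" + field); ok {
		return v, true // e.g. UNS_2K_MAX_PARALLELISM
	}
	if v, ok := os.LookupEnv(task + "_" + field); ok {
		return v, true // e.g. UNS_MAX_PARALLELISM applies to every size
	}
	return "", false
}

func main() {
	_ = os.Setenv("UNS_MAX_PARALLELISM", "2")
	v, ok := lookup("UNS", "2K", "MAX_PARALLELISM")
	fmt.Println(v, ok) // "2 true", via the unsized fallback
}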
View File

@ -12,9 +12,12 @@ import (
) )
func TestListResourceVars(t *testing.T) { func TestListResourceVars(t *testing.T) {
seen := map[string]struct{}{}
_, err := ParseResourceEnv(func(key, def string) (string, bool) { _, err := ParseResourceEnv(func(key, def string) (string, bool) {
if def != "" { _, s := seen[key]
if !s && def != "" {
fmt.Printf("%s=%s\n", key, def) fmt.Printf("%s=%s\n", key, def)
seen[key] = struct{}{}
} }
return "", false return "", false
@ -75,3 +78,44 @@ func TestListResourceSDRMulticoreOverride(t *testing.T) {
require.Equal(t, 9001, rt[sealtasks.TTPreCommit1][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) require.Equal(t, 9001, rt[sealtasks.TTPreCommit1][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
require.Equal(t, 9001, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) require.Equal(t, 9001, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
} }
func TestUnsizedSetAll(t *testing.T) {
rt, err := ParseResourceEnv(func(key, def string) (string, bool) {
if key == "UNS_MAX_PARALLELISM" {
return "2", true
}
return "", false
})
require.NoError(t, err)
require.Equal(t, 2, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
require.Equal(t, 2, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg32GiBV1].MaxParallelism)
require.Equal(t, 2, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg8MiBV1].MaxParallelism)
// check that defaults don't get mutated
require.Equal(t, 1, ResourceTable[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
}
func TestUnsizedNotPreferred(t *testing.T) {
rt, err := ParseResourceEnv(func(key, def string) (string, bool) {
if key == "DC_MAX_PARALLELISM" {
return "2", true
}
// the parser should also log an error for this sized variant, since DataCid is not sector-size dependent
if key == "DC_64G_MAX_PARALLELISM" {
return "1", true
}
return "", false
})
require.NoError(t, err)
require.Equal(t, 2, rt[sealtasks.TTDataCid][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
require.Equal(t, 2, rt[sealtasks.TTDataCid][stabi.RegisteredSealProof_StackedDrg32GiBV1].MaxParallelism)
require.Equal(t, 1, rt[sealtasks.TTDataCid][stabi.RegisteredSealProof_StackedDrg64GiBV1_1].MaxParallelism)
// check that defaults don't get mutated
require.Equal(t, 1, ResourceTable[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
}

View File

@ -717,3 +717,7 @@ func (s *WindowPoStScheduler) ComputePoSt(ctx context.Context, dlIdx uint64, ts
return s.runPoStCycle(ctx, true, *dl, ts) return s.runPoStCycle(ctx, true, *dl, ts)
} }
func (s *WindowPoStScheduler) ManualFaultRecovery(ctx context.Context, maddr address.Address, sectors []abi.SectorNumber) ([]cid.Cid, error) {
return s.declareManualRecoveries(ctx, maddr, sectors, types.EmptyTSK)
}
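For reference, a hedged client-side sketch of exercising the new RecoverFault API over JSON-RPC. The endpoint and token are placeholders, and NewStorageMinerRPCV0 is the usual lotus client constructor; verify the exact signature against your lotus version. RecoverFault requires admin permissions.

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()
	// Placeholder auth token and address; sketch only, error handling trimmed.
	headers := http.Header{"Authorization": []string{"Bearer <admin token>"}}

	miner, closer, err := client.NewStorageMinerRPCV0(ctx, "ws://127.0.0.1:2345/rpc/v0", headers)
	if err != nil {
		panic(err)
	}
	defer closer()

	// Declare sectors 100 and 101 as recovered; one CID per batched message.
	cids, err := miner.RecoverFault(ctx, []abi.SectorNumber{100, 101})
	if err != nil {
		panic(err)
	}
	fmt.Println("recovery messages:", cids)
}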

View File

@ -10,6 +10,7 @@ import (
"go.opencensus.io/trace" "go.opencensus.io/trace"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/builtin"
@ -49,6 +50,7 @@ func init() {
// on chain before returning. // on chain before returning.
// //
// TODO: the waiting should happen in the background. Right now this // TODO: the waiting should happen in the background. Right now this
//
// is blocking/delaying the actual generation and submission of WindowPoSts in // is blocking/delaying the actual generation and submission of WindowPoSts in
// this deadline! // this deadline!
func (s *WindowPoStScheduler) declareRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([][]miner.RecoveryDeclaration, []*types.SignedMessage, error) { func (s *WindowPoStScheduler) declareRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([][]miner.RecoveryDeclaration, []*types.SignedMessage, error) {
@ -205,6 +207,7 @@ func (s *WindowPoStScheduler) declareRecoveries(ctx context.Context, dlIdx uint6
// on chain before returning. // on chain before returning.
// //
// TODO: the waiting should happen in the background. Right now this // TODO: the waiting should happen in the background. Right now this
//
// is blocking/delaying the actual generation and submission of WindowPoSts in // is blocking/delaying the actual generation and submission of WindowPoSts in
// this deadline! // this deadline!
func (s *WindowPoStScheduler) declareFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types.SignedMessage, error) { func (s *WindowPoStScheduler) declareFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types.SignedMessage, error) {
@ -343,3 +346,111 @@ func (s *WindowPoStScheduler) asyncFaultRecover(di dline.Info, ts *types.TipSet)
} }
}() }()
} }
// declareManualRecoveries declares the given sectors as recovered on chain,
// independent of the fault/recovery tracking done by the scheduler itself.
//
// It resolves each sector to its (deadline, partition), groups the sectors
// into one RecoveryDeclaration per partition, batches the declarations
// according to maxPartitionsPerRecoveryMessage, and pushes one
// `DeclareFaultsRecovered` message per batch to our miner actor.
//
// Unlike the automatic path above, it does not wait for confirmations on
// chain; it returns the CIDs of the pushed messages so callers can track them.
func (s *WindowPoStScheduler) declareManualRecoveries(ctx context.Context, maddr address.Address, sectors []abi.SectorNumber, tsk types.TipSetKey) ([]cid.Cid, error) {
	type ptx struct {
		deadline  uint64
		partition uint64
	}

	// Group the requested sectors by the (deadline, partition) they live in.
	smap := make(map[ptx][]uint64)
	for _, sector := range sectors {
		ptxID, err := s.api.StateSectorPartition(ctx, maddr, sector, tsk)
		if err != nil {
			return nil, xerrors.Errorf("failed to fetch partition and deadline details for sector %d: %w", sector, err)
		}
		key := ptx{deadline: ptxID.Deadline, partition: ptxID.Partition}
		smap[key] = append(smap[key], uint64(sector))
	}

	// One recovery declaration per affected partition.
	var recoveryDecls []miner.RecoveryDeclaration
	for key, sectorNums := range smap {
		recoveryDecls = append(recoveryDecls, miner.RecoveryDeclaration{
			Deadline:  key.deadline,
			Partition: key.partition,
			Sectors:   bitfield.NewFromSet(sectorNums),
		})
	}

	// Batch the declarations if maxPartitionsPerRecoveryMessage is set.
	var recoveryBatches [][]miner.RecoveryDeclaration
	if s.maxPartitionsPerRecoveryMessage > 0 {
		for len(recoveryDecls) > s.maxPartitionsPerRecoveryMessage {
			batch := recoveryDecls[len(recoveryDecls)-s.maxPartitionsPerRecoveryMessage:]
			recoveryDecls = recoveryDecls[:len(recoveryDecls)-s.maxPartitionsPerRecoveryMessage]
			recoveryBatches = append(recoveryBatches, batch)
		}
	}
	// Whatever remains (or everything, if batching is disabled) is the final batch.
	recoveryBatches = append(recoveryBatches, recoveryDecls)

	var mcids []cid.Cid
	for _, batch := range recoveryBatches {
		msgCid, err := s.manualRecoveryMsg(ctx, batch)
		if err != nil {
			return nil, err
		}
		mcids = append(mcids, msgCid)
	}
	return mcids, nil
}
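A worked example of the batching above: with maxPartitionsPerRecoveryMessage = 2 and five declarations, full batches are peeled off the tail and the remainder forms the last one, giving sizes 2, 2, 1. The slicing in isolation (ints stand in for RecoveryDeclaration values):

package main

import "fmt"

// batchDecls splits decls into tail-first batches of at most max elements;
// max <= 0 disables batching and yields a single batch.
func batchDecls(decls []int, max int) [][]int {
	var batches [][]int
	if max > 0 {
		for len(decls) > max {
			batches = append(batches, decls[len(decls)-max:])
			decls = decls[:len(decls)-max]
		}
	}
	return append(batches, decls)
}

func main() {
	fmt.Println(batchDecls([]int{1, 2, 3, 4, 5}, 2)) // [[4 5] [2 3] [1]]
}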
func (s *WindowPoStScheduler) manualRecoveryMsg(ctx context.Context, recoveries []miner.RecoveryDeclaration) (cid.Cid, error) {
	params := &miner.DeclareFaultsRecoveredParams{
		Recoveries: recoveries,
	}

	enc, aerr := actors.SerializeParams(params)
	if aerr != nil {
		return cid.Undef, xerrors.Errorf("could not serialize declare recoveries parameters: %w", aerr)
	}

	msg := &types.Message{
		To:     s.actor,
		Method: builtin.MethodsMiner.DeclareFaultsRecovered,
		Params: enc,
		Value:  types.NewInt(0),
	}
	spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
	if err := s.prepareMessage(ctx, msg, spec); err != nil {
		return cid.Undef, err
	}

	sm, err := s.api.MpoolPushMessage(ctx, msg, spec)
	if err != nil {
		return cid.Undef, xerrors.Errorf("pushing message to mpool: %w", err)
	}

	return sm.Cid(), nil
}

View File

@ -19,6 +19,7 @@ import (
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/journal"
@ -44,6 +45,7 @@ type NodeAPI interface {
StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error) StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error)
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error)
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*lminer.SectorLocation, error)
MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)

View File

@ -44,6 +44,7 @@ func getClientMode(groupSeq int64) ClientMode {
} }
// TODO Stress is currently WIP. We found blockers in Lotus that prevent us from // TODO Stress is currently WIP. We found blockers in Lotus that prevent us from
//
// making progress. See https://github.com/filecoin-project/lotus/issues/2297. // making progress. See https://github.com/filecoin-project/lotus/issues/2297.
func Stress(t *testkit.TestEnvironment) error { func Stress(t *testkit.TestEnvironment) error {
// Dispatch/forward non-client roles to defaults. // Dispatch/forward non-client roles to defaults.