Merge branch 'master' into sbansal/nonce-coordination-and-consensus-for-chain-nodes

Shrenuj Bansal 2022-09-12 16:23:24 -04:00
commit a1f2fdb706
197 changed files with 1195 additions and 406 deletions

View File

@ -314,6 +314,11 @@ type StorageMiner interface {
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
// RecoverFault can be used to declare recoveries manually. It sends messages
// to the miner actor with details of the recovered sectors and returns the CIDs of those messages.
// It honors the maxPartitionsPerRecoveryMessage setting from the config
RecoverFault(ctx context.Context, sectors []abi.SectorNumber) ([]cid.Cid, error) //perm:admin
} }
var _ storiface.WorkerReturn = *new(StorageMiner) var _ storiface.WorkerReturn = *new(StorageMiner)
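A minimal sketch of how a client might drive the new endpoint, mirroring the `recover-faults` CLI flow added later in this diff. Obtaining the `api.StorageMiner` and `v0api.FullNode` handles is assumed to happen elsewhere, and the sector numbers are placeholders.

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"golang.org/x/xerrors"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/build"
)

// recoverSectors asks the miner to declare the given sectors as recovered and
// waits for each resulting message to land on chain.
func recoverSectors(ctx context.Context, miner lapi.StorageMiner, full v0api.FullNode) error {
	msgs, err := miner.RecoverFault(ctx, []abi.SectorNumber{10, 11, 12})
	if err != nil {
		return err
	}
	for _, c := range msgs {
		wait, err := full.StateWaitMsg(ctx, c, build.MessageConfidence)
		if err != nil {
			return err
		}
		if wait.Receipt.ExitCode != 0 {
			return xerrors.Errorf("recovery message %s failed with exit code %d", c, wait.Receipt.ExitCode)
		}
	}
	return nil
}
```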

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package api package api
import ( import (

View File

@ -775,6 +775,8 @@ type StorageMinerStruct struct {
PledgeSector func(p0 context.Context) (abi.SectorID, error) `perm:"write"` PledgeSector func(p0 context.Context) (abi.SectorID, error) `perm:"write"`
RecoverFault func(p0 context.Context, p1 []abi.SectorNumber) ([]cid.Cid, error) `perm:"admin"`
ReturnAddPiece func(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error `perm:"admin"` ReturnAddPiece func(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error `perm:"admin"`
ReturnDataCid func(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error `perm:"admin"` ReturnDataCid func(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error `perm:"admin"`
@ -4644,6 +4646,17 @@ func (s *StorageMinerStub) PledgeSector(p0 context.Context) (abi.SectorID, error
return *new(abi.SectorID), ErrNotSupported return *new(abi.SectorID), ErrNotSupported
} }
func (s *StorageMinerStruct) RecoverFault(p0 context.Context, p1 []abi.SectorNumber) ([]cid.Cid, error) {
if s.Internal.RecoverFault == nil {
return *new([]cid.Cid), ErrNotSupported
}
return s.Internal.RecoverFault(p0, p1)
}
func (s *StorageMinerStub) RecoverFault(p0 context.Context, p1 []abi.SectorNumber) ([]cid.Cid, error) {
return *new([]cid.Cid), ErrNotSupported
}
func (s *StorageMinerStruct) ReturnAddPiece(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error { func (s *StorageMinerStruct) ReturnAddPiece(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error {
if s.Internal.ReturnAddPiece == nil { if s.Internal.ReturnAddPiece == nil {
return ErrNotSupported return ErrNotSupported

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package api package api
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package badgerbs package badgerbs
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package badgerbs package badgerbs
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package splitstore package splitstore
import ( import (

View File

@ -519,6 +519,7 @@ func (s *SplitStore) applyProtectors() error {
// - At this point we are ready to begin purging: // - At this point we are ready to begin purging:
// - We sort cold objects heaviest first, so as to never delete the constituents of a DAG before the DAG itself (which would leave dangling references) // - We sort cold objects heaviest first, so as to never delete the constituents of a DAG before the DAG itself (which would leave dangling references)
// - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live // - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live
//
// - We then end the transaction and compact/gc the hotstore. // - We then end the transaction and compact/gc the hotstore.
func (s *SplitStore) compact(curTs *types.TipSet) { func (s *SplitStore) compact(curTs *types.TipSet) {
log.Info("waiting for active views to complete") log.Info("waiting for active views to complete")

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package splitstore package splitstore
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package blockstore package blockstore
import ( import (

View File

@ -12,10 +12,9 @@ type unionBlockstore []Blockstore
// Union returns an unioned blockstore. // Union returns an unioned blockstore.
// //
// * Reads return from the first blockstore that has the value, querying in the // - Reads return from the first blockstore that has the value, querying in the
// supplied order. // supplied order.
// * Writes (puts and deletes) are broadcast to all stores. // - Writes (puts and deletes) are broadcast to all stores.
//
func Union(stores ...Blockstore) Blockstore { func Union(stores ...Blockstore) Blockstore {
return unionBlockstore(stores) return unionBlockstore(stores)
} }
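A tiny sketch of the union semantics described above: reads fall through the member stores in order, writes fan out to all of them. It assumes the package's in-memory blockstore (`NewMemory`) and the context-aware `Get`/`Put` signatures used elsewhere in this codebase.

```go
package main

import (
	"context"
	"fmt"

	blocks "github.com/ipfs/go-block-format"

	"github.com/filecoin-project/lotus/blockstore"
)

func main() {
	ctx := context.Background()
	hot, cold := blockstore.NewMemory(), blockstore.NewMemory()

	// Only the second store holds the block...
	blk := blocks.NewBlock([]byte("hello"))
	_ = cold.Put(ctx, blk)

	// ...but the union still serves it, because reads query the stores in order.
	union := blockstore.Union(hot, cold)
	got, err := union.Get(ctx, blk.Cid())
	fmt.Println(got != nil, err) // true <nil>

	// Writes (and deletes) are broadcast to every member store.
	_ = union.Put(ctx, blocks.NewBlock([]byte("world")))
}
```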

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package blockstore package blockstore
import ( import (

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package build package build
import ( import (

View File

@ -5,7 +5,6 @@
// //
// Its purpose is to unlock various degrees of flexibility and parametrization // Its purpose is to unlock various degrees of flexibility and parametrization
// when writing Testground plans for Lotus. // when writing Testground plans for Lotus.
//
package build package build
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package adt package adt
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package aerrors_test package aerrors_test
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package policy package policy
import ( import (

View File

@ -1,5 +1,5 @@
//stm: ignore // stm: ignore
//Only tests external library behavior, therefore it should not be annotated // Only tests external library behavior, therefore it should not be annotated
package drand package drand
import ( import (

View File

@ -23,6 +23,7 @@ type triggerID = uint64
type msgH = abi.ChainEpoch type msgH = abi.ChainEpoch
// triggerH is the block height at which the listener will be notified about the // triggerH is the block height at which the listener will be notified about the
//
// message (msgH+confidence) // message (msgH+confidence)
type triggerH = abi.ChainEpoch type triggerH = abi.ChainEpoch
@ -39,6 +40,7 @@ type EventHandler func(ctx context.Context, data eventData, prevTs, ts *types.Ti
// //
// If `done` is true, timeout won't be triggered // If `done` is true, timeout won't be triggered
// If `more` is false, no messages will be sent to EventHandler (RevertHandler // If `more` is false, no messages will be sent to EventHandler (RevertHandler
//
// may still be called) // may still be called)
type CheckFunc func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) type CheckFunc func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error)
@ -375,29 +377,29 @@ type StateMatchFunc func(oldTs, newTs *types.TipSet) (bool, StateChange, error)
// StateChanged registers a callback which is triggered when a specified state // StateChanged registers a callback which is triggered when a specified state
// change occurs or a timeout is reached. // change occurs or a timeout is reached.
// //
// * `CheckFunc` callback is invoked immediately with a recent tipset, it // - `CheckFunc` callback is invoked immediately with a recent tipset, it
// returns two booleans - `done`, and `more`. // returns two booleans - `done`, and `more`.
// //
// * `done` should be true when some on-chain state change we are waiting // - `done` should be true when some on-chain state change we are waiting
// for has happened. When `done` is set to true, timeout trigger is disabled. // for has happened. When `done` is set to true, timeout trigger is disabled.
// //
// * `more` should be false when we don't want to receive new notifications // - `more` should be false when we don't want to receive new notifications
// through StateChangeHandler. Note that notifications may still be delivered to // through StateChangeHandler. Note that notifications may still be delivered to
// RevertHandler // RevertHandler
// //
// * `StateChangeHandler` is called when the specified state change was observed // - `StateChangeHandler` is called when the specified state change was observed
// on-chain, and a confidence threshold was reached, or the specified `timeout` // on-chain, and a confidence threshold was reached, or the specified `timeout`
// height was reached with no state change observed. When this callback is // height was reached with no state change observed. When this callback is
// invoked on a timeout, `oldTs` and `states` are set to nil. // invoked on a timeout, `oldTs` and `states` are set to nil.
// This callback returns a boolean specifying whether further notifications // This callback returns a boolean specifying whether further notifications
// should be sent, like `more` return param from `CheckFunc` above. // should be sent, like `more` return param from `CheckFunc` above.
// //
// * `RevertHandler` is called after apply handler, when we drop the tipset // - `RevertHandler` is called after apply handler, when we drop the tipset
// containing the message. The tipset passed as the argument is the tipset // containing the message. The tipset passed as the argument is the tipset
// that is being dropped. Note that the event dropped may be re-applied // that is being dropped. Note that the event dropped may be re-applied
// in a different tipset in small amount of time. // in a different tipset in small amount of time.
// //
// * `StateMatchFunc` is called against each tipset state. If there is a match, // - `StateMatchFunc` is called against each tipset state. If there is a match,
// the state change is queued up until the confidence interval has elapsed (and // the state change is queued up until the confidence interval has elapsed (and
// `StateChangeHandler` is called) // `StateChangeHandler` is called)
func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf StateMatchFunc) error { func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf StateMatchFunc) error {
@ -503,31 +505,32 @@ type MsgHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.Ti
type MsgMatchFunc func(msg *types.Message) (matched bool, err error) type MsgMatchFunc func(msg *types.Message) (matched bool, err error)
// Called registers a callback which is triggered when a specified method is // Called registers a callback which is triggered when a specified method is
//
// called on an actor, or a timeout is reached. // called on an actor, or a timeout is reached.
// //
// * `CheckFunc` callback is invoked immediately with a recent tipset, it // - `CheckFunc` callback is invoked immediately with a recent tipset, it
// returns two booleans - `done`, and `more`. // returns two booleans - `done`, and `more`.
// //
// * `done` should be true when some on-chain action we are waiting for has // - `done` should be true when some on-chain action we are waiting for has
// happened. When `done` is set to true, timeout trigger is disabled. // happened. When `done` is set to true, timeout trigger is disabled.
// //
// * `more` should be false when we don't want to receive new notifications // - `more` should be false when we don't want to receive new notifications
// through MsgHandler. Note that notifications may still be delivered to // through MsgHandler. Note that notifications may still be delivered to
// RevertHandler // RevertHandler
// //
// * `MsgHandler` is called when the specified event was observed on-chain, // - `MsgHandler` is called when the specified event was observed on-chain,
// and a confidence threshold was reached, or the specified `timeout` height // and a confidence threshold was reached, or the specified `timeout` height
// was reached with no events observed. When this callback is invoked on a // was reached with no events observed. When this callback is invoked on a
// timeout, `msg` is set to nil. This callback returns a boolean specifying // timeout, `msg` is set to nil. This callback returns a boolean specifying
// whether further notifications should be sent, like `more` return param // whether further notifications should be sent, like `more` return param
// from `CheckFunc` above. // from `CheckFunc` above.
// //
// * `RevertHandler` is called after apply handler, when we drop the tipset // - `RevertHandler` is called after apply handler, when we drop the tipset
// containing the message. The tipset passed as the argument is the tipset // containing the message. The tipset passed as the argument is the tipset
// that is being dropped. Note that the message dropped may be re-applied // that is being dropped. Note that the message dropped may be re-applied
// in a different tipset in small amount of time. // in a different tipset in small amount of time.
// //
// * `MsgMatchFunc` is called against each message. If there is a match, the // - `MsgMatchFunc` is called against each message. If there is a match, the
// message is queued up until the confidence interval has elapsed (and // message is queued up until the confidence interval has elapsed (and
// `MsgHandler` is called) // `MsgHandler` is called)
func (me *messageEvents) Called(ctx context.Context, check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error { func (me *messageEvents) Called(ctx context.Context, check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error {
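A hedged sketch of wiring up `Called`, assuming an `*events.Events` instance (`ev`) constructed elsewhere and the handler shapes defined in this file (`CheckFunc`, `MsgHandler`, `RevertHandler`, `MsgMatchFunc`); the confidence and timeout values are placeholders.

```go
package example

import (
	"context"
	"log"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/types"
)

// watchCallsTo logs every message sent to target once it has 5 tipsets of
// confidence, giving up at epoch 500000 if nothing matches.
func watchCallsTo(ctx context.Context, ev *events.Events, target address.Address) error {
	check := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) {
		return false, true, nil // nothing has happened yet; keep listening
	}
	handler := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) {
		if msg == nil { // invoked on timeout: no matching message was observed
			return false, nil
		}
		log.Printf("call to %s (method %d) confirmed at height %d", target, msg.Method, curH)
		return true, nil // keep receiving notifications
	}
	revert := func(ctx context.Context, ts *types.TipSet) error {
		log.Printf("tipset %s containing the message was reverted", ts.Key())
		return nil
	}
	match := func(msg *types.Message) (bool, error) {
		return msg.To == target, nil
	}
	return ev.Called(ctx, check, handler, revert, 5, abi.ChainEpoch(500000), match)
}
```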

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package events package events
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package state package state
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package events package events
import ( import (

View File

@ -21,6 +21,7 @@ const (
) )
// FIXME: Bumped from original 800 to this to accommodate `syncFork()` // FIXME: Bumped from original 800 to this to accommodate `syncFork()`
//
// use of `GetBlocks()`. It seems the expectation of that API is to // use of `GetBlocks()`. It seems the expectation of that API is to
// fetch any amount of blocks leaving it to the internal logic here // fetch any amount of blocks leaving it to the internal logic here
// to partition and reassemble the requests if they go above the maximum. // to partition and reassemble the requests if they go above the maximum.
@ -147,10 +148,11 @@ type BSTipSet struct {
// `BlsIncludes`/`SecpkIncludes` matches `Bls`/`Secpk` messages // `BlsIncludes`/`SecpkIncludes` matches `Bls`/`Secpk` messages
// to blocks in the tipsets with the format: // to blocks in the tipsets with the format:
// `BlsIncludes[BI][MI]` // `BlsIncludes[BI][MI]`
// * BI: block index in the tipset. // - BI: block index in the tipset.
// * MI: message index in `Bls` list // - MI: message index in `Bls` list
// //
// FIXME: The logic to decompress this structure should belong // FIXME: The logic to decompress this structure should belong
//
// to itself, not to the consumer. // to itself, not to the consumer.
type CompactedMessages struct { type CompactedMessages struct {
Bls []*types.Message Bls []*types.Message

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package gen package gen
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package market package market
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package messagepool package messagepool
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package messagepool package messagepool
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package messagepool package messagepool
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package messagepool package messagepool
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package messagepool package messagepool
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package messagesigner package messagesigner
import ( import (

View File

@ -1,4 +1,4 @@
//stm:#unit // stm:#unit
package rand_test package rand_test
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package state package state
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #integration // stm: #integration
package stmgr_test package stmgr_test
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package stmgr_test package stmgr_test
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package store_test package store_test
import ( import (

View File

@ -9,10 +9,15 @@ import (
// WrapHeadChangeCoalescer wraps a ReorgNotifee with a head change coalescer. // WrapHeadChangeCoalescer wraps a ReorgNotifee with a head change coalescer.
// minDelay is the minimum coalesce delay; when a head change is first received, the coalescer will // minDelay is the minimum coalesce delay; when a head change is first received, the coalescer will
//
// wait for that long to coalesce more head changes. // wait for that long to coalesce more head changes.
//
// maxDelay is the maximum coalesce delay; the coalescer will not delay delivery of a head change // maxDelay is the maximum coalesce delay; the coalescer will not delay delivery of a head change
//
// more than that. // more than that.
//
// mergeInterval is the interval that triggers additional coalesce delay; if the last head change was // mergeInterval is the interval that triggers additional coalesce delay; if the last head change was
//
// within the merge interval when the coalesce timer fires, then the coalesce time is extended // within the merge interval when the coalesce timer fires, then the coalesce time is extended
// by min delay and up to max delay total. // by min delay and up to max delay total.
func WrapHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) ReorgNotifee { func WrapHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) ReorgNotifee {
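A minimal usage sketch, assuming an initialised `*store.ChainStore` (`cs`) and the `ReorgNotifee` shape used by `SubscribeHeadChanges`; the delays are placeholders.

```go
package example

import (
	"log"
	"time"

	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

// subscribeCoalesced registers a reorg notifee whose deliveries are batched:
// wait 2s after the first head change to gather more, keep extending while
// changes arrive within 1s of each other, but never delay delivery past 10s.
func subscribeCoalesced(cs *store.ChainStore) {
	notifee := func(rev, app []*types.TipSet) error {
		log.Printf("head change: %d tipsets reverted, %d applied", len(rev), len(app))
		return nil
	}
	cs.SubscribeHeadChanges(store.WrapHeadChangeCoalescer(notifee, 2*time.Second, 10*time.Second, time.Second))
}
```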

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package store package store
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package store_test package store_test
import ( import (

View File

@ -453,6 +453,7 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
// The "fast forward" case is covered in this logic as a valid fork of length 0. // The "fast forward" case is covered in this logic as a valid fork of length 0.
// //
// FIXME: We may want to replace some of the logic in `syncFork()` with this. // FIXME: We may want to replace some of the logic in `syncFork()` with this.
//
// `syncFork()` counts the length on both sides of the fork at the moment (we // `syncFork()` counts the length on both sides of the fork at the moment (we
// need to settle on that) but here we just enforce it on the `synced` side. // need to settle on that) but here we just enforce it on the `synced` side.
func (cs *ChainStore) exceedsForkLength(ctx context.Context, synced, external *types.TipSet) (bool, error) { func (cs *ChainStore) exceedsForkLength(ctx context.Context, synced, external *types.TipSet) (bool, error) {

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package store_test package store_test
import ( import (

View File

@ -159,8 +159,11 @@ func FetchSignedMessagesByCids(
} }
// Fetch `cids` from the block service, apply `cb` on each of them. Used // Fetch `cids` from the block service, apply `cb` on each of them. Used
//
// by the fetch message functions above. // by the fetch message functions above.
//
// We check that each block is received only once and we do not receive // We check that each block is received only once and we do not receive
//
// blocks we did not request. // blocks we did not request.
func fetchCids( func fetchCids(
ctx context.Context, ctx context.Context,

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package sub package sub
import ( import (

View File

@ -60,14 +60,14 @@ var (
// Syncer is in charge of running the chain synchronization logic. As such, it // Syncer is in charge of running the chain synchronization logic. As such, it
// is tasked with these functions, amongst others: // is tasked with these functions, amongst others:
// //
// * Fast-forwards the chain as it learns of new TipSets from the network via // - Fast-forwards the chain as it learns of new TipSets from the network via
// the SyncManager. // the SyncManager.
// * Applies the fork choice rule to select the correct side when confronted // - Applies the fork choice rule to select the correct side when confronted
// with a fork in the network. // with a fork in the network.
// * Requests block headers and messages from other peers when not available // - Requests block headers and messages from other peers when not available
// in our BlockStore. // in our BlockStore.
// * Tracks blocks marked as bad in a cache. // - Tracks blocks marked as bad in a cache.
// * Keeps the BlockStore and ChainStore consistent with our view of the world, // - Keeps the BlockStore and ChainStore consistent with our view of the world,
// the latter of which in turn informs other components when a reorg has been // the latter of which in turn informs other components when a reorg has been
// committed. // committed.
// //

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package chain package chain
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package chain_test package chain_test
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package types package types
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package types package types
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package types package types
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package types package types
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package types package types
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package types package types
import ( import (

View File

@ -99,11 +99,11 @@ func tipsetSortFunc(blks []*BlockHeader) func(i, j int) bool {
} }
// Checks: // Checks:
// * A tipset is composed of at least one block. (Because of our variable // - A tipset is composed of at least one block. (Because of our variable
// number of blocks per tipset, determined by randomness, we do not impose // number of blocks per tipset, determined by randomness, we do not impose
// an upper limit.) // an upper limit.)
// * All blocks have the same height. // - All blocks have the same height.
// * All blocks have the same parents (same number of them and matching CIDs). // - All blocks have the same parents (same number of them and matching CIDs).
func NewTipSet(blks []*BlockHeader) (*TipSet, error) { func NewTipSet(blks []*BlockHeader) (*TipSet, error) {
if len(blks) == 0 { if len(blks) == 0 {
return nil, xerrors.Errorf("NewTipSet called with zero length array of blocks") return nil, xerrors.Errorf("NewTipSet called with zero length array of blocks")

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package types package types
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package types package types
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package chain package chain
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package vectors package vectors
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package vm package vm
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package vm package vm
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package vm package vm
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package vm package vm
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package wallet package wallet
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package wallet package wallet
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package cli package cli
import ( import (

View File

@ -69,6 +69,7 @@ var CommonCommands = []*cli.Command{
var Commands = []*cli.Command{ var Commands = []*cli.Command{
WithCategory("basic", sendCmd), WithCategory("basic", sendCmd),
WithCategory("basic", walletCmd), WithCategory("basic", walletCmd),
WithCategory("basic", infoCmd),
WithCategory("basic", clientCmd), WithCategory("basic", clientCmd),
WithCategory("basic", multisigCmd), WithCategory("basic", multisigCmd),
WithCategory("basic", filplusCmd), WithCategory("basic", filplusCmd),

cli/info.go (new file, 230 lines)
View File

@ -0,0 +1,230 @@
package cli
import (
"context"
"fmt"
"math"
"os"
"sort"
"strings"
"text/tabwriter"
"time"
"github.com/dustin/go-humanize"
"github.com/fatih/color"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
)
var infoCmd = &cli.Command{
Name: "info",
Usage: "Print node info",
Action: infoCmdAct,
}
func infoCmdAct(cctx *cli.Context) error {
fullapi, acloser, err := GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer acloser()
ctx := ReqContext(cctx)
network, err := fullapi.StateGetNetworkParams(ctx)
if err != nil {
return err
}
fmt.Printf("Network: %s\n", network.NetworkName)
fmt.Print("Chain: ")
err = SyncBasefeeCheck(ctx, fullapi)
if err != nil {
return err
}
status, err := fullapi.NodeStatus(ctx, true)
if err != nil {
return err
}
fmt.Printf(" [epoch %s]\n", color.MagentaString(("%d"), status.SyncStatus.Epoch))
fmt.Printf("Peers to: [publish messages %d] [publish blocks %d]\n", status.PeerStatus.PeersToPublishMsgs, status.PeerStatus.PeersToPublishBlocks)
// Chain health is calculated as a percentage: blocks seen over the last finality / the ideal number of blocks in a finality (900 epochs * 5 blocks per tipset)
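// For example, averaging 4.5 blocks per tipset over the last finality gives
// 100 * (900 * 4.5) / (900 * 5) = 90%, which is reported as healthy.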
health := (100 * (900 * status.ChainStatus.BlocksPerTipsetLastFinality) / (900 * 5))
switch {
case health > 85:
fmt.Printf("Chain health: %.f%% [%s]\n", health, color.GreenString("healthy"))
case health < 85:
fmt.Printf("Chain health: %.f%% [%s]\n", health, color.RedString("unhealthy"))
}
fmt.Println()
addr, err := fullapi.WalletDefaultAddress(ctx)
if err == nil {
fmt.Printf("Default address: \n")
balance, err := fullapi.WalletBalance(ctx, addr)
if err != nil {
return err
}
fmt.Printf(" %s [%s]\n", addr.String(), types.FIL(balance).Short())
} else {
fmt.Printf("Default address: address not set\n")
}
fmt.Println()
addrs, err := fullapi.WalletList(ctx)
if err != nil {
return err
}
totalBalance := big.Zero()
for _, addr := range addrs {
totbal, err := fullapi.WalletBalance(ctx, addr)
if err != nil {
return err
}
totalBalance = big.Add(totalBalance, totbal)
}
switch {
case len(addrs) <= 1:
fmt.Printf("Wallet: %v address\n", len(addrs))
case len(addrs) > 1:
fmt.Printf("Wallet: %v addresses\n", len(addrs))
}
fmt.Printf(" Total balance: %s\n", types.FIL(totalBalance).Short())
mbLockedSum := big.Zero()
mbAvailableSum := big.Zero()
for _, addr := range addrs {
mbal, err := fullapi.StateMarketBalance(ctx, addr, types.EmptyTSK)
if err != nil {
if strings.Contains(err.Error(), "actor not found") {
continue
} else {
return err
}
}
mbLockedSum = big.Add(mbLockedSum, mbal.Locked)
mbAvailableSum = big.Add(mbAvailableSum, mbal.Escrow)
}
fmt.Printf(" Market locked: %s\n", types.FIL(mbLockedSum).Short())
fmt.Printf(" Market available: %s\n", types.FIL(mbAvailableSum).Short())
fmt.Println()
chs, err := fullapi.PaychList(ctx)
if err != nil {
return err
}
switch {
case len(chs) <= 1:
fmt.Printf("Payment Channels: %v channel\n", len(chs))
case len(chs) > 1:
fmt.Printf("Payment Channels: %v channels\n", len(chs))
}
fmt.Println()
localDeals, err := fullapi.ClientListDeals(ctx)
if err != nil {
return err
}
var totalSize uint64
byState := map[storagemarket.StorageDealStatus][]uint64{}
for _, deal := range localDeals {
totalSize += deal.Size
byState[deal.State] = append(byState[deal.State], deal.Size)
}
fmt.Printf("Deals: %d, %s\n", len(localDeals), types.SizeStr(types.NewInt(totalSize)))
type stateStat struct {
state storagemarket.StorageDealStatus
count int
bytes uint64
}
stateStats := make([]stateStat, 0, len(byState))
for state, deals := range byState {
if state == storagemarket.StorageDealActive {
state = math.MaxUint64 // for sort
}
st := stateStat{
state: state,
count: len(deals),
}
for _, b := range deals {
st.bytes += b
}
stateStats = append(stateStats, st)
}
sort.Slice(stateStats, func(i, j int) bool {
return int64(stateStats[i].state) < int64(stateStats[j].state)
})
for _, st := range stateStats {
if st.state == math.MaxUint64 {
st.state = storagemarket.StorageDealActive
}
fmt.Printf(" %s: %d deals, %s\n", storagemarket.DealStates[st.state], st.count, types.SizeStr(types.NewInt(st.bytes)))
}
fmt.Println()
tw := tabwriter.NewWriter(os.Stdout, 6, 6, 2, ' ', 0)
s, err := fullapi.NetBandwidthStats(ctx)
if err != nil {
return err
}
fmt.Printf("Bandwidth:\n")
fmt.Fprintf(tw, "\tTotalIn\tTotalOut\tRateIn\tRateOut\n")
fmt.Fprintf(tw, "\t%s\t%s\t%s/s\t%s/s\n", humanize.Bytes(uint64(s.TotalIn)), humanize.Bytes(uint64(s.TotalOut)), humanize.Bytes(uint64(s.RateIn)), humanize.Bytes(uint64(s.RateOut)))
return tw.Flush()
}
func SyncBasefeeCheck(ctx context.Context, fullapi v1api.FullNode) error {
head, err := fullapi.ChainHead(ctx)
if err != nil {
return err
}
switch {
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs
fmt.Printf("[%s]", color.GreenString("sync ok"))
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs
fmt.Printf("[%s]", color.YellowString("sync slow (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)))
default:
fmt.Printf("[%s]", color.RedString("sync behind! (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)))
}
basefee := head.MinTicketBlock().ParentBaseFee
gasCol := []color.Attribute{color.FgBlue}
switch {
case basefee.GreaterThan(big.NewInt(7000_000_000)): // 7 nFIL
gasCol = []color.Attribute{color.BgRed, color.FgBlack}
case basefee.GreaterThan(big.NewInt(3000_000_000)): // 3 nFIL
gasCol = []color.Attribute{color.FgRed}
case basefee.GreaterThan(big.NewInt(750_000_000)): // 750 uFIL
gasCol = []color.Attribute{color.FgYellow}
case basefee.GreaterThan(big.NewInt(100_000_000)): // 100 uFIL
gasCol = []color.Attribute{color.FgGreen}
}
fmt.Printf(" [basefee %s]", color.New(gasCol...).Sprint(types.FIL(basefee).Short()))
return nil
}

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package cli package cli
import ( import (

View File

@ -3,6 +3,7 @@ package cli
import ( import (
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"strings"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -152,6 +153,9 @@ var sendCmd = &cli.Command{
sm, err := InteractiveSend(ctx, cctx, srv, proto) sm, err := InteractiveSend(ctx, cctx, srv, proto)
if err != nil { if err != nil {
if strings.Contains(err.Error(), "no current EF") {
return xerrors.Errorf("transaction rejected on ledger: %w", err)
}
return err return err
} }

View File

@ -1,5 +1,5 @@
//stm: ignore // stm: ignore
//stm: #unit // stm: #unit
package cli package cli
import ( import (

View File

@ -1,5 +1,5 @@
//stm: ignore // stm: ignore
//stm: #unit // stm: #unit
package cli package cli
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package cli package cli
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package cli package cli
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package main package main
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package main package main
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package main package main
import ( import (

View File

@ -55,6 +55,10 @@ var actorSetAddrsCmd = &cli.Command{
Aliases: []string{"set-addrs"}, Aliases: []string{"set-addrs"},
Usage: "set addresses that your miner can be publicly dialed on", Usage: "set addresses that your miner can be publicly dialed on",
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.StringFlag{
Name: "from",
Usage: "optionally specify the account to send the message from",
},
&cli.Int64Flag{ &cli.Int64Flag{
Name: "gas-limit", Name: "gas-limit",
Usage: "set gas limit", Usage: "set gas limit",
@ -117,6 +121,25 @@ var actorSetAddrsCmd = &cli.Command{
return err return err
} }
fromAddr := minfo.Worker
if from := cctx.String("from"); from != "" {
addr, err := address.NewFromString(from)
if err != nil {
return err
}
fromAddr = addr
}
fromId, err := api.StateLookupID(ctx, fromAddr, types.EmptyTSK)
if err != nil {
return err
}
if !isController(minfo, fromId) {
return xerrors.Errorf("sender isn't a controller of miner: %s", fromId)
}
params, err := actors.SerializeParams(&miner.ChangeMultiaddrsParams{NewMultiaddrs: addrs}) params, err := actors.SerializeParams(&miner.ChangeMultiaddrsParams{NewMultiaddrs: addrs})
if err != nil { if err != nil {
return err return err
@ -126,7 +149,7 @@ var actorSetAddrsCmd = &cli.Command{
smsg, err := api.MpoolPushMessage(ctx, &types.Message{ smsg, err := api.MpoolPushMessage(ctx, &types.Message{
To: maddr, To: maddr,
From: minfo.Worker, From: fromId,
Value: types.NewInt(0), Value: types.NewInt(0),
GasLimit: gasLimit, GasLimit: gasLimit,
Method: builtin.MethodsMiner.ChangeMultiaddrs, Method: builtin.MethodsMiner.ChangeMultiaddrs,
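`isController` is not part of this hunk; a plausible sketch of the check it presumably performs (matching the sender against the miner's owner, worker, and control addresses) is shown below. Treat the signature and field access as assumptions rather than the committed helper.

```go
package example

import (
	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

// isController reports whether addr is the miner's owner, worker, or one of
// its control addresses. Sketch only: presumed shape of the helper used above.
func isController(mi api.MinerInfo, addr address.Address) bool {
	if addr == mi.Owner || addr == mi.Worker {
		return true
	}
	for _, ca := range mi.ControlAddresses {
		if addr == ca {
			return true
		}
	}
	return false
}
```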

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package main package main
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #integration // stm: #integration
package main package main
import ( import (

View File

@ -25,7 +25,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
@ -70,7 +70,7 @@ func infoCmdAct(cctx *cli.Context) error {
} }
defer closer() defer closer()
fullapi, acloser, err := lcli.GetFullNodeAPI(cctx) fullapi, acloser, err := lcli.GetFullNodeAPIV1(cctx)
if err != nil { if err != nil {
return err return err
} }
@ -94,34 +94,11 @@ func infoCmdAct(cctx *cli.Context) error {
fmt.Print("Chain: ") fmt.Print("Chain: ")
head, err := fullapi.ChainHead(ctx) err = lcli.SyncBasefeeCheck(ctx, fullapi)
if err != nil { if err != nil {
return err return err
} }
switch {
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs
fmt.Printf("[%s]", color.GreenString("sync ok"))
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs
fmt.Printf("[%s]", color.YellowString("sync slow (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)))
default:
fmt.Printf("[%s]", color.RedString("sync behind! (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)))
}
basefee := head.MinTicketBlock().ParentBaseFee
gasCol := []color.Attribute{color.FgBlue}
switch {
case basefee.GreaterThan(big.NewInt(7000_000_000)): // 7 nFIL
gasCol = []color.Attribute{color.BgRed, color.FgBlack}
case basefee.GreaterThan(big.NewInt(3000_000_000)): // 3 nFIL
gasCol = []color.Attribute{color.FgRed}
case basefee.GreaterThan(big.NewInt(750_000_000)): // 750 uFIL
gasCol = []color.Attribute{color.FgYellow}
case basefee.GreaterThan(big.NewInt(100_000_000)): // 100 uFIL
gasCol = []color.Attribute{color.FgGreen}
}
fmt.Printf(" [basefee %s]", color.New(gasCol...).Sprint(types.FIL(basefee).Short()))
fmt.Println() fmt.Println()
alerts, err := minerApi.LogAlerts(ctx) alerts, err := minerApi.LogAlerts(ctx)
@ -152,7 +129,7 @@ func infoCmdAct(cctx *cli.Context) error {
return nil return nil
} }
func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.FullNode, nodeApi api.StorageMiner) error { func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v1api.FullNode, nodeApi api.StorageMiner) error {
maddr, err := getActorAddress(ctx, cctx) maddr, err := getActorAddress(ctx, cctx)
if err != nil { if err != nil {
return err return err
@ -615,7 +592,7 @@ func colorTokenAmount(format string, amount abi.TokenAmount) {
} }
} }
func producedBlocks(ctx context.Context, count int, maddr address.Address, napi v0api.FullNode) error { func producedBlocks(ctx context.Context, count int, maddr address.Address, napi v1api.FullNode) error {
var err error var err error
head, err := napi.ChainHead(ctx) head, err := napi.ChainHead(ctx)
if err != nil { if err != nil {

View File

@ -7,10 +7,12 @@ import (
"os" "os"
"strconv" "strconv"
"strings" "strings"
"sync"
"text/tabwriter" "text/tabwriter"
"time" "time"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -19,6 +21,7 @@ import (
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
@ -37,6 +40,7 @@ var provingCmd = &cli.Command{
provingCheckProvableCmd, provingCheckProvableCmd,
workersCmd(false), workersCmd(false),
provingComputeCmd, provingComputeCmd,
provingRecoverFaultsCmd,
}, },
} }
@ -644,3 +648,82 @@ It will not send any messages to the chain.`,
return nil return nil
}, },
} }
var provingRecoverFaultsCmd = &cli.Command{
Name: "recover-faults",
Usage: "Manually recovers faulty sectors on chain",
ArgsUsage: "<faulty sectors>",
Flags: []cli.Flag{
&cli.IntFlag{
Name: "confidence",
Usage: "number of block confirmations to wait for",
Value: int(build.MessageConfidence),
},
},
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() < 1 {
return xerrors.Errorf("must pass at least 1 sector number")
}
arglist := cctx.Args().Slice()
var sectors []abi.SectorNumber
for _, v := range arglist {
s, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return xerrors.Errorf("failed to convert sectors, please check the arguments: %w", err)
}
sectors = append(sectors, abi.SectorNumber(s))
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(cctx)
msgs, err := nodeApi.RecoverFault(ctx, sectors)
if err != nil {
return err
}
// wait for msgs to get mined into a block
var wg sync.WaitGroup
wg.Add(len(msgs))
results := make(chan error, len(msgs))
for _, msg := range msgs {
go func(m cid.Cid) {
defer wg.Done()
wait, err := api.StateWaitMsg(ctx, m, uint64(cctx.Int("confidence")))
if err != nil {
results <- xerrors.Errorf("Timeout waiting for message to land on chain %s", wait.Message)
return
}
if wait.Receipt.ExitCode != 0 {
results <- xerrors.Errorf("Failed to execute message %s: %w", wait.Message, wait.Receipt.ExitCode.Error())
return
}
results <- nil
return
}(msg)
}
wg.Wait()
close(results)
for v := range results {
if v != nil {
fmt.Println("Failed to execute the message %w", v)
}
}
return nil
},
}

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package stages package stages
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package main package main
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #chaos // stm: #chaos
package chaos package chaos
import ( import (

View File

@ -1,4 +1,4 @@
//stm: ignore // stm: ignore
// This file does not test any behaviors by itself; rather, it runs other test files // This file does not test any behaviors by itself; rather, it runs other test files
// Therefore, this file should not be annotated. // Therefore, this file should not be annotated.
package conformance package conformance

View File

@ -276,6 +276,7 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP
// messages that originate from secp256k senders, leaving all // messages that originate from secp256k senders, leaving all
// others untouched. // others untouched.
// TODO: generate a signature in the DSL so that it's encoded in // TODO: generate a signature in the DSL so that it's encoded in
//
// the test vector. // the test vector.
func toChainMsg(msg *types.Message) (ret types.ChainMsg) { func toChainMsg(msg *types.Message) (ret types.ChainMsg) {
ret = msg ret = msg

View File

@ -106,6 +106,8 @@
* [PiecesListPieces](#PiecesListPieces) * [PiecesListPieces](#PiecesListPieces)
* [Pledge](#Pledge) * [Pledge](#Pledge)
* [PledgeSector](#PledgeSector) * [PledgeSector](#PledgeSector)
* [Recover](#Recover)
* [RecoverFault](#RecoverFault)
* [Return](#Return) * [Return](#Return)
* [ReturnAddPiece](#ReturnAddPiece) * [ReturnAddPiece](#ReturnAddPiece)
* [ReturnDataCid](#ReturnDataCid) * [ReturnDataCid](#ReturnDataCid)
@ -2265,6 +2267,36 @@ Response:
} }
``` ```
## Recover
### RecoverFault
RecoverFault can be used to declare recoveries manually. It sends messages
to the miner actor with details of the recovered sectors and returns the CIDs of those messages.
It honors the maxPartitionsPerRecoveryMessage setting from the config.
Perms: admin
Inputs:
```json
[
[
123,
124
]
]
```
Response:
```json
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
]
```
## Return ## Return
@ -4397,26 +4429,26 @@ Response:
}, },
"seal/v0/datacid": { "seal/v0/datacid": {
"0": { "0": {
"MinMemory": 2048, "MinMemory": 4294967296,
"MaxMemory": 2048, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
"BaseMinMemory": 2048, "BaseMinMemory": 1073741824,
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"1": { "1": {
"MinMemory": 8388608, "MinMemory": 4294967296,
"MaxMemory": 8388608, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
"BaseMinMemory": 8388608, "BaseMinMemory": 1073741824,
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"2": { "2": {
"MinMemory": 1073741824, "MinMemory": 4294967296,
"MaxMemory": 1073741824, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
@ -4433,8 +4465,8 @@ Response:
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"4": { "4": {
"MinMemory": 8589934592, "MinMemory": 4294967296,
"MaxMemory": 8589934592, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
@ -4442,26 +4474,26 @@ Response:
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"5": { "5": {
"MinMemory": 2048, "MinMemory": 4294967296,
"MaxMemory": 2048, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
"BaseMinMemory": 2048, "BaseMinMemory": 1073741824,
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"6": { "6": {
"MinMemory": 8388608, "MinMemory": 4294967296,
"MaxMemory": 8388608, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
"BaseMinMemory": 8388608, "BaseMinMemory": 1073741824,
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"7": { "7": {
"MinMemory": 1073741824, "MinMemory": 4294967296,
"MaxMemory": 1073741824, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
@ -4478,8 +4510,8 @@ Response:
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"9": { "9": {
"MinMemory": 8589934592, "MinMemory": 4294967296,
"MaxMemory": 8589934592, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,

View File

@ -579,26 +579,26 @@ Response:
}, },
"seal/v0/datacid": { "seal/v0/datacid": {
"0": { "0": {
"MinMemory": 2048, "MinMemory": 4294967296,
"MaxMemory": 2048, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
"BaseMinMemory": 2048, "BaseMinMemory": 1073741824,
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"1": { "1": {
"MinMemory": 8388608, "MinMemory": 4294967296,
"MaxMemory": 8388608, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
"BaseMinMemory": 8388608, "BaseMinMemory": 1073741824,
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"2": { "2": {
"MinMemory": 1073741824, "MinMemory": 4294967296,
"MaxMemory": 1073741824, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
@ -615,8 +615,8 @@ Response:
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"4": { "4": {
"MinMemory": 8589934592, "MinMemory": 4294967296,
"MaxMemory": 8589934592, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
@ -624,26 +624,26 @@ Response:
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"5": { "5": {
"MinMemory": 2048, "MinMemory": 4294967296,
"MaxMemory": 2048, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
"BaseMinMemory": 2048, "BaseMinMemory": 1073741824,
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"6": { "6": {
"MinMemory": 8388608, "MinMemory": 4294967296,
"MaxMemory": 8388608, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
"BaseMinMemory": 8388608, "BaseMinMemory": 1073741824,
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"7": { "7": {
"MinMemory": 1073741824, "MinMemory": 4294967296,
"MaxMemory": 1073741824, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,
@ -660,8 +660,8 @@ Response:
"MaxConcurrent": 0 "MaxConcurrent": 0
}, },
"9": { "9": {
"MinMemory": 8589934592, "MinMemory": 4294967296,
"MaxMemory": 8589934592, "MaxMemory": 4294967296,
"GPUUtilization": 0, "GPUUtilization": 0,
"MaxParallelism": 1, "MaxParallelism": 1,
"MaxParallelismGPU": 0, "MaxParallelismGPU": 0,

View File

@ -2100,6 +2100,7 @@ COMMANDS:
check Check sectors provable check Check sectors provable
workers list workers workers list workers
compute Compute simulated proving tasks compute Compute simulated proving tasks
recover-faults Manually recovers faulty sectors on chain
help, h Shows a list of commands or help for one command help, h Shows a list of commands or help for one command
OPTIONS: OPTIONS:
@ -2210,6 +2211,19 @@ OPTIONS:
``` ```
``` ```
### lotus-miner proving recover-faults
```
NAME:
lotus-miner proving recover-faults - Manually recovers faulty sectors on chain
USAGE:
lotus-miner proving recover-faults [command options] <faulty sectors>
OPTIONS:
--confidence value number of block confirmations to wait for (default: 5)
```
## lotus-miner storage ## lotus-miner storage
``` ```
NAME: NAME:

View File

@ -18,6 +18,7 @@ COMMANDS:
BASIC: BASIC:
send Send funds between accounts send Send funds between accounts
wallet Manage wallet wallet Manage wallet
info Print node info
client Make deals, store data, retrieve data client Make deals, store data, retrieve data
msig Interact with a multisig wallet msig Interact with a multisig wallet
filplus Interact with the verified registry actor used by Filplus filplus Interact with the verified registry actor used by Filplus
@ -398,6 +399,22 @@ OPTIONS:
``` ```
## lotus info
```
NAME:
lotus info - Print node info
USAGE:
lotus info [command options] [arguments...]
CATEGORY:
BASIC
OPTIONS:
--help, -h show help (default: false)
```
## lotus client ## lotus client
``` ```
NAME: NAME:

View File

@ -1,4 +1,4 @@
//stm: #unit // stm: #unit
package gateway package gateway
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #integration // stm: #integration
package itests package itests
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #integration // stm: #integration
package itests package itests
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #integration // stm: #integration
package itests package itests
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #integration // stm: #integration
package itests package itests
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #integration // stm: #integration
package itests package itests
import ( import (

View File

@ -1,4 +1,4 @@
//stm: #integration // stm: #integration
package itests package itests
import ( import (

Some files were not shown because too many files have changed in this diff.