Merge remote-tracking branch 'origin/master' into feat/async-restartable-workers

Łukasz Magiera 2020-10-27 03:31:07 +01:00
commit e1da874258
40 changed files with 1129 additions and 465 deletions

View File

@ -1,5 +1,58 @@
# Lotus changelog
# 1.1.2 / 2020-10-24
This is a patch release of Lotus that builds on the fixes involving worker keys that were introduced in v1.1.1. Miners and node operators should update to this release as soon as possible in order to ensure their blocks are propagated and validated.
## Changes
- Handle worker key changes correctly in runtime (https://github.com/filecoin-project/lotus/pull/4579)
# 1.1.1 / 2020-10-24
This is a patch release of Lotus that addresses some issues caused when miners change their worker keys. Miners and node operators should update to this release as soon as possible, especially any miner who has changed their worker key recently.
## Changes
- Miner finder for interactive client deal CLI (https://github.com/filecoin-project/lotus/pull/4504)
- Disable blockstore bloom filter (https://github.com/filecoin-project/lotus/pull/4512)
- Add api for getting status given a code (https://github.com/filecoin-project/lotus/pull/4210)
- add batch api for push messages (https://github.com/filecoin-project/lotus/pull/4236)
- add measure datastore wrapper around bench chain datastore (https://github.com/filecoin-project/lotus/pull/4302)
- Look at block base fee for PCR (https://github.com/filecoin-project/lotus/pull/4313)
- Add a shed util to determine % of power that has won a block (https://github.com/filecoin-project/lotus/pull/4318)
- Shed/borked cmd (https://github.com/filecoin-project/lotus/pull/4339)
- optimize mining code (https://github.com/filecoin-project/lotus/pull/4379)
- heaviestTipSet returning nil is ok (https://github.com/filecoin-project/lotus/pull/4523)
- Remove most v0 actor imports (https://github.com/filecoin-project/lotus/pull/4383)
- Small chain export optimization (https://github.com/filecoin-project/lotus/pull/4536)
- Add block list to pcr (https://github.com/filecoin-project/lotus/pull/4314)
- Fix circ supply default in conformance (https://github.com/filecoin-project/lotus/pull/4449)
- miner: fix init --create-worker-key (https://github.com/filecoin-project/lotus/pull/4475)
- make push and addLocal atomic (https://github.com/filecoin-project/lotus/pull/4500)
- add some methods that oni needs (https://github.com/filecoin-project/lotus/pull/4501)
- MinerGetBaseInfo: if miner is not found in lookback, check current (https://github.com/filecoin-project/lotus/pull/4508)
- Delete wallet from local wallet cache (https://github.com/filecoin-project/lotus/pull/4526)
- Fix lotus-shed ledger list (https://github.com/filecoin-project/lotus/pull/4521)
- Manage sectors by size instead of proof type (https://github.com/filecoin-project/lotus/pull/4511)
- Feat/api request metrics wrapper (https://github.com/filecoin-project/lotus/pull/4516)
- Fix chain sync stopping to sync (https://github.com/filecoin-project/lotus/pull/4541)
- Use the correct lookback for the worker key when creating blocks (https://github.com/filecoin-project/lotus/pull/4539)
- Cleanup test initialization and always validate VRFs in tests (https://github.com/filecoin-project/lotus/pull/4538)
- Add a market WithdrawBalance CLI (https://github.com/filecoin-project/lotus/pull/4524)
- wallet list: Add market balance and ID address flags (https://github.com/filecoin-project/lotus/pull/4555)
- tvx simulate command; tvx extract --ignore-sanity-checks (https://github.com/filecoin-project/lotus/pull/4554)
- lotus-lite: CLI tests for `lotus client` commands (https://github.com/filecoin-project/lotus/pull/4497)
- lite-mode - market storage and retrieval clients (https://github.com/filecoin-project/lotus/pull/4263)
- Chore: update drand to v1.2.0 (https://github.com/filecoin-project/lotus/pull/4420)
- Fix random test failures (https://github.com/filecoin-project/lotus/pull/4559)
- Fix flaky TestTimedBSSimple (https://github.com/filecoin-project/lotus/pull/4561)
- Make wallet market withdraw usable with miner addresses (https://github.com/filecoin-project/lotus/pull/4556)
- Fix flaky TestChainExportImportFull (https://github.com/filecoin-project/lotus/pull/4564)
- Use older randomness for the PoSt commit on specs-actors version 2 (https://github.com/filecoin-project/lotus/pull/4563)
- shed: Command to decode messages (https://github.com/filecoin-project/lotus/pull/4565)
- Fetch worker key from correct block on sync (https://github.com/filecoin-project/lotus/pull/4573)
# 1.1.0 / 2020-10-20
This is a mandatory release that introduces the first post-liftoff upgrade to the Filecoin network. The consensus-breaking change is an upgrade to specs-actors v2.2.0 at epoch 170000.
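For context, upgrades like this are gated on a fixed epoch: below the upgrade height nodes apply the old actors rules, and at or above it the new ones. A minimal sketch of that gating, assuming an illustrative constant and helper (the real schedule lives in Lotus's build and state-manager packages):

package example

import "fmt"

// UpgradeActorsV2Height mirrors the upgrade epoch named above; the real
// constant lives in Lotus's build parameters.
const UpgradeActorsV2Height = 170000

// actorsVersionAt is a hypothetical helper showing how the epoch gates
// consensus rules: below the upgrade height the old actors run, at or
// above it specs-actors v2.2.0 rules apply.
func actorsVersionAt(epoch int64) int {
    if epoch >= UpgradeActorsV2Height {
        return 2
    }
    return 0
}

func Example() {
    fmt.Println(actorsVersionAt(169999)) // 0: pre-upgrade
    fmt.Println(actorsVersionAt(170000)) // 2: upgrade epoch and after
}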

View File

@ -312,6 +312,8 @@ type FullNode interface {
ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
// which are stuck due to insufficient funds
ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error
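A minimal usage sketch for the new cancel API, assuming an api.FullNode handle obtained through the usual Lotus RPC client setup; the cancelFailedTransfers helper name is ours:

package example

import (
    "context"
    "log"

    datatransfer "github.com/filecoin-project/go-data-transfer"

    "github.com/filecoin-project/lotus/api"
)

// cancelFailedTransfers lists a node's data transfers and cancels any
// that have failed. The same (transferID, otherPeer, isInitiator)
// triple that identifies a channel for ClientRestartDataTransfer
// identifies it for ClientCancelDataTransfer.
func cancelFailedTransfers(ctx context.Context, node api.FullNode) error {
    channels, err := node.ClientListDataTransfers(ctx)
    if err != nil {
        return err
    }
    for _, ch := range channels {
        if ch.Status != datatransfer.Failed {
            continue
        }
        if err := node.ClientCancelDataTransfer(ctx, ch.TransferID, ch.OtherPeer, ch.IsInitiator); err != nil {
            log.Printf("cancelling transfer %d: %s", ch.TransferID, err)
        }
    }
    return nil
}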

View File

@ -5,8 +5,10 @@ import (
"context"
"time"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/piecestore"
@ -83,6 +85,10 @@ type StorageMiner interface {
MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error)
MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
// MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
// MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
DealsList(ctx context.Context) ([]MarketDeal, error)
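The miner-side pair mirrors the client API. A sketch that restarts the in-flight transfers a miner is responding to, assuming an api.StorageMiner handle; the helper name is ours:

package example

import (
    "context"

    "github.com/filecoin-project/lotus/api"
)

// restartRespondingTransfers force-restarts every transfer this miner
// is responding to (transfers the remote peer initiated), a typical
// recovery step after the markets process restarts.
func restartRespondingTransfers(ctx context.Context, m api.StorageMiner) error {
    channels, err := m.MarketListDataTransfers(ctx)
    if err != nil {
        return err
    }
    for _, ch := range channels {
        if ch.IsInitiator {
            continue
        }
        if err := m.MarketRestartDataTransfer(ctx, ch.TransferID, ch.OtherPeer, ch.IsInitiator); err != nil {
            return err
        }
    }
    return nil
}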

View File

@ -173,6 +173,7 @@ type FullNodeStruct struct {
ClientListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
ClientDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
ClientRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"`
ClientCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"`
ClientRetrieveTryRestartInsufficientFunds func(ctx context.Context, paymentChannel address.Address) error `perm:"write"`
StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"`
@ -286,6 +287,8 @@ type StorageMinerStruct struct {
MarketGetRetrievalAsk func(ctx context.Context) (*retrievalmarket.Ask, error) `perm:"read"`
MarketListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
MarketDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
MarketRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"`
MarketCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"`
PledgeSector func(context.Context) error `perm:"write"`
@ -597,6 +600,10 @@ func (c *FullNodeStruct) ClientRestartDataTransfer(ctx context.Context, transfer
return c.Internal.ClientRestartDataTransfer(ctx, transferID, otherPeer, isInitiator)
}
func (c *FullNodeStruct) ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
return c.Internal.ClientCancelDataTransfer(ctx, transferID, otherPeer, isInitiator)
}
func (c *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error {
return c.Internal.ClientRetrieveTryRestartInsufficientFunds(ctx, paymentChannel)
}
@ -1381,6 +1388,14 @@ func (c *StorageMinerStruct) MarketDataTransferUpdates(ctx context.Context) (<-c
return c.Internal.MarketDataTransferUpdates(ctx)
}
func (c *StorageMinerStruct) MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
return c.Internal.MarketRestartDataTransfer(ctx, transferID, otherPeer, isInitiator)
}
func (c *StorageMinerStruct) MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
return c.Internal.MarketCancelDataTransfer(ctx, transferID, otherPeer, isInitiator)
}
func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error {
return c.Internal.DealsImportData(ctx, dealPropCid, file)
}

View File

@ -29,7 +29,7 @@ func buildType() string {
}
// BuildVersion is the local build version, set by build system
const BuildVersion = "1.1.0"
const BuildVersion = "1.1.2"
func UserVersion() string {
return BuildVersion + buildType() + CurrentCommit

View File

@ -77,6 +77,7 @@ type State interface {
DeadlinesChanged(State) (bool, error)
Info() (MinerInfo, error)
MinerInfoChanged(State) (bool, error)
DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error)
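A sketch of how a caller might use the new method, assuming a miner.State already loaded (for example via miner.Load over an actor read from the state tree); the printDeadline helper is ours:

package example

import (
    "fmt"

    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// printDeadline reports the proving deadline containing the given
// epoch. dline.Info carries the deadline index and its open/close
// epochs, independent of the underlying actors version.
func printDeadline(st miner.State, epoch abi.ChainEpoch) error {
    di, err := st.DeadlineInfo(epoch)
    if err != nil {
        return err
    }
    fmt.Printf("deadline %d: open [%d, %d)\n", di.Index, di.Open, di.Close)
    return nil
}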

View File

@ -277,6 +277,15 @@ func (s *state0) DeadlinesChanged(other State) (bool, error) {
return !s.State.Deadlines.Equals(other0.Deadlines), nil
}
func (s *state0) MinerInfoChanged(other State) (bool, error) {
other0, ok := other.(*state0)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.Info.Equals(other0.State.Info), nil
}
func (s *state0) Info() (MinerInfo, error) {
info, err := s.State.GetInfo(s.store)
if err != nil {

View File

@ -276,6 +276,15 @@ func (s *state2) DeadlinesChanged(other State) (bool, error) {
return !s.State.Deadlines.Equals(other2.Deadlines), nil
}
func (s *state2) MinerInfoChanged(other State) (bool, error) {
other2, ok := other.(*state2)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.Info.Equals(other2.State.Info), nil
}
func (s *state2) Info() (MinerInfo, error) {
info, err := s.State.GetInfo(s.store)
if err != nil {

View File

@ -56,9 +56,9 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer
}
func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder {
return func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime2.Syscalls {
return func(ctx context.Context, rt *vm.Runtime) runtime2.Syscalls {
return &fakedSigSyscalls{
base(ctx, cstate, cst),
base(ctx, rt),
}
}
}

View File

@ -27,16 +27,11 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletA
return nil, xerrors.Errorf("failed to load tipset state: %w", err)
}
lbts, err := stmgr.GetLookbackTipSetForRound(ctx, sm, pts, bt.Epoch)
_, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, sm, pts, bt.Epoch)
if err != nil {
return nil, xerrors.Errorf("getting lookback miner actor state: %w", err)
}
lbst, _, err := sm.TipSetState(ctx, lbts)
if err != nil {
return nil, err
}
worker, err := stmgr.GetMinerWorkerRaw(ctx, sm, lbst, bt.Miner)
if err != nil {
return nil, xerrors.Errorf("failed to get miner worker: %w", err)

View File

@ -64,6 +64,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: types.NewInt(0),
LookbackState: LookbackStateGetterForTipset(sm, ts),
}
vmi, err := sm.newVM(ctx, vmopt)
@ -178,6 +179,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: ts.Blocks()[0].ParentBaseFee,
LookbackState: LookbackStateGetterForTipset(sm, ts),
}
vmi, err := sm.newVM(ctx, vmopt)
if err != nil {

View File

@ -241,6 +241,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: baseFee,
LookbackState: LookbackStateGetterForTipset(sm, ts),
}
return sm.newVM(ctx, vmopt)

View File

@ -366,6 +366,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: ts.Blocks()[0].ParentBaseFee,
LookbackState: LookbackStateGetterForTipset(sm, ts),
}
vmi, err := sm.newVM(ctx, vmopt)
if err != nil {
@ -391,7 +392,17 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
return root, trace, nil
}
func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, error) {
func LookbackStateGetterForTipset(sm *StateManager, ts *types.TipSet) vm.LookbackStateGetter {
return func(ctx context.Context, round abi.ChainEpoch) (*state.StateTree, error) {
_, st, err := GetLookbackTipSetForRound(ctx, sm, ts, round)
if err != nil {
return nil, err
}
return sm.StateTree(st)
}
}
func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, cid.Cid, error) {
var lbr abi.ChainEpoch
lb := policy.GetWinningPoStSectorSetLookback(sm.GetNtwkVersion(ctx, round))
if round > lb {
@ -399,16 +410,33 @@ func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.
}
// more null blocks than our lookback
if lbr > ts.Height() {
return ts, nil
if lbr >= ts.Height() {
// This should never happen at this point, but may happen before
// network version 3 (where the lookback was only 10 blocks).
st, _, err := sm.TipSetState(ctx, ts)
if err != nil {
return nil, cid.Undef, err
}
return ts, st, nil
}
lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, lbr, ts, true)
// Get the tipset after the lookback tipset, or the next non-null one.
// Its parent state is exactly the state we want: the result of
// executing the lookback tipset, available without recomputation.
nextTs, err := sm.ChainStore().GetTipsetByHeight(ctx, lbr+1, ts, false)
if err != nil {
return nil, xerrors.Errorf("failed to get lookback tipset: %w", err)
return nil, cid.Undef, xerrors.Errorf("failed to get lookback tipset+1: %w", err)
}
return lbts, nil
if lbr > nextTs.Height() {
return nil, cid.Undef, xerrors.Errorf("failed to find non-null tipset %s (%d) which is known to exist, found %s (%d)", ts.Key(), ts.Height(), nextTs.Key(), nextTs.Height())
}
lbts, err := sm.ChainStore().GetTipSetFromKey(nextTs.Parents())
if err != nil {
return nil, cid.Undef, xerrors.Errorf("failed to resolve lookback tipset: %w", err)
}
return lbts, nextTs.ParentState(), nil
}
func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) {
@ -436,17 +464,11 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule
rbase = entries[len(entries)-1]
}
lbts, err := GetLookbackTipSetForRound(ctx, sm, ts, round)
lbts, lbst, err := GetLookbackTipSetForRound(ctx, sm, ts, round)
if err != nil {
return nil, xerrors.Errorf("getting lookback miner actor state: %w", err)
}
// TODO: load the state instead of computing it?
lbst, _, err := sm.TipSetState(ctx, lbts)
if err != nil {
return nil, err
}
act, err := sm.LoadActorRaw(ctx, maddr, lbst)
if xerrors.Is(err, types.ErrActorNotFound) {
_, err := sm.LoadActor(ctx, maddr, ts)

View File

@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
"sync"
"time"
"golang.org/x/xerrors"
@ -27,14 +26,11 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/bufbstore"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node/impl/client"
@ -227,9 +223,6 @@ type BlockValidator struct {
// necessary for block validation
chain *store.ChainStore
stmgr *stmgr.StateManager
mx sync.Mutex
keycache map[string]address.Address
}
func NewBlockValidator(self peer.ID, chain *store.ChainStore, stmgr *stmgr.StateManager, blacklist func(peer.ID)) *BlockValidator {
@ -242,7 +235,6 @@ func NewBlockValidator(self peer.ID, chain *store.ChainStore, stmgr *stmgr.State
recvBlocks: newBlockReceiptCache(),
chain: chain,
stmgr: stmgr,
keycache: make(map[string]address.Address),
}
}
@ -436,60 +428,25 @@ func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockM
}
func (bv *BlockValidator) checkPowerAndGetWorkerKey(ctx context.Context, bh *types.BlockHeader) (address.Address, error) {
addr := bh.Miner
bv.mx.Lock()
key, ok := bv.keycache[addr.String()]
bv.mx.Unlock()
if !ok {
// TODO I have a feeling all this can be simplified by cleverer DI to use the API
ts := bv.chain.GetHeaviestTipSet()
st, _, err := bv.stmgr.TipSetState(ctx, ts)
if err != nil {
return address.Undef, err
}
buf := bufbstore.NewBufferedBstore(bv.chain.Blockstore())
cst := cbor.NewCborStore(buf)
state, err := state.LoadStateTree(cst, st)
if err != nil {
return address.Undef, err
}
act, err := state.GetActor(addr)
if err != nil {
return address.Undef, err
}
mst, err := miner.Load(bv.chain.Store(ctx), act)
if err != nil {
return address.Undef, err
}
info, err := mst.Info()
if err != nil {
return address.Undef, err
}
worker := info.Worker
key, err = bv.stmgr.ResolveToKeyAddress(ctx, worker, ts)
if err != nil {
return address.Undef, err
}
bv.mx.Lock()
bv.keycache[addr.String()] = key
bv.mx.Unlock()
}
// we check that the miner met the minimum power at the lookback tipset
baseTs := bv.chain.GetHeaviestTipSet()
lbts, err := stmgr.GetLookbackTipSetForRound(ctx, bv.stmgr, baseTs, bh.Height)
lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, bv.stmgr, baseTs, bh.Height)
if err != nil {
log.Warnf("failed to load lookback tipset for incoming block: %s", err)
return address.Undef, ErrSoftFailure
}
key, err := stmgr.GetMinerWorkerRaw(ctx, bv.stmgr, lbst, bh.Miner)
if err != nil {
log.Warnf("failed to resolve worker key for miner %s: %s", bh.Miner, err)
return address.Undef, ErrSoftFailure
}
// NOTE: we check to see if the miner was eligible in the lookback
// tipset - 1 for historical reasons. DO NOT use the lookback state
// returned by GetLookbackTipSetForRound.
eligible, err := stmgr.MinerEligibleToMine(ctx, bv.stmgr, bh.Miner, baseTs, lbts)
if err != nil {
log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err)

View File

@ -730,16 +730,11 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
}
lbts, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height)
lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height)
if err != nil {
return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
}
lbst, _, err := syncer.sm.TipSetState(ctx, lbts)
if err != nil {
return xerrors.Errorf("failed to compute lookback tipset state (epoch %d): %w", lbts.Height(), err)
}
prevBeacon, err := syncer.store.GetLatestBeaconEntry(baseTs)
if err != nil {
return xerrors.Errorf("failed to get latest beacon entry: %w", err)

View File

@ -34,15 +34,18 @@ func init() {
// Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there
type SyscallBuilder func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime2.Syscalls
type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime2.Syscalls
func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder {
return func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime2.Syscalls {
return func(ctx context.Context, rt *Runtime) runtime2.Syscalls {
return &syscallShim{
ctx: ctx,
cstate: cstate,
cst: cst,
actor: rt.Receiver(),
cstate: rt.state,
cst: rt.cst,
lbState: rt.vm.lbStateGet,
verifier: verifier,
}
@ -52,6 +55,8 @@ func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder {
type syscallShim struct {
ctx context.Context
lbState LookbackStateGetter
actor address.Address
cstate *state.StateTree
cst cbor.IpldStore
verifier ffiwrapper.Verifier
@ -184,26 +189,7 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse
}
func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error {
// get appropriate miner actor
act, err := ss.cstate.GetActor(blk.Miner)
if err != nil {
return err
}
// use that to get the miner state
mas, err := miner.Load(adt.WrapStore(ss.ctx, ss.cst), act)
if err != nil {
return err
}
info, err := mas.Info()
if err != nil {
return err
}
// and use to get resolved workerKey
waddr, err := ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker)
waddr, err := ss.workerKeyAtLookback(blk.Height)
if err != nil {
return err
}
@ -215,6 +201,31 @@ func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error {
return nil
}
func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Address, error) {
lbState, err := ss.lbState(ss.ctx, height)
if err != nil {
return address.Undef, err
}
// get appropriate miner actor
act, err := lbState.GetActor(ss.actor)
if err != nil {
return address.Undef, err
}
// use that to get the miner state
mas, err := miner.Load(adt.WrapStore(ss.ctx, ss.cst), act)
if err != nil {
return address.Undef, err
}
info, err := mas.Info()
if err != nil {
return address.Undef, err
}
return ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker)
}
func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error {
ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof)
if err != nil {

View File

@ -134,11 +134,6 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
Blocks: &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks},
Atlas: vm.cst.Atlas,
}
rt.Syscalls = pricedSyscalls{
under: vm.Syscalls(ctx, vm.cstate, rt.cst),
chargeGas: rt.chargeGasFunc(1),
pl: rt.pricelist,
}
vmm := *msg
resF, ok := rt.ResolveAddress(msg.From)
@ -156,6 +151,12 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
rt.Message = &Message{msg: vmm}
}
rt.Syscalls = pricedSyscalls{
under: vm.Syscalls(ctx, rt),
chargeGas: rt.chargeGasFunc(1),
pl: rt.pricelist,
}
return rt
}
@ -169,6 +170,7 @@ func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtim
type CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error)
type NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version
type LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error)
type VM struct {
cstate *state.StateTree
@ -181,6 +183,7 @@ type VM struct {
circSupplyCalc CircSupplyCalculator
ntwkVersion NtwkVersionGetter
baseFee abi.TokenAmount
lbStateGet LookbackStateGetter
Syscalls SyscallBuilder
}
@ -194,6 +197,7 @@ type VMOpts struct {
CircSupplyCalc CircSupplyCalculator
NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? It seems like even when creating new networks we want to use the 'global'/build-default version getter
BaseFee abi.TokenAmount
LookbackState LookbackStateGetter
}
func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
@ -216,6 +220,7 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
ntwkVersion: opts.NtwkVersion,
Syscalls: opts.Syscalls,
baseFee: opts.BaseFee,
lbStateGet: opts.LookbackState,
}, nil
}

View File

@ -283,6 +283,10 @@ func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) er
w.lk.Lock()
defer w.lk.Unlock()
if err := w.keystore.Delete(KTrashPrefix + k.Address.String()); err != nil && !xerrors.Is(err, types.ErrKeyInfoNotFound) {
return xerrors.Errorf("failed to purge trashed key %s: %w", addr, err)
}
if err := w.keystore.Put(KTrashPrefix+k.Address.String(), k.KeyInfo); err != nil {
return xerrors.Errorf("failed to mark key %s as trashed: %w", addr, err)
}

View File

@ -84,6 +84,7 @@ var clientCmd = &cli.Command{
WithCategory("data", clientImportCmd),
WithCategory("data", clientDropCmd),
WithCategory("data", clientLocalCmd),
WithCategory("data", clientStat),
WithCategory("retrieval", clientFindCmd),
WithCategory("retrieval", clientRetrieveCmd),
WithCategory("util", clientCommPCmd),
@ -91,6 +92,7 @@ var clientCmd = &cli.Command{
WithCategory("util", clientInfoCmd),
WithCategory("util", clientListTransfers),
WithCategory("util", clientRestartTransfer),
WithCategory("util", clientCancelTransfer),
},
}
@ -1638,6 +1640,39 @@ var clientInfoCmd = &cli.Command{
},
}
var clientStat = &cli.Command{
Name: "stat",
Usage: "Print information about a locally stored file (piece size, etc)",
ArgsUsage: "<cid>",
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
if !cctx.Args().Present() || cctx.NArg() != 1 {
return fmt.Errorf("must specify cid of data")
}
dataCid, err := cid.Parse(cctx.Args().First())
if err != nil {
return fmt.Errorf("parsing data cid: %w", err)
}
ds, err := api.ClientDealSize(ctx, dataCid)
if err != nil {
return err
}
fmt.Printf("Piece Size : %v\n", ds.PieceSize)
fmt.Printf("Payload Size: %v\n", ds.PayloadSize)
return nil
},
}
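A test-style sketch of driving the new command, assuming it runs alongside the MockCLI helpers that appear later in this diff and that dataCid is the root of a previously imported file; statSketch is ours:

package test

import (
    "regexp"
    "testing"

    "github.com/ipfs/go-cid"
    "github.com/stretchr/testify/require"
)

// statSketch drives `client stat <cid>` through the mock CLI client
// and checks that a piece size is reported.
func statSketch(t *testing.T, clientCLI *MockCLIClient, dataCid cid.Cid) {
    out := clientCLI.RunCmd("client", "stat", dataCid.String())
    require.Regexp(t, regexp.MustCompile("Piece Size"), out)
}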
var clientRestartTransfer = &cli.Command{
Name: "restart-transfer",
Usage: "Force restart a stalled data transfer",
@ -1698,6 +1733,66 @@ var clientRestartTransfer = &cli.Command{
},
}
var clientCancelTransfer = &cli.Command{
Name: "cancel-transfer",
Usage: "Force cancel a data transfer",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "peerid",
Usage: "narrow to transfer with specific peer",
},
&cli.BoolFlag{
Name: "initiator",
Usage: "specify only transfers where peer is/is not initiator",
Value: true,
},
},
Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() {
return cli.ShowCommandHelp(cctx, cctx.Command.Name)
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64)
if err != nil {
return fmt.Errorf("Error reading transfer ID: %w", err)
}
transferID := datatransfer.TransferID(transferUint)
initiator := cctx.Bool("initiator")
var other peer.ID
if pidstr := cctx.String("peerid"); pidstr != "" {
p, err := peer.Decode(pidstr)
if err != nil {
return err
}
other = p
} else {
channels, err := api.ClientListDataTransfers(ctx)
if err != nil {
return err
}
found := false
for _, channel := range channels {
if channel.IsInitiator == initiator && channel.TransferID == transferID {
other = channel.OtherPeer
found = true
break
}
}
if !found {
return errors.New("unable to find matching data transfer")
}
}
return api.ClientCancelDataTransfer(ctx, transferID, other, initiator)
},
}
var clientListTransfers = &cli.Command{
Name: "list-transfers",
Usage: "List ongoing data transfers for deals",
@ -1715,6 +1810,10 @@ var clientListTransfers = &cli.Command{
Name: "watch",
Usage: "watch deal updates in real-time, rather than a one time list",
},
&cli.BoolFlag{
Name: "show-failed",
Usage: "show failed/cancelled transfers",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
@ -1732,7 +1831,7 @@ var clientListTransfers = &cli.Command{
completed := cctx.Bool("completed")
color := cctx.Bool("color")
watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed")
if watch {
channelUpdates, err := api.ClientDataTransferUpdates(ctx)
if err != nil {
@ -1744,7 +1843,7 @@ var clientListTransfers = &cli.Command{
tm.MoveCursor(1, 1)
OutputDataTransferChannels(tm.Screen, channels, completed, color)
OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed)
tm.Flush()
@ -1769,13 +1868,13 @@ var clientListTransfers = &cli.Command{
}
}
}
OutputDataTransferChannels(os.Stdout, channels, completed, color)
OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed)
return nil
},
}
// OutputDataTransferChannels generates table output for a list of channels
func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, completed bool, color bool) {
func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, completed bool, color bool, showFailed bool) {
sort.Slice(channels, func(i, j int) bool {
return channels[i].TransferID < channels[j].TransferID
})
@ -1785,6 +1884,9 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
if !completed && channel.Status == datatransfer.Completed {
continue
}
if !showFailed && (channel.Status == datatransfer.Failed || channel.Status == datatransfer.Cancelled) {
continue
}
if channel.IsSender {
sendingChannels = append(sendingChannels, channel)
} else {

View File

@ -264,8 +264,19 @@ var msigInspectCmd = &cli.Command{
}
fmt.Fprintf(cctx.App.Writer, "Threshold: %d / %d\n", threshold, len(signers))
fmt.Fprintln(cctx.App.Writer, "Signers:")
signerTable := tabwriter.NewWriter(cctx.App.Writer, 8, 4, 2, ' ', 0)
fmt.Fprintf(signerTable, "ID\tAddress\n")
for _, s := range signers {
fmt.Fprintf(cctx.App.Writer, "\t%s\n", s)
signerActor, err := api.StateAccountKey(ctx, s, types.EmptyTSK)
if err != nil {
fmt.Fprintf(signerTable, "%s\t%s\n", s, "N/A")
} else {
fmt.Fprintf(signerTable, "%s\t%s\n", s, signerActor)
}
}
if err := signerTable.Flush(); err != nil {
return xerrors.Errorf("flushing output: %+v", err)
}
pending := make(map[int64]multisig.Transaction)
@ -296,27 +307,33 @@ var msigInspectCmd = &cli.Command{
target += " (self)"
}
targAct, err := api.StateGetActor(ctx, tx.To, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to resolve 'To' address of multisig transaction %d: %w", txid, err)
}
method := stmgr.MethodsMap[targAct.Code][tx.Method]
paramStr := fmt.Sprintf("%x", tx.Params)
if decParams && tx.Method != 0 {
ptyp := reflect.New(method.Params.Elem()).Interface().(cbg.CBORUnmarshaler)
if err := ptyp.UnmarshalCBOR(bytes.NewReader(tx.Params)); err != nil {
return xerrors.Errorf("failed to decode parameters of transaction %d: %w", txid, err)
if err != nil {
if tx.Method == 0 {
fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "Send", tx.Method, paramStr)
} else {
fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "new account, unknown method", tx.Method, paramStr)
}
} else {
method := stmgr.MethodsMap[targAct.Code][tx.Method]
if decParams && tx.Method != 0 {
ptyp := reflect.New(method.Params.Elem()).Interface().(cbg.CBORUnmarshaler)
if err := ptyp.UnmarshalCBOR(bytes.NewReader(tx.Params)); err != nil {
return xerrors.Errorf("failed to decode parameters of transaction %d: %w", txid, err)
}
b, err := json.Marshal(ptyp)
if err != nil {
return xerrors.Errorf("could not json marshal parameter type: %w", err)
}
paramStr = string(b)
}
b, err := json.Marshal(ptyp)
if err != nil {
return xerrors.Errorf("could not json marshal parameter type: %w", err)
}
paramStr = string(b)
fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), method.Name, tx.Method, paramStr)
}
fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), method.Name, tx.Method, paramStr)
}
if err := w.Flush(); err != nil {
return xerrors.Errorf("flushing output: %+v", err)

View File

@ -1,9 +1,7 @@
package cli
import (
"bytes"
"context"
"flag"
"fmt"
"os"
"regexp"
@ -12,24 +10,21 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
clitest "github.com/filecoin-project/lotus/cli/test"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/actors/policy"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/types"
builder "github.com/filecoin-project/lotus/node/test"
)
func init() {
@ -42,24 +37,24 @@ func init() {
// commands
func TestPaymentChannels(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime)
nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
paymentReceiver := nodes[1]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
// Create mock CLI
mockCLI := newMockCLI(t)
creatorCLI := mockCLI.client(paymentCreator.ListenAddr)
receiverCLI := mockCLI.client(paymentReceiver.ListenAddr)
mockCLI := clitest.NewMockCLI(t, Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr)
// creator: paych add-funds <creator> <receiver> <amount>
channelAmt := "100000"
cmd := []string{creatorAddr.String(), receiverAddr.String(), channelAmt}
chstr := creatorCLI.runCmd(paychAddFundsCmd, cmd)
chstr := creatorCLI.RunCmd("paych", "add-funds", creatorAddr.String(), receiverAddr.String(), channelAmt)
chAddr, err := address.NewFromString(chstr)
require.NoError(t, err)
@ -67,16 +62,13 @@ func TestPaymentChannels(t *testing.T) {
// creator: paych voucher create <channel> <amount>
voucherAmt := 100
vamt := strconv.Itoa(voucherAmt)
cmd = []string{chAddr.String(), vamt}
voucher := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
voucher := creatorCLI.RunCmd("paych", "voucher", "create", chAddr.String(), vamt)
// receiver: paych voucher add <channel> <voucher>
cmd = []string{chAddr.String(), voucher}
receiverCLI.runCmd(paychVoucherAddCmd, cmd)
receiverCLI.RunCmd("paych", "voucher", "add", chAddr.String(), voucher)
// creator: paych settle <channel>
cmd = []string{chAddr.String()}
creatorCLI.runCmd(paychSettleCmd, cmd)
creatorCLI.RunCmd("paych", "settle", chAddr.String())
// Wait for the chain to reach the settle height
chState := getPaychState(ctx, t, paymentReceiver, chAddr)
@ -85,8 +77,7 @@ func TestPaymentChannels(t *testing.T) {
waitForHeight(ctx, t, paymentReceiver, sa)
// receiver: paych collect <channel>
cmd = []string{chAddr.String()}
receiverCLI.runCmd(paychCloseCmd, cmd)
receiverCLI.RunCmd("paych", "collect", chAddr.String())
}
type voucherSpec struct {
@ -98,20 +89,21 @@ type voucherSpec struct {
// TestPaymentChannelStatus tests the payment channel status CLI command
func TestPaymentChannelStatus(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime)
nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
// Create mock CLI
mockCLI := newMockCLI(t)
creatorCLI := mockCLI.client(paymentCreator.ListenAddr)
mockCLI := clitest.NewMockCLI(t, Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
cmd := []string{creatorAddr.String(), receiverAddr.String()}
out := creatorCLI.runCmd(paychStatusByFromToCmd, cmd)
// creator: paych status-by-from-to <creator> <receiver>
out := creatorCLI.RunCmd("paych", "status-by-from-to", creatorAddr.String(), receiverAddr.String())
fmt.Println(out)
noChannelState := "Channel does not exist"
require.Regexp(t, regexp.MustCompile(noChannelState), out)
@ -120,14 +112,17 @@ func TestPaymentChannelStatus(t *testing.T) {
create := make(chan string)
go func() {
// creator: paych add-funds <creator> <receiver> <amount>
cmd := []string{creatorAddr.String(), receiverAddr.String(), fmt.Sprintf("%d", channelAmt)}
create <- creatorCLI.runCmd(paychAddFundsCmd, cmd)
create <- creatorCLI.RunCmd(
"paych",
"add-funds",
creatorAddr.String(),
receiverAddr.String(),
fmt.Sprintf("%d", channelAmt))
}()
// Wait for the output to stop being "Channel does not exist"
for regexp.MustCompile(noChannelState).MatchString(out) {
cmd := []string{creatorAddr.String(), receiverAddr.String()}
out = creatorCLI.runCmd(paychStatusByFromToCmd, cmd)
out = creatorCLI.RunCmd("paych", "status-by-from-to", creatorAddr.String(), receiverAddr.String())
}
fmt.Println(out)
@ -147,8 +142,7 @@ func TestPaymentChannelStatus(t *testing.T) {
// Wait for create channel to complete
chstr := <-create
cmd = []string{chstr}
out = creatorCLI.runCmd(paychStatusCmd, cmd)
out = creatorCLI.RunCmd("paych", "status", chstr)
fmt.Println(out)
// Output should have the channel address
require.Regexp(t, regexp.MustCompile("Channel.*"+chstr), out)
@ -160,11 +154,9 @@ func TestPaymentChannelStatus(t *testing.T) {
// creator: paych voucher create <channel> <amount>
voucherAmt := uint64(10)
cmd = []string{chAddr.String(), fmt.Sprintf("%d", voucherAmt)}
creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
creatorCLI.RunCmd("paych", "voucher", "create", chAddr.String(), fmt.Sprintf("%d", voucherAmt))
cmd = []string{chstr}
out = creatorCLI.runCmd(paychStatusCmd, cmd)
out = creatorCLI.RunCmd("paych", "status", chstr)
fmt.Println(out)
voucherAmtAtto := types.BigMul(types.NewInt(voucherAmt), types.NewInt(build.FilecoinPrecision))
voucherAmtStr := fmt.Sprintf("%d", voucherAmtAtto)
@ -176,24 +168,24 @@ func TestPaymentChannelStatus(t *testing.T) {
// channel voucher commands
func TestPaymentChannelVouchers(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime)
nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
paymentReceiver := nodes[1]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
// Create mock CLI
mockCLI := newMockCLI(t)
creatorCLI := mockCLI.client(paymentCreator.ListenAddr)
receiverCLI := mockCLI.client(paymentReceiver.ListenAddr)
mockCLI := clitest.NewMockCLI(t, Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr)
// creator: paych add-funds <creator> <receiver> <amount>
channelAmt := "100000"
cmd := []string{creatorAddr.String(), receiverAddr.String(), channelAmt}
chstr := creatorCLI.runCmd(paychAddFundsCmd, cmd)
chstr := creatorCLI.RunCmd("paych", "add-funds", creatorAddr.String(), receiverAddr.String(), channelAmt)
chAddr, err := address.NewFromString(chstr)
require.NoError(t, err)
@ -203,39 +195,33 @@ func TestPaymentChannelVouchers(t *testing.T) {
// creator: paych voucher create <channel> <amount>
// Note: implied --lane=0
voucherAmt1 := 100
cmd = []string{chAddr.String(), strconv.Itoa(voucherAmt1)}
voucher1 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
voucher1 := creatorCLI.RunCmd("paych", "voucher", "create", chAddr.String(), strconv.Itoa(voucherAmt1))
vouchers = append(vouchers, voucherSpec{serialized: voucher1, lane: 0, amt: voucherAmt1})
// creator: paych voucher create <channel> <amount> --lane=5
lane5 := "--lane=5"
voucherAmt2 := 50
cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt2)}
voucher2 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
voucher2 := creatorCLI.RunCmd("paych", "voucher", "create", lane5, chAddr.String(), strconv.Itoa(voucherAmt2))
vouchers = append(vouchers, voucherSpec{serialized: voucher2, lane: 5, amt: voucherAmt2})
// creator: paych voucher create <channel> <amount> --lane=5
voucherAmt3 := 70
cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt3)}
voucher3 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
voucher3 := creatorCLI.RunCmd("paych", "voucher", "create", lane5, chAddr.String(), strconv.Itoa(voucherAmt3))
vouchers = append(vouchers, voucherSpec{serialized: voucher3, lane: 5, amt: voucherAmt3})
// creator: paych voucher create <channel> <amount> --lane=5
voucherAmt4 := 80
cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt4)}
voucher4 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
voucher4 := creatorCLI.RunCmd("paych", "voucher", "create", lane5, chAddr.String(), strconv.Itoa(voucherAmt4))
vouchers = append(vouchers, voucherSpec{serialized: voucher4, lane: 5, amt: voucherAmt4})
// creator: paych voucher list <channel> --export
cmd = []string{"--export", chAddr.String()}
list := creatorCLI.runCmd(paychVoucherListCmd, cmd)
list := creatorCLI.RunCmd("paych", "voucher", "list", "--export", chAddr.String())
// Check that voucher list output is correct on creator
checkVoucherOutput(t, list, vouchers)
// creator: paych voucher best-spendable <channel>
cmd = []string{"--export", chAddr.String()}
bestSpendable := creatorCLI.runCmd(paychVoucherBestSpendableCmd, cmd)
bestSpendable := creatorCLI.RunCmd("paych", "voucher", "best-spendable", "--export", chAddr.String())
// Check that best spendable output is correct on creator
bestVouchers := []voucherSpec{
@ -245,31 +231,25 @@ func TestPaymentChannelVouchers(t *testing.T) {
checkVoucherOutput(t, bestSpendable, bestVouchers)
// receiver: paych voucher add <voucher>
cmd = []string{chAddr.String(), voucher1}
receiverCLI.runCmd(paychVoucherAddCmd, cmd)
receiverCLI.RunCmd("paych", "voucher", "add", chAddr.String(), voucher1)
// receiver: paych voucher add <voucher>
cmd = []string{chAddr.String(), voucher2}
receiverCLI.runCmd(paychVoucherAddCmd, cmd)
receiverCLI.RunCmd("paych", "voucher", "add", chAddr.String(), voucher2)
// receiver: paych voucher add <voucher>
cmd = []string{chAddr.String(), voucher3}
receiverCLI.runCmd(paychVoucherAddCmd, cmd)
receiverCLI.RunCmd("paych", "voucher", "add", chAddr.String(), voucher3)
// receiver: paych voucher add <voucher>
cmd = []string{chAddr.String(), voucher4}
receiverCLI.runCmd(paychVoucherAddCmd, cmd)
receiverCLI.RunCmd("paych", "voucher", "add", chAddr.String(), voucher4)
// receiver: paych voucher list <channel> --export
cmd = []string{"--export", chAddr.String()}
list = receiverCLI.runCmd(paychVoucherListCmd, cmd)
list = receiverCLI.RunCmd("paych", "voucher", "list", "--export", chAddr.String())
// Check that voucher list output is correct on receiver
checkVoucherOutput(t, list, vouchers)
// receiver: paych voucher best-spendable <channel>
cmd = []string{"--export", chAddr.String()}
bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)
bestSpendable = receiverCLI.RunCmd("paych", "voucher", "best-spendable", "--export", chAddr.String())
// Check that best spendable output is correct on receiver
bestVouchers = []voucherSpec{
@ -279,12 +259,10 @@ func TestPaymentChannelVouchers(t *testing.T) {
checkVoucherOutput(t, bestSpendable, bestVouchers)
// receiver: paych voucher submit <channel> <voucher>
cmd = []string{chAddr.String(), voucher1}
receiverCLI.runCmd(paychVoucherSubmitCmd, cmd)
receiverCLI.RunCmd("paych", "voucher", "submit", chAddr.String(), voucher1)
// receiver: paych voucher best-spendable <channel>
cmd = []string{"--export", chAddr.String()}
bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)
bestSpendable = receiverCLI.RunCmd("paych", "voucher", "best-spendable", "--export", chAddr.String())
// Check that best spendable output no longer includes submitted voucher
bestVouchers = []voucherSpec{
@ -295,12 +273,10 @@ func TestPaymentChannelVouchers(t *testing.T) {
// There are three vouchers in lane 5: 50, 70, 80
// Submit the voucher for 50. Best spendable should still be 80.
// receiver: paych voucher submit <channel> <voucher>
cmd = []string{chAddr.String(), voucher2}
receiverCLI.runCmd(paychVoucherSubmitCmd, cmd)
receiverCLI.RunCmd("paych", "voucher", "submit", chAddr.String(), voucher2)
// receiver: paych voucher best-spendable <channel>
cmd = []string{"--export", chAddr.String()}
bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)
bestSpendable = receiverCLI.RunCmd("paych", "voucher", "best-spendable", "--export", chAddr.String())
// Check that best spendable output still includes the voucher for 80
bestVouchers = []voucherSpec{
@ -310,12 +286,10 @@ func TestPaymentChannelVouchers(t *testing.T) {
// Submit the voucher for 80
// receiver: paych voucher submit <channel> <voucher>
cmd = []string{chAddr.String(), voucher4}
receiverCLI.runCmd(paychVoucherSubmitCmd, cmd)
receiverCLI.RunCmd("paych", "voucher", "submit", chAddr.String(), voucher4)
// receiver: paych voucher best-spendable <channel>
cmd = []string{"--export", chAddr.String()}
bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)
bestSpendable = receiverCLI.RunCmd("paych", "voucher", "best-spendable", "--export", chAddr.String())
// Check that best spendable output no longer includes submitted voucher
bestVouchers = []voucherSpec{}
@ -326,22 +300,27 @@ func TestPaymentChannelVouchers(t *testing.T) {
// is greater than what's left in the channel, voucher create fails
func TestPaymentChannelVoucherCreateShortfall(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime)
nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
// Create mock CLI
mockCLI := newMockCLI(t)
creatorCLI := mockCLI.client(paymentCreator.ListenAddr)
mockCLI := clitest.NewMockCLI(t, Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
// creator: paych add-funds <creator> <receiver> <amount>
channelAmt := 100
cmd := []string{creatorAddr.String(), receiverAddr.String(), fmt.Sprintf("%d", channelAmt)}
chstr := creatorCLI.runCmd(paychAddFundsCmd, cmd)
chstr := creatorCLI.RunCmd(
"paych",
"add-funds",
creatorAddr.String(),
receiverAddr.String(),
fmt.Sprintf("%d", channelAmt))
chAddr, err := address.NewFromString(chstr)
require.NoError(t, err)
@ -349,15 +328,25 @@ func TestPaymentChannelVoucherCreateShortfall(t *testing.T) {
// creator: paych voucher create <channel> <amount> --lane=1
voucherAmt1 := 60
lane1 := "--lane=1"
cmd = []string{lane1, chAddr.String(), strconv.Itoa(voucherAmt1)}
voucher1 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
voucher1 := creatorCLI.RunCmd(
"paych",
"voucher",
"create",
lane1,
chAddr.String(),
strconv.Itoa(voucherAmt1))
fmt.Println(voucher1)
// creator: paych voucher create <channel> <amount> --lane=2
lane2 := "--lane=2"
voucherAmt2 := 70
cmd = []string{lane2, chAddr.String(), strconv.Itoa(voucherAmt2)}
_, err = creatorCLI.runCmdRaw(paychVoucherCreateCmd, cmd)
_, err = creatorCLI.RunCmdRaw(
"paych",
"voucher",
"create",
lane2,
chAddr.String(),
strconv.Itoa(voucherAmt2))
// Should fail because channel doesn't have required amount
require.Error(t, err)
@ -388,129 +377,6 @@ func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) {
}
}
func startTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]test.TestNode, []address.Address) {
n, sn := builder.RPCMockSbBuilder(t, test.TwoFull, test.OneMiner)
paymentCreator := n[0]
paymentReceiver := n[1]
miner := sn[0]
// Get everyone connected
addrs, err := paymentCreator.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := paymentReceiver.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
// Start mining blocks
bm := test.NewBlockMiner(ctx, t, miner, blocktime)
bm.MineBlocks()
// Send some funds to register the receiver
receiverAddr, err := paymentReceiver.WalletNew(ctx, types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
test.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
// Get the creator's address
creatorAddr, err := paymentCreator.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
// Create mock CLI
return n, []address.Address{creatorAddr, receiverAddr}
}
type mockCLI struct {
t *testing.T
cctx *cli.Context
out *bytes.Buffer
}
// TODO: refactor to use the methods in cli/test/mockcli.go
func newMockCLI(t *testing.T) *mockCLI {
// Create a CLI App with an --api-url flag so that we can specify which node
// the command should be executed against
app := cli.NewApp()
app.Flags = []cli.Flag{
&cli.StringFlag{
Name: "api-url",
Hidden: true,
},
}
var out bytes.Buffer
app.Writer = &out
app.Setup()
cctx := cli.NewContext(app, &flag.FlagSet{}, nil)
return &mockCLI{t: t, cctx: cctx, out: &out}
}
func (c *mockCLI) client(addr multiaddr.Multiaddr) *mockCLIClient {
return &mockCLIClient{t: c.t, addr: addr, cctx: c.cctx, out: c.out}
}
// mockCLIClient runs commands against a particular node
type mockCLIClient struct {
t *testing.T
addr multiaddr.Multiaddr
cctx *cli.Context
out *bytes.Buffer
}
func (c *mockCLIClient) runCmd(cmd *cli.Command, input []string) string {
out, err := c.runCmdRaw(cmd, input)
require.NoError(c.t, err)
return out
}
func (c *mockCLIClient) runCmdRaw(cmd *cli.Command, input []string) (string, error) {
// prepend --api-url=<node api listener address>
apiFlag := "--api-url=" + c.addr.String()
input = append([]string{apiFlag}, input...)
fs := c.flagSet(cmd)
err := fs.Parse(input)
require.NoError(c.t, err)
err = cmd.Action(cli.NewContext(c.cctx.App, fs, c.cctx))
// Get the output
str := strings.TrimSpace(c.out.String())
c.out.Reset()
return str, err
}
func (c *mockCLIClient) flagSet(cmd *cli.Command) *flag.FlagSet {
// Apply app level flags (so we can process --api-url flag)
fs := &flag.FlagSet{}
for _, f := range c.cctx.App.Flags {
err := f.Apply(fs)
if err != nil {
c.t.Fatal(err)
}
}
// Apply command level flags
for _, f := range cmd.Flags {
err := f.Apply(fs)
if err != nil {
c.t.Fatal(err)
}
}
return fs
}
// waitForHeight waits for the node to reach the given chain epoch
func waitForHeight(ctx context.Context, t *testing.T, node test.TestNode, height abi.ChainEpoch) {
atHeight := make(chan struct{})

View File

@ -25,8 +25,8 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
defer cancel()
// Create mock CLI
mockCLI := newMockCLI(t, cmds)
clientCLI := mockCLI.client(clientNode.ListenAddr)
mockCLI := NewMockCLI(t, cmds)
clientCLI := mockCLI.Client(clientNode.ListenAddr)
// Get the miner address
addrs, err := clientNode.StateListMiners(ctx, types.EmptyTSK)
@ -37,10 +37,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
fmt.Println("Miner:", minerAddr)
// client query-ask <miner addr>
cmd := []string{
"client", "query-ask", minerAddr.String(),
}
out := clientCLI.runCmd(cmd)
out := clientCLI.RunCmd("client", "query-ask", minerAddr.String())
require.Regexp(t, regexp.MustCompile("Ask:"), out)
// Create a deal (non-interactive)
@ -50,10 +47,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
dataCid := res.Root
price := "1000000attofil"
duration := fmt.Sprintf("%d", build.MinDealDuration)
cmd = []string{
"client", "deal", dataCid.String(), minerAddr.String(), price, duration,
}
out = clientCLI.runCmd(cmd)
out = clientCLI.RunCmd("client", "deal", dataCid.String(), minerAddr.String(), price, duration)
fmt.Println("client deal", out)
// Create a deal (interactive)
@ -67,9 +61,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
require.NoError(t, err)
dataCid2 := res.Root
duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay)
cmd = []string{
"client", "deal",
}
cmd := []string{"client", "deal"}
interactiveCmds := []string{
dataCid2.String(),
duration,
@ -77,15 +69,14 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
"no",
"yes",
}
out = clientCLI.runInteractiveCmd(cmd, interactiveCmds)
out = clientCLI.RunInteractiveCmd(cmd, interactiveCmds)
fmt.Println("client deal:\n", out)
// Wait for provider to start sealing deal
dealStatus := ""
for dealStatus != "StorageDealSealing" {
// client list-deals
cmd = []string{"client", "list-deals"}
out = clientCLI.runCmd(cmd)
out = clientCLI.RunCmd("client", "list-deals")
fmt.Println("list-deals:\n", out)
lines := strings.Split(out, "\n")
@ -106,10 +97,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-client")
require.NoError(t, err)
path := filepath.Join(tmpdir, "outfile.dat")
cmd = []string{
"client", "retrieve", dataCid.String(), path,
}
out = clientCLI.runCmd(cmd)
out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path)
fmt.Println("retrieve:\n", out)
require.Regexp(t, regexp.MustCompile("Success"), out)
}

View File

@ -11,14 +11,14 @@ import (
lcli "github.com/urfave/cli/v2"
)
type mockCLI struct {
type MockCLI struct {
t *testing.T
cmds []*lcli.Command
cctx *lcli.Context
out *bytes.Buffer
}
func newMockCLI(t *testing.T, cmds []*lcli.Command) *mockCLI {
func NewMockCLI(t *testing.T, cmds []*lcli.Command) *MockCLI {
// Create a CLI App with an --api-url flag so that we can specify which node
// the command should be executed against
app := &lcli.App{
@ -36,15 +36,15 @@ func newMockCLI(t *testing.T, cmds []*lcli.Command) *mockCLI {
app.Setup()
cctx := lcli.NewContext(app, &flag.FlagSet{}, nil)
return &mockCLI{t: t, cmds: cmds, cctx: cctx, out: &out}
return &MockCLI{t: t, cmds: cmds, cctx: cctx, out: &out}
}
func (c *mockCLI) client(addr multiaddr.Multiaddr) *mockCLIClient {
return &mockCLIClient{t: c.t, cmds: c.cmds, addr: addr, cctx: c.cctx, out: c.out}
func (c *MockCLI) Client(addr multiaddr.Multiaddr) *MockCLIClient {
return &MockCLIClient{t: c.t, cmds: c.cmds, addr: addr, cctx: c.cctx, out: c.out}
}
// mockCLIClient runs commands against a particular node
type mockCLIClient struct {
// MockCLIClient runs commands against a particular node
type MockCLIClient struct {
t *testing.T
cmds []*lcli.Command
addr multiaddr.Multiaddr
@ -52,42 +52,48 @@ type mockCLIClient struct {
out *bytes.Buffer
}
func (c *mockCLIClient) run(cmd []string, params []string, args []string) string {
// Add parameter --api-url=<node api listener address>
apiFlag := "--api-url=" + c.addr.String()
params = append([]string{apiFlag}, params...)
err := c.cctx.App.Run(append(append(cmd, params...), args...))
require.NoError(c.t, err)
// Get the output
str := strings.TrimSpace(c.out.String())
c.out.Reset()
return str
}
func (c *mockCLIClient) runCmd(input []string) string {
cmd := c.cmdByNameSub(input[0], input[1])
out, err := c.runCmdRaw(cmd, input[2:])
func (c *MockCLIClient) RunCmd(input ...string) string {
out, err := c.RunCmdRaw(input...)
require.NoError(c.t, err)
return out
}
func (c *mockCLIClient) cmdByNameSub(name string, sub string) *lcli.Command {
for _, c := range c.cmds {
if c.Name == name {
for _, s := range c.Subcommands {
if s.Name == sub {
return s
}
}
// Given an input, find the corresponding command or sub-command.
// eg "paych add-funds"
func (c *MockCLIClient) cmdByNameSub(input []string) (*lcli.Command, []string) {
name := input[0]
for _, cmd := range c.cmds {
if cmd.Name == name {
return c.findSubcommand(cmd, input[1:])
}
}
return nil
return nil, []string{}
}
func (c *mockCLIClient) runCmdRaw(cmd *lcli.Command, input []string) (string, error) {
func (c *MockCLIClient) findSubcommand(cmd *lcli.Command, input []string) (*lcli.Command, []string) {
// If there are no sub-commands, return the current command
if len(cmd.Subcommands) == 0 {
return cmd, input
}
// Check each sub-command for a match against the name
subName := input[0]
for _, subCmd := range cmd.Subcommands {
if subCmd.Name == subName {
// Found a match, recursively search for sub-commands
return c.findSubcommand(subCmd, input[1:])
}
}
return nil, []string{}
}
func (c *MockCLIClient) RunCmdRaw(input ...string) (string, error) {
cmd, rest := c.cmdByNameSub(input)
if cmd == nil {
// cmdByNameSub returns an empty slice when nothing matches, so report
// the original input instead of indexing into the empty result.
panic("could not find command " + strings.Join(input, " "))
}
input = rest
// prepend --api-url=<node api listener address>
apiFlag := "--api-url=" + c.addr.String()
input = append([]string{apiFlag}, input...)
@ -104,7 +110,7 @@ func (c *mockCLIClient) runCmdRaw(cmd *lcli.Command, input []string) (string, er
return str, err
}
func (c *mockCLIClient) flagSet(cmd *lcli.Command) *flag.FlagSet {
func (c *MockCLIClient) flagSet(cmd *lcli.Command) *flag.FlagSet {
// Apply app level flags (so we can process --api-url flag)
fs := &flag.FlagSet{}
for _, f := range c.cctx.App.Flags {
@ -123,11 +129,11 @@ func (c *mockCLIClient) flagSet(cmd *lcli.Command) *flag.FlagSet {
return fs
}
func (c *mockCLIClient) runInteractiveCmd(cmd []string, interactive []string) string {
func (c *MockCLIClient) RunInteractiveCmd(cmd []string, interactive []string) string {
c.toStdin(strings.Join(interactive, "\n") + "\n")
return c.runCmd(cmd)
return c.RunCmd(cmd...)
}
func (c *mockCLIClient) toStdin(s string) {
func (c *MockCLIClient) toStdin(s string) {
c.cctx.App.Metadata["stdin"] = bytes.NewBufferString(s)
}
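A minimal usage sketch for the newly exported mock, assuming a test node from one of the helpers below; the commands and interactive answers are placeholders:

```go
func runMockCLIExample(t *testing.T, cmds []*lcli.Command, node test.TestNode) {
	mock := NewMockCLI(t, cmds)
	client := mock.Client(node.ListenAddr)

	// Variadic form: the args are resolved via cmdByNameSub and the command
	// runs against the node selected by the injected --api-url flag.
	out := client.RunCmd("wallet", "list")
	fmt.Println(out)

	// Interactive form: the answer lines are fed to the command's stdin
	// through the app metadata before it runs.
	out = client.RunInteractiveCmd(
		[]string{"client", "deal"},
		[]string{"<data-cid>", "<miner-addr>"}, // hypothetical prompt answers
	)
	fmt.Println(out)
}
```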

View File

@ -18,8 +18,8 @@ func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNod
ctx := context.Background()
// Create mock CLI
mockCLI := newMockCLI(t, cmds)
clientCLI := mockCLI.client(clientNode.ListenAddr)
mockCLI := NewMockCLI(t, cmds)
clientCLI := mockCLI.Client(clientNode.ListenAddr)
// Create some wallets on the node to use for testing multisig
var walletAddrs []address.Address
@ -39,7 +39,7 @@ func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNod
paramDuration := "--duration=50"
paramRequired := fmt.Sprintf("--required=%d", threshold)
paramValue := fmt.Sprintf("--value=%dattofil", amtAtto)
cmd := []string{
out := clientCLI.RunCmd(
"msig", "create",
paramRequired,
paramDuration,
@ -47,8 +47,7 @@ func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNod
walletAddrs[0].String(),
walletAddrs[1].String(),
walletAddrs[2].String(),
}
out := clientCLI.runCmd(cmd)
)
fmt.Println(out)
// Extract msig robust address from output
@ -62,18 +61,16 @@ func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNod
// Propose to add a new address to the msig
// msig add-propose --from=<addr> <msig> <addr>
paramFrom := fmt.Sprintf("--from=%s", walletAddrs[0])
cmd = []string{
out = clientCLI.RunCmd(
"msig", "add-propose",
paramFrom,
msigRobustAddr,
walletAddrs[3].String(),
}
out = clientCLI.runCmd(cmd)
)
fmt.Println(out)
// msig inspect <msig>
cmd = []string{"msig", "inspect", "--vesting", "--decode-params", msigRobustAddr}
out = clientCLI.runCmd(cmd)
out = clientCLI.RunCmd("msig", "inspect", "--vesting", "--decode-params", msigRobustAddr)
fmt.Println(out)
// Expect correct balance
@ -87,7 +84,7 @@ func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNod
// msig add-approve --from=<addr> <msig> <addr> 0 <addr> false
txnID := "0"
paramFrom = fmt.Sprintf("--from=%s", walletAddrs[1])
cmd = []string{
out = clientCLI.RunCmd(
"msig", "add-approve",
paramFrom,
msigRobustAddr,
@ -95,7 +92,6 @@ func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNod
txnID,
walletAddrs[3].String(),
"false",
}
out = clientCLI.runCmd(cmd)
)
fmt.Println(out)
}

View File

@ -5,6 +5,9 @@ import (
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api/test"
test2 "github.com/filecoin-project/lotus/node/test"
@ -39,3 +42,46 @@ func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Dura
// Create mock CLI
return full, fullAddr
}
func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]test.TestNode, []address.Address) {
n, sn := test2.RPCMockSbBuilder(t, test.TwoFull, test.OneMiner)
fullNode1 := n[0]
fullNode2 := n[1]
miner := sn[0]
// Get everyone connected
addrs, err := fullNode1.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := fullNode2.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
// Start mining blocks
bm := test.NewBlockMiner(ctx, t, miner, blocktime)
bm.MineBlocks()
// Send some funds to register the second node
fullNodeAddr2, err := fullNode2.WalletNew(ctx, types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
test.SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18))
// Get the first node's address
fullNodeAddr1, err := fullNode1.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
// Return both nodes along with each node's wallet address
return n, []address.Address{fullNodeAddr1, fullNodeAddr2}
}
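A sketch of consuming this helper from a CLI test; the command invoked is a placeholder:

```go
func runTwoNodeExample(t *testing.T, cmds []*lcli.Command) {
	ctx := context.Background()

	nodes, addrs := StartTwoNodesOneMiner(ctx, t, 5*time.Millisecond)

	mock := NewMockCLI(t, cmds)
	cli1 := mock.Client(nodes[0].ListenAddr)

	// addrs[0] is node 1's default address; addrs[1] is the freshly funded
	// address created on node 2 above.
	fmt.Println(cli1.RunCmd("wallet", "balance", addrs[0].String()))
}
```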

View File

@ -40,6 +40,8 @@ var actorCmd = &cli.Command{
actorSetPeeridCmd,
actorSetOwnerCmd,
actorControl,
actorProposeChangeWorker,
actorConfirmChangeWorker,
},
}
@ -698,3 +700,221 @@ var actorSetOwnerCmd = &cli.Command{
return nil
},
}
var actorProposeChangeWorker = &cli.Command{
Name: "propose-change-worker",
Usage: "Propose a worker address change",
ArgsUsage: "[address]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "really-do-it",
Usage: "Actually send transaction performing the action",
Value: false,
},
},
Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() {
return fmt.Errorf("must pass address of new worker address")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(cctx)
na, err := address.NewFromString(cctx.Args().First())
if err != nil {
return err
}
newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
if err != nil {
return err
}
maddr, err := nodeApi.ActorAddress(ctx)
if err != nil {
return err
}
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
if mi.NewWorker.Empty() {
if mi.Worker == newAddr {
return fmt.Errorf("worker address already set to %s", na)
}
} else {
if mi.NewWorker == newAddr {
return fmt.Errorf("change to worker address %s already pending", na)
}
}
if !cctx.Bool("really-do-it") {
fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action")
return nil
}
cwp := &miner2.ChangeWorkerAddressParams{
NewWorker: newAddr,
NewControlAddrs: mi.ControlAddresses,
}
sp, err := actors.SerializeParams(cwp)
if err != nil {
return xerrors.Errorf("serializing params: %w", err)
}
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
From: mi.Owner,
To: maddr,
Method: miner.Methods.ChangeWorkerAddress,
Value: big.Zero(),
Params: sp,
}, nil)
if err != nil {
return xerrors.Errorf("mpool push: %w", err)
}
fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", smsg.Cid())
// wait for it to get mined into a block
wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
if err != nil {
return err
}
// check it executed successfully
if wait.Receipt.ExitCode != 0 {
fmt.Fprintln(cctx.App.Writer, "Propose worker change failed!")
return xerrors.Errorf("propose worker change failed: exit code %d", wait.Receipt.ExitCode)
}
mi, err = api.StateMinerInfo(ctx, maddr, wait.TipSet)
if err != nil {
return err
}
if mi.NewWorker != newAddr {
return fmt.Errorf("Proposed worker address change not reflected on chain: expected '%s', found '%s'", na, mi.NewWorker)
}
fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na)
fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch)
return nil
},
}
var actorConfirmChangeWorker = &cli.Command{
Name: "confirm-change-worker",
Usage: "Confirm a worker address change",
ArgsUsage: "[address]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "really-do-it",
Usage: "Actually send transaction performing the action",
Value: false,
},
},
Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() {
return fmt.Errorf("must pass address of new worker address")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(cctx)
na, err := address.NewFromString(cctx.Args().First())
if err != nil {
return err
}
newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
if err != nil {
return err
}
maddr, err := nodeApi.ActorAddress(ctx)
if err != nil {
return err
}
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
if mi.NewWorker.Empty() {
return xerrors.Errorf("no worker key change proposed")
} else if mi.NewWorker != newAddr {
return xerrors.Errorf("worker key %s does not match current worker key proposal %s", newAddr, mi.NewWorker)
}
if head, err := api.ChainHead(ctx); err != nil {
return xerrors.Errorf("failed to get the chain head: %w", err)
} else if head.Height() < mi.WorkerChangeEpoch {
return xerrors.Errorf("worker key change cannot be confirmed until %d, current height is %d", mi.WorkerChangeEpoch, head.Height())
}
if !cctx.Bool("really-do-it") {
fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action")
return nil
}
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
From: mi.Owner,
To: maddr,
Method: miner.Methods.ConfirmUpdateWorkerKey,
Value: big.Zero(),
}, nil)
if err != nil {
return xerrors.Errorf("mpool push: %w", err)
}
fmt.Fprintln(cctx.App.Writer, "Confirm Message CID:", smsg.Cid())
// wait for it to get mined into a block
wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
if err != nil {
return err
}
// check it executed successfully
if wait.Receipt.ExitCode != 0 {
fmt.Fprintln(cctx.App.Writer, "Worker change failed!")
return xerrors.Errorf("confirm worker change failed: exit code %d", wait.Receipt.ExitCode)
}
mi, err = api.StateMinerInfo(ctx, maddr, wait.TipSet)
if err != nil {
return err
}
if mi.Worker != newAddr {
return fmt.Errorf("Confirmed worker address change not reflected on chain: expected '%s', found '%s'", newAddr, mi.Worker)
}
return nil
},
}
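Taken together, these commands implement a two-phase rotation: propose-change-worker records NewWorker and WorkerChangeEpoch on chain in a message signed by the owner key, and confirm-change-worker can only land at or after that epoch. Assuming the miner CLI is invoked as lotus-miner and using a placeholder address, the operator flow looks like:

```
lotus-miner actor propose-change-worker --really-do-it <new-worker-address>
# wait for the chain to pass the printed WorkerChangeEpoch, then:
lotus-miner actor confirm-change-worker --really-do-it <new-worker-address>
```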

View File

@ -0,0 +1,164 @@
package main
import (
"bytes"
"context"
"flag"
"fmt"
"regexp"
"strconv"
"sync/atomic"
"testing"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/lotuslog"
"github.com/filecoin-project/lotus/node/repo"
builder "github.com/filecoin-project/lotus/node/test"
)
func TestWorkerKeyChange(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_ = logging.SetLogLevel("*", "INFO")
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
lotuslog.SetupLogLevels()
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("pubsub", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
blocktime := 1 * time.Millisecond
n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithUpgradeAt(1), test.FullNodeWithUpgradeAt(1)}, test.OneMiner)
client1 := n[0]
client2 := n[1]
// Connect the nodes.
addrinfo, err := client1.NetAddrsListen(ctx)
require.NoError(t, err)
err = client2.NetConnect(ctx, addrinfo)
require.NoError(t, err)
output := bytes.NewBuffer(nil)
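// run executes a single miner CLI command in-process, wiring the test's
// full node and storage miner into the app metadata that the lcli helpers
// (GetFullNodeAPI / GetStorageMinerAPI) resolve their endpoints from.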
run := func(cmd *cli.Command, args ...string) error {
app := cli.NewApp()
app.Metadata = map[string]interface{}{
"repoType": repo.StorageMiner,
"testnode-full": n[0],
"testnode-storage": sn[0],
}
app.Writer = output
build.RunningNodeType = build.NodeMiner
fs := flag.NewFlagSet("", flag.ContinueOnError)
for _, f := range cmd.Flags {
if err := f.Apply(fs); err != nil {
return err
}
}
require.NoError(t, fs.Parse(args))
cctx := cli.NewContext(app, fs, nil)
return cmd.Action(cctx)
}
// setup miner
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) == 1 {
time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, test.MineNext); err != nil {
t.Error(err)
}
}
}()
defer func() {
atomic.AddInt64(&mine, -1)
fmt.Println("shutting down mining")
<-done
}()
newKey, err := client1.WalletNew(ctx, types.KTBLS)
require.NoError(t, err)
// Initialize wallet.
test.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0))
require.NoError(t, run(actorProposeChangeWorker, "--really-do-it", newKey.String()))
result := output.String()
output.Reset()
require.Contains(t, result, fmt.Sprintf("Worker key change to %s successfully proposed.", newKey))
epochRe := regexp.MustCompile("at or after height (?P<epoch>[0-9]+) to complete")
matches := epochRe.FindStringSubmatch(result)
require.NotNil(t, matches)
targetEpoch, err := strconv.Atoi(matches[1])
require.NoError(t, err)
require.NotZero(t, targetEpoch)
// Too early.
require.Error(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String()))
output.Reset()
for {
head, err := client1.ChainHead(ctx)
require.NoError(t, err)
if head.Height() >= abi.ChainEpoch(targetEpoch) {
break
}
build.Clock.Sleep(10 * blocktime)
}
require.NoError(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String()))
output.Reset()
head, err := client1.ChainHead(ctx)
require.NoError(t, err)
// Wait for finality (worker key switch).
targetHeight := head.Height() + policy.ChainFinality
for {
head, err := client1.ChainHead(ctx)
require.NoError(t, err)
if head.Height() >= targetHeight {
break
}
build.Clock.Sleep(10 * blocktime)
}
// Make sure the other node can catch up.
for i := 0; i < 20; i++ {
head, err := client2.ChainHead(ctx)
require.NoError(t, err)
if head.Height() >= targetHeight {
return
}
build.Clock.Sleep(10 * blocktime)
}
t.Fatal("failed to reach target height on the second node")
}

View File

@ -2,6 +2,7 @@ package main
import (
"bufio"
"errors"
"fmt"
"io"
"os"
@ -13,8 +14,10 @@ import (
tm "github.com/buger/goterm"
"github.com/docker/go-units"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/multiformats/go-multibase"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
@ -569,6 +572,128 @@ var dataTransfersCmd = &cli.Command{
Usage: "Manage data transfers",
Subcommands: []*cli.Command{
transfersListCmd,
marketRestartTransfer,
marketCancelTransfer,
},
}
var marketRestartTransfer = &cli.Command{
Name: "restart",
Usage: "Force restart a stalled data transfer",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "peerid",
Usage: "narrow to transfer with specific peer",
},
&cli.BoolFlag{
Name: "initiator",
Usage: "specify only transfers where peer is/is not initiator",
Value: false,
},
},
Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() {
return cli.ShowCommandHelp(cctx, cctx.Command.Name)
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64)
if err != nil {
return fmt.Errorf("Error reading transfer ID: %w", err)
}
transferID := datatransfer.TransferID(transferUint)
initiator := cctx.Bool("initiator")
var other peer.ID
if pidstr := cctx.String("peerid"); pidstr != "" {
p, err := peer.Decode(pidstr)
if err != nil {
return err
}
other = p
} else {
channels, err := nodeApi.MarketListDataTransfers(ctx)
if err != nil {
return err
}
found := false
for _, channel := range channels {
if channel.IsInitiator == initiator && channel.TransferID == transferID {
other = channel.OtherPeer
found = true
break
}
}
if !found {
return errors.New("unable to find matching data transfer")
}
}
return nodeApi.MarketRestartDataTransfer(ctx, transferID, other, initiator)
},
}
var marketCancelTransfer = &cli.Command{
Name: "cancel",
Usage: "Force cancel a data transfer",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "peerid",
Usage: "narrow to transfer with specific peer",
},
&cli.BoolFlag{
Name: "initiator",
Usage: "specify only transfers where peer is/is not initiator",
Value: false,
},
},
Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() {
return cli.ShowCommandHelp(cctx, cctx.Command.Name)
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64)
if err != nil {
return fmt.Errorf("Error reading transfer ID: %w", err)
}
transferID := datatransfer.TransferID(transferUint)
initiator := cctx.Bool("initiator")
var other peer.ID
if pidstr := cctx.String("peerid"); pidstr != "" {
p, err := peer.Decode(pidstr)
if err != nil {
return err
}
other = p
} else {
channels, err := nodeApi.MarketListDataTransfers(ctx)
if err != nil {
return err
}
found := false
for _, channel := range channels {
if channel.IsInitiator == initiator && channel.TransferID == transferID {
other = channel.OtherPeer
found = true
break
}
}
if !found {
return errors.New("unable to find matching data transfer")
}
}
return nodeApi.MarketCancelDataTransfer(ctx, transferID, other, initiator)
},
}
@ -589,6 +714,10 @@ var transfersListCmd = &cli.Command{
Name: "watch",
Usage: "watch deal updates in real-time, rather than a one time list",
},
&cli.BoolFlag{
Name: "show-failed",
Usage: "show failed/cancelled transfers",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
@ -606,7 +735,7 @@ var transfersListCmd = &cli.Command{
completed := cctx.Bool("completed")
color := cctx.Bool("color")
watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed")
if watch {
channelUpdates, err := api.MarketDataTransferUpdates(ctx)
if err != nil {
@ -618,7 +747,7 @@ var transfersListCmd = &cli.Command{
tm.MoveCursor(1, 1)
lcli.OutputDataTransferChannels(tm.Screen, channels, completed, color)
lcli.OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed)
tm.Flush()
@ -643,7 +772,7 @@ var transfersListCmd = &cli.Command{
}
}
}
lcli.OutputDataTransferChannels(os.Stdout, channels, completed, color)
lcli.OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed)
return nil
},
}

View File

@ -219,7 +219,7 @@ var sectorsListCmd = &cli.Command{
tablewriter.Col("Deals"),
tablewriter.Col("DealWeight"),
tablewriter.NewLineCol("Error"),
tablewriter.NewLineCol("EarlyExpiration"))
tablewriter.NewLineCol("RecoveryTimeout"))
fast := cctx.Bool("fast")
@ -281,7 +281,7 @@ var sectorsListCmd = &cli.Command{
}
if st.Early > 0 {
m["EarlyExpiration"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early))
m["RecoveryTimeout"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early))
}
}
}

View File

@ -33,6 +33,7 @@
* [ChainTipSetWeight](#ChainTipSetWeight)
* [Client](#Client)
* [ClientCalcCommP](#ClientCalcCommP)
* [ClientCancelDataTransfer](#ClientCancelDataTransfer)
* [ClientDataTransferUpdates](#ClientDataTransferUpdates)
* [ClientDealSize](#ClientDealSize)
* [ClientFindData](#ClientFindData)
@ -859,6 +860,23 @@ Response:
}
```
### ClientCancelDataTransfer
ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
Perms: write
Inputs:
```json
[
3,
"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
true
]
```
Response: `{}`
### ClientDataTransferUpdates
There are not yet any comments for this method.

go.mod
View File

@ -22,7 +22,7 @@ require (
github.com/dustin/go-humanize v1.0.0
github.com/elastic/go-sysinfo v1.3.0
github.com/fatih/color v1.9.0
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f
github.com/filecoin-project/go-address v0.0.4
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect
github.com/filecoin-project/go-bitfield v0.2.1

View File

@ -858,6 +858,14 @@ func (a *API) ClientRestartDataTransfer(ctx context.Context, transferID datatran
return a.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID})
}
func (a *API) ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
selfPeer := a.Host.ID()
if isInitiator {
return a.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID})
}
return a.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID})
}
func newDealInfo(v storagemarket.ClientDeal) api.DealInfo {
return api.DealInfo{
ProposalCid: v.ProposalCid,

View File

@ -11,6 +11,7 @@ import (
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/peer"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
@ -402,6 +403,22 @@ func (sm *StorageMinerAPI) MarketListDataTransfers(ctx context.Context) ([]api.D
return apiChannels, nil
}
func (sm *StorageMinerAPI) MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
selfPeer := sm.Host.ID()
if isInitiator {
return sm.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID})
}
return sm.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID})
}
func (sm *StorageMinerAPI) MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
selfPeer := sm.Host.ID()
if isInitiator {
return sm.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID})
}
return sm.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID})
}
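Both miner-side helpers (and ClientCancelDataTransfer above) encode the same convention: a datatransfer.ChannelID is keyed by whichever peer initiated the transfer, so isInitiator decides which slot the local host ID occupies. A hypothetical shared helper (not part of this change) makes the shape explicit:

```go
// channelIDFor orients a ChannelID according to which side initiated.
func channelIDFor(self, other peer.ID, id datatransfer.TransferID, selfIsInitiator bool) datatransfer.ChannelID {
	if selfIsInitiator {
		return datatransfer.ChannelID{Initiator: self, Responder: other, ID: id}
	}
	return datatransfer.ChannelID{Initiator: other, Responder: self, ID: id}
}
```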
func (sm *StorageMinerAPI) MarketDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) {
channels := make(chan api.DataTransferChannel)

View File

@ -187,22 +187,12 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st
ctx := helpers.LifecycleCtx(mctx, lc)
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
fps, err := storage.NewWindowedPoStScheduler(api, fc, sealer, sealer, j, maddr)
if err != nil {
return nil, err
}
worker, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK)
if err != nil {
return nil, err
}
fps, err := storage.NewWindowedPoStScheduler(api, fc, sealer, sealer, j, maddr, worker)
if err != nil {
return nil, err
}
sm, err := storage.NewMiner(api, maddr, worker, h, ds, sealer, sc, verif, gsd, fc, j)
sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, gsd, fc, j)
if err != nil {
return nil, err
}

View File

@ -663,6 +663,7 @@ func TestPaychGetMergeAddFunds(t *testing.T) {
defer addFundsSent.Done()
// Request add funds - should block until create channel has completed
var err error
addFundsCh1, addFundsMcid1, err = mgr.GetPaych(ctx, from, to, addFundsAmt1)
require.NoError(t, err)
}()
@ -671,6 +672,7 @@ func TestPaychGetMergeAddFunds(t *testing.T) {
defer addFundsSent.Done()
// Request add funds again - should merge with waiting add funds request
var err error
addFundsCh2, addFundsMcid2, err = mgr.GetPaych(ctx, from, to, addFundsAmt2)
require.NoError(t, err)
}()
@ -766,6 +768,7 @@ func TestPaychGetMergeAddFundsCtxCancelOne(t *testing.T) {
defer addFundsSent.Done()
// Request add funds again - should merge with waiting add funds request
var err error
addFundsCh2, addFundsMcid2, err = mgr.GetPaych(ctx, from, to, addFundsAmt2)
require.NoError(t, err)
}()
@ -861,7 +864,6 @@ func TestPaychGetMergeAddFundsCtxCancelAll(t *testing.T) {
// Request add funds again - should merge with waiting add funds request
_, _, addFundsErr2 = mgr.GetPaych(addFundsCtx2, from, to, big.NewInt(3))
require.NoError(t, err)
}()
// Wait for add funds requests to be queued up
waitForQueueSize(t, mgr, from, to, 2)
@ -950,6 +952,7 @@ func TestPaychAvailableFunds(t *testing.T) {
defer addFundsSent.Done()
// Request add funds - should block until create channel has completed
var err error
_, addFundsMcid, err = mgr.GetPaych(ctx, from, to, addFundsAmt)
require.NoError(t, err)
}()

View File

@ -101,10 +101,13 @@ type mergedFundsReq struct {
func newMergedFundsReq(reqs []*fundsReq) *mergedFundsReq {
ctx, cancel := context.WithCancel(context.Background())
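// Copy the requests into a fresh slice so the merged request keeps a
// stable view even if the caller's queue slice is mutated afterwards.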
rqs := make([]*fundsReq, len(reqs))
copy(rqs, reqs)
m := &mergedFundsReq{
ctx: ctx,
cancel: cancel,
reqs: reqs,
reqs: rqs,
}
for _, r := range m.reqs {
@ -201,7 +204,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable
// Merge all pending requests into one.
// For example if there are pending requests for 3, 2, 4 then
// amt = 3 + 2 + 4 = 9
merged := newMergedFundsReq(ca.fundsReqQueue[:])
merged := newMergedFundsReq(ca.fundsReqQueue)
amt := merged.sum()
if amt.IsZero() {
// Note: The amount can be zero if requests are cancelled as we're

View File

@ -49,8 +49,7 @@ type Miner struct {
sc sealing.SectorIDCounter
verif ffiwrapper.Verifier
maddr address.Address
worker address.Address
maddr address.Address
getSealConfig dtypes.GetSealingConfigFunc
sealing *sealing.Sealing
@ -112,7 +111,7 @@ type storageMinerApi interface {
WalletHas(context.Context, address.Address) (bool, error)
}
func NewMiner(api storageMinerApi, maddr, worker address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal) (*Miner, error) {
func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal) (*Miner, error) {
m := &Miner{
api: api,
feeCfg: feeCfg,
@ -123,7 +122,6 @@ func NewMiner(api storageMinerApi, maddr, worker address.Address, h host.Host, d
verif: verif,
maddr: maddr,
worker: worker,
getSealConfig: gsd,
journal: journal,
sealingEvtType: journal.RegisterEventType("storage", "sealing_states"),
@ -175,7 +173,17 @@ func (m *Miner) Stop(ctx context.Context) error {
}
func (m *Miner) runPreflightChecks(ctx context.Context) error {
has, err := m.api.WalletHas(ctx, m.worker)
mi, err := m.api.StateMinerInfo(ctx, m.maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to resolve miner info: %w", err)
}
workerKey, err := m.api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to resolve worker key: %w", err)
}
has, err := m.api.WalletHas(ctx, workerKey)
if err != nil {
return xerrors.Errorf("failed to check wallet for worker key: %w", err)
}
@ -184,7 +192,7 @@ func (m *Miner) runPreflightChecks(ctx context.Context) error {
return errors.New("key for worker not found in local wallet")
}
log.Infof("starting up miner %s, worker addr %s", m.maddr, m.worker)
log.Infof("starting up miner %s, worker addr %s", m.maddr, workerKey)
return nil
}

View File

@ -292,13 +292,14 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
msg := &types.Message{
To: s.actor,
From: s.worker,
Method: miner.Methods.DeclareFaultsRecovered,
Params: enc,
Value: types.NewInt(0),
}
spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
s.setSender(ctx, msg, spec)
if err := s.setSender(ctx, msg, spec); err != nil {
return recoveries, nil, err
}
sm, err := s.api.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)})
if err != nil {
@ -376,13 +377,14 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64,
msg := &types.Message{
To: s.actor,
From: s.worker,
Method: miner.Methods.DeclareFaults,
Params: enc,
Value: types.NewInt(0), // TODO: Is there a fee?
}
spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
s.setSender(ctx, msg, spec)
if err := s.setSender(ctx, msg, spec); err != nil {
return faults, nil, err
}
sm, err := s.api.MpoolPushMessage(ctx, msg, spec)
if err != nil {
@ -716,13 +718,14 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi
msg := &types.Message{
To: s.actor,
From: s.worker,
Method: miner.Methods.SubmitWindowedPoSt,
Params: enc,
Value: types.NewInt(0),
}
spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
s.setSender(ctx, msg, spec)
if err := s.setSender(ctx, msg, spec); err != nil {
return nil, err
}
// TODO: consider maybe caring about the output
sm, err := s.api.MpoolPushMessage(ctx, msg, spec)
@ -750,21 +753,18 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi
return sm, nil
}
func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) {
func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) error {
mi, err := s.api.StateMinerInfo(ctx, s.actor, types.EmptyTSK)
if err != nil {
log.Errorw("error getting miner info", "error", err)
// better than just failing
msg.From = s.worker
return
return xerrors.Errorf("error getting miner info: %w", err)
}
// use the worker as a fallback
msg.From = mi.Worker
gm, err := s.api.GasEstimateMessageGas(ctx, msg, spec, types.EmptyTSK)
if err != nil {
log.Errorw("estimating gas", "error", err)
msg.From = s.worker
return
return nil
}
*msg = *gm
@ -773,9 +773,9 @@ func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message,
pa, err := AddressFor(ctx, s.api, mi, PoStAddr, minFunds)
if err != nil {
log.Errorw("error selecting address for window post", "error", err)
msg.From = s.worker
return
return nil
}
msg.From = pa
return nil
}

View File

@ -123,7 +123,6 @@ func TestWDPostDoPost(t *testing.T) {
proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
postAct := tutils.NewIDAddr(t, 100)
workerAct := tutils.NewIDAddr(t, 101)
mockStgMinerAPI := newMockStorageMinerAPI()
@ -164,7 +163,6 @@ func TestWDPostDoPost(t *testing.T) {
faultTracker: &mockFaultTracker{},
proofType: proofType,
actor: postAct,
worker: workerAct,
journal: journal.NilJournal(),
}

View File

@ -31,8 +31,7 @@ type WindowPoStScheduler struct {
partitionSectors uint64
ch *changeHandler
actor address.Address
worker address.Address
actor address.Address
evtTypes [4]journal.EventType
journal journal.Journal
@ -41,7 +40,7 @@ type WindowPoStScheduler struct {
// failLk sync.Mutex
}
func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb storage.Prover, ft sectorstorage.FaultTracker, j journal.Journal, actor address.Address, worker address.Address) (*WindowPoStScheduler, error) {
func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb storage.Prover, ft sectorstorage.FaultTracker, j journal.Journal, actor address.Address) (*WindowPoStScheduler, error) {
mi, err := api.StateMinerInfo(context.TODO(), actor, types.EmptyTSK)
if err != nil {
return nil, xerrors.Errorf("getting sector size: %w", err)
@ -60,8 +59,7 @@ func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb
proofType: rt,
partitionSectors: mi.WindowPoStPartitionSectors,
actor: actor,
worker: worker,
actor: actor,
evtTypes: [...]journal.EventType{
evtTypeWdPoStScheduler: j.RegisterEventType("wdpost", "scheduler"),
evtTypeWdPoStProofs: j.RegisterEventType("wdpost", "proofs_processed"),