Merge remote-tracking branch 'origin/master' into feat/nv13-1.11
commit c3e8eddb9b
@@ -35,13 +35,13 @@ type MsgMeta struct {
 }
 
 type Wallet interface {
-	WalletNew(context.Context, types.KeyType) (address.Address, error)
+	WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:admin
-	WalletHas(context.Context, address.Address) (bool, error)
+	WalletHas(context.Context, address.Address) (bool, error) //perm:admin
-	WalletList(context.Context) ([]address.Address, error)
+	WalletList(context.Context) ([]address.Address, error) //perm:admin
 
-	WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error)
+	WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error) //perm:admin
 
-	WalletExport(context.Context, address.Address) (*types.KeyInfo, error)
+	WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
-	WalletImport(context.Context, *types.KeyInfo) (address.Address, error)
+	WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
-	WalletDelete(context.Context, address.Address) error
+	WalletDelete(context.Context, address.Address) error //perm:admin
 }
@@ -739,19 +739,19 @@ type StorageMinerStub struct {
 
 type WalletStruct struct {
 	Internal struct {
-		WalletDelete func(p0 context.Context, p1 address.Address) error ``
+		WalletDelete func(p0 context.Context, p1 address.Address) error `perm:"admin"`
 
-		WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) ``
+		WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `perm:"admin"`
 
-		WalletHas func(p0 context.Context, p1 address.Address) (bool, error) ``
+		WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `perm:"admin"`
 
-		WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) ``
+		WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `perm:"admin"`
 
-		WalletList func(p0 context.Context) ([]address.Address, error) ``
+		WalletList func(p0 context.Context) ([]address.Address, error) `perm:"admin"`
 
-		WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) ``
+		WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `perm:"admin"`
 
-		WalletSign func(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) ``
+		WalletSign func(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) `perm:"admin"`
	}
 }
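Note: the `perm:"admin"` struct tags added above are what the JSON-RPC permission layer inspects at runtime to decide whether a caller's token may invoke a handler. A minimal, self-contained sketch of reading such a tag via reflection — the `permOf` helper and the example fields are hypothetical illustrations, not Lotus APIs:

```go
package main

import (
	"fmt"
	"reflect"
)

// Example mirroring the shape of the generated API struct: each RPC handler
// field carries a `perm` tag naming the permission required to call it.
// (Illustrative only; not the real WalletStruct.)
type exampleWallet struct {
	Internal struct {
		WalletList func() ([]string, error) `perm:"admin"`
		WalletSign func() ([]byte, error)   `perm:"sign"`
	}
}

// permOf reads the `perm` tag of a named handler field via reflection.
// (Hypothetical helper, not part of Lotus.)
func permOf(api interface{}, method string) string {
	internal := reflect.TypeOf(api).Elem().Field(0).Type
	f, ok := internal.FieldByName(method)
	if !ok {
		return ""
	}
	return f.Tag.Get("perm")
}

func main() {
	var w exampleWallet
	fmt.Println(permOf(&w, "WalletList")) // admin
	fmt.Println(permOf(&w, "WalletSign")) // sign
}
```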
@@ -8,6 +8,7 @@ import (
 	"math/rand"
 	"os"
 	"path/filepath"
+	"sort"
 	"testing"
 	"time"
 
@@ -18,6 +19,7 @@ import (
 
 	"github.com/filecoin-project/go-fil-markets/storagemarket"
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
@@ -51,7 +53,7 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, sta
 }
 
 func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
-	res, data, err := CreateClientFile(ctx, client, rseed)
+	res, data, err := CreateClientFile(ctx, client, rseed, 0)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -63,7 +65,7 @@ func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode,
 
 	// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
 	time.Sleep(time.Second)
-	waitDealSealed(t, ctx, miner, client, deal, false)
+	waitDealSealed(t, ctx, miner, client, deal, false, false, nil)
 
 	// Retrieval
 	info, err := client.ClientGetDealInfo(ctx, *deal)
@@ -72,8 +74,11 @@ func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode,
 	testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
 }
 
-func CreateClientFile(ctx context.Context, client api.FullNode, rseed int) (*api.ImportRes, []byte, error) {
-	data := make([]byte, 1600)
+func CreateClientFile(ctx context.Context, client api.FullNode, rseed, size int) (*api.ImportRes, []byte, error) {
+	if size == 0 {
+		size = 1600
+	}
+	data := make([]byte, size)
 	rand.New(rand.NewSource(int64(rseed))).Read(data)
 
 	dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
@@ -119,7 +124,7 @@ func TestPublishDealsBatching(t *testing.T, b APIBuilder, blocktime time.Duratio
 
 	// Starts a deal and waits until it's published
 	runDealTillPublish := func(rseed int) {
-		res, _, err := CreateClientFile(s.ctx, s.client, rseed)
+		res, _, err := CreateClientFile(s.ctx, s.client, rseed, 0)
 		require.NoError(t, err)
 
 		upds, err := client.ClientGetDealUpdates(s.ctx)
@@ -186,68 +191,109 @@ func TestPublishDealsBatching(t *testing.T, b APIBuilder, blocktime time.Duratio
 }
 
 func TestBatchDealInput(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
-	publishPeriod := 10 * time.Second
-	maxDealsPerMsg := uint64(4)
+	run := func(piece, deals, expectSectors int) func(t *testing.T) {
+		return func(t *testing.T) {
+			publishPeriod := 10 * time.Second
+			maxDealsPerMsg := uint64(deals)
 
 			// Set max deals per publish deals message to maxDealsPerMsg
 			minerDef := []StorageMiner{{
 				Full: 0,
 				Opts: node.Options(
 					node.Override(
 						new(*storageadapter.DealPublisher),
 						storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
 							Period:         publishPeriod,
 							MaxDealsPerMsg: maxDealsPerMsg,
 						})),
 					node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
 						return func() (sealiface.Config, error) {
 							return sealiface.Config{
-								MaxWaitDealsSectors:       1,
+								MaxWaitDealsSectors:       2,
 								MaxSealingSectors:         1,
-								MaxSealingSectorsForDeals: 2,
+								MaxSealingSectorsForDeals: 3,
 								AlwaysKeepUnsealedCopy:    true,
+								WaitDealsDelay:            time.Hour,
 							}, nil
 						}, nil
 					}),
 				),
 				Preseal: PresealGenesis,
 			}}
 
 			// Create a connect client and miner node
 			n, sn := b(t, OneFull, minerDef)
 			client := n[0].FullNode.(*impl.FullNodeAPI)
 			miner := sn[0]
 			s := connectAndStartMining(t, b, blocktime, client, miner)
 			defer s.blockMiner.Stop()
 
+			err := miner.MarketSetAsk(s.ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
+			require.NoError(t, err)
+
+			checkNoPadding := func() {
+				sl, err := sn[0].SectorsList(s.ctx)
+				require.NoError(t, err)
+
+				sort.Slice(sl, func(i, j int) bool {
+					return sl[i] < sl[j]
+				})
+
+				for _, snum := range sl {
+					si, err := sn[0].SectorsStatus(s.ctx, snum, false)
+					require.NoError(t, err)
+
+					// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)
+
+					for _, deal := range si.Deals {
+						if deal == 0 {
+							fmt.Printf("sector %d had a padding piece!\n", snum)
+						}
+					}
+				}
+			}
+
 			// Starts a deal and waits until it's published
 			runDealTillSeal := func(rseed int) {
-				res, _, err := CreateClientFile(s.ctx, s.client, rseed)
+				res, _, err := CreateClientFile(s.ctx, s.client, rseed, piece)
 				require.NoError(t, err)
 
 				dc := startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
-				waitDealSealed(t, s.ctx, s.miner, s.client, dc, false)
+				waitDealSealed(t, s.ctx, s.miner, s.client, dc, false, true, checkNoPadding)
 			}
 
-			// Run maxDealsPerMsg+1 deals in parallel
-			done := make(chan struct{}, maxDealsPerMsg+1)
-			for rseed := 1; rseed <= int(maxDealsPerMsg+1); rseed++ {
+			// Run maxDealsPerMsg deals in parallel
+			done := make(chan struct{}, maxDealsPerMsg)
+			for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ {
 				rseed := rseed
 				go func() {
 					runDealTillSeal(rseed)
 					done <- struct{}{}
 				}()
 			}
 
 			// Wait for maxDealsPerMsg of the deals to be published
 			for i := 0; i < int(maxDealsPerMsg); i++ {
 				<-done
 			}
 
+			checkNoPadding()
+
 			sl, err := sn[0].SectorsList(s.ctx)
 			require.NoError(t, err)
-			require.GreaterOrEqual(t, len(sl), 4)
-			require.LessOrEqual(t, len(sl), 5)
+			require.Equal(t, len(sl), expectSectors)
+		}
+	}
+
+	t.Run("4-p1600B", run(1600, 4, 4))
+	t.Run("4-p513B", run(513, 4, 2))
+	if !testing.Short() {
+		t.Run("32-p257B", run(257, 32, 8))
+		t.Run("32-p10B", run(10, 32, 2))
+
+		// fixme: this appears to break data-transfer / markets in some really creative ways
+		//t.Run("128-p10B", run(10, 128, 8))
+	}
 }
 
 func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
@@ -303,12 +349,12 @@ func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration
 
 	// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
 	time.Sleep(time.Second)
-	waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true)
+	waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true, false, nil)
 
 	deal2 := startDeal(t, s.ctx, s.miner, s.client, fcid2, true, 0)
 
 	time.Sleep(time.Second)
-	waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false)
+	waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false, false, nil)
 
 	// Retrieval
 	info, err := s.client.ClientGetDealInfo(s.ctx, *deal2)
@@ -364,7 +410,7 @@ func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client
 	return deal
 }
 
-func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal bool) {
+func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
 loop:
 	for {
 		di, err := client.ClientGetDealInfo(ctx, *deal)
@@ -376,7 +422,9 @@
 			if noseal {
 				return
 			}
-			startSealingWaiting(t, ctx, miner)
+			if !noSealStart {
+				startSealingWaiting(t, ctx, miner)
+			}
 		case storagemarket.StorageDealProposalRejected:
 			t.Fatal("deal rejected")
 		case storagemarket.StorageDealFailing:
@@ -387,8 +435,25 @@
 			fmt.Println("COMPLETE", di)
 			break loop
 		}
-		fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
+
+		mds, err := miner.MarketListIncompleteDeals(ctx)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		var minerState storagemarket.StorageDealStatus
+		for _, md := range mds {
+			if md.DealID == di.DealID {
+				minerState = md.State
+				break
+			}
+		}
+
+		fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
 		time.Sleep(time.Second / 2)
+		if cb != nil {
+			cb()
+		}
 	}
 }
 
@@ -430,7 +495,7 @@ func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNod
 		si, err := miner.SectorsStatus(ctx, snum, false)
 		require.NoError(t, err)
 
-		t.Logf("Sector state: %s", si.State)
+		t.Logf("Sector %d state: %s", snum, si.State)
 		if si.State == api.SectorState(sealing.WaitDeals) {
 			require.NoError(t, miner.SectorStartSealing(ctx, snum))
 		}
@@ -194,7 +194,7 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
 	// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
 	time.Sleep(time.Second)
 
-	waitDealSealed(t, ctx, provider, client, deal, false)
+	waitDealSealed(t, ctx, provider, client, deal, false, false, nil)
 
 	<-minedTwo
 
@@ -144,8 +144,10 @@ func (e *hcEvents) processHeadChangeEvent(rev, app []*types.TipSet) error {
 
 		// Queue up calls until there have been enough blocks to reach
 		// confidence on the message calls
-		for tid, data := range newCalls {
-			e.queueForConfidence(tid, data, nil, ts)
+		for tid, calls := range newCalls {
+			for _, data := range calls {
+				e.queueForConfidence(tid, data, nil, ts)
+			}
 		}
 
 		for at := e.lastTs.Height(); at <= ts.Height(); at++ {
@@ -474,7 +476,7 @@ func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) mes
 }
 
 // Check if there are any new actor calls
-func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventData, error) {
+func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID][]eventData, error) {
 	pts, err := me.cs.ChainGetTipSet(me.ctx, ts.Parents()) // we actually care about messages in the parent tipset here
 	if err != nil {
 		log.Errorf("getting parent tipset in checkNewCalls: %s", err)
@@ -485,7 +487,7 @@ func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventDat
 	defer me.lk.RUnlock()
 
 	// For each message in the tipset
-	res := make(map[triggerID]eventData)
+	res := make(map[triggerID][]eventData)
 	me.messagesForTs(pts, func(msg *types.Message) {
 		// TODO: provide receipts
 
@@ -500,7 +502,7 @@ func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventDat
 			// If there was a match, include the message in the results for the
 			// trigger
 			if matched {
-				res[tid] = msg
+				res[tid] = append(res[tid], msg)
 			}
 		}
 	})
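Note: switching `checkNewCalls` from `map[triggerID]eventData` to `map[triggerID][]eventData` matters when one tipset carries several matching messages for the same trigger — with a single-valued map, each later match silently overwrote the earlier one. A standalone sketch of the difference, using simplified placeholder types rather than the Lotus ones; the new `TestCalledMultiplePerEpoch` test below exercises exactly this case with two messages at the same height:

```go
package main

import "fmt"

type triggerID int
type eventData string

func main() {
	// Two matching messages for the same trigger within one epoch.
	msgs := []eventData{"msg nonce=1", "msg nonce=2"}
	tid := triggerID(7)

	// Old shape: a plain map keeps only the last match per trigger.
	single := map[triggerID]eventData{}
	for _, m := range msgs {
		single[tid] = m
	}
	fmt.Println(single[tid]) // "msg nonce=2" — the first match is lost

	// New shape: a slice-valued map queues every match for confidence handling.
	multi := map[triggerID][]eventData{}
	for _, m := range msgs {
		multi[tid] = append(multi[tid], m)
	}
	fmt.Println(len(multi[tid])) // 2 — both matches retained
}
```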
@@ -1323,3 +1323,62 @@ func TestStateChangedTimeout(t *testing.T) {
 	fcs.advance(0, 5, nil)
 	require.False(t, called)
 }
+
+func TestCalledMultiplePerEpoch(t *testing.T) {
+	fcs := &fakeCS{
+		t: t,
+		h: 1,
+
+		msgs:    map[cid.Cid]fakeMsg{},
+		blkMsgs: map[cid.Cid]cid.Cid{},
+		tsc:     newTSCache(2*build.ForkLengthThreshold, nil),
+	}
+	require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
+
+	events := NewEvents(context.Background(), fcs)
+
+	t0123, err := address.NewFromString("t0123")
+	require.NoError(t, err)
+
+	at := 0
+
+	err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) {
+		return false, true, nil
+	}, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) {
+		switch at {
+		case 0:
+			require.Equal(t, uint64(1), msg.Nonce)
+			require.Equal(t, abi.ChainEpoch(4), ts.Height())
+		case 1:
+			require.Equal(t, uint64(2), msg.Nonce)
+			require.Equal(t, abi.ChainEpoch(4), ts.Height())
+		default:
+			t.Fatal("apply should only get called twice, at: ", at)
+		}
+		at++
+		return true, nil
+	}, func(_ context.Context, ts *types.TipSet) error {
+		switch at {
+		case 2:
+			require.Equal(t, abi.ChainEpoch(4), ts.Height())
+		case 3:
+			require.Equal(t, abi.ChainEpoch(4), ts.Height())
+		default:
+			t.Fatal("revert should only get called twice, at: ", at)
+		}
+		at++
+		return nil
+	}, 3, 20, matchAddrMethod(t0123, 5))
+	require.NoError(t, err)
+
+	fcs.advance(0, 10, map[int]cid.Cid{
+		1: fcs.fakeMsgs(fakeMsg{
+			bmsgs: []*types.Message{
+				{To: t0123, From: t0123, Method: 5, Nonce: 1},
+				{To: t0123, From: t0123, Method: 5, Nonce: 2},
+			},
+		}),
+	})
+
+	fcs.advance(9, 1, nil)
+}

cli/state.go (183 lines changed)
@@ -3,6 +3,8 @@ package cli
 import (
 	"bytes"
 	"context"
+	"encoding/base64"
+	"encoding/hex"
 	"encoding/json"
 	"fmt"
 	"html/template"
@@ -22,7 +24,6 @@ import (
 
 	"github.com/ipfs/go-cid"
 	cbor "github.com/ipfs/go-ipld-cbor"
-	"github.com/libp2p/go-libp2p-core/peer"
 	"github.com/multiformats/go-multiaddr"
 	"github.com/multiformats/go-multihash"
 	"github.com/urfave/cli/v2"
@@ -31,7 +32,6 @@ import (
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/exitcode"
 
 	"github.com/filecoin-project/lotus/api"
|
|||||||
var StateCallCmd = &cli.Command{
|
var StateCallCmd = &cli.Command{
|
||||||
Name: "call",
|
Name: "call",
|
||||||
Usage: "Invoke a method on an actor locally",
|
Usage: "Invoke a method on an actor locally",
|
||||||
ArgsUsage: "[toAddress methodId <param1 param2 ...> (optional)]",
|
ArgsUsage: "[toAddress methodId params (optional)]",
|
||||||
Flags: []cli.Flag{
|
Flags: []cli.Flag{
|
||||||
&cli.StringFlag{
|
&cli.StringFlag{
|
||||||
Name: "from",
|
Name: "from",
|
||||||
@ -1535,8 +1535,13 @@ var StateCallCmd = &cli.Command{
|
|||||||
},
|
},
|
||||||
&cli.StringFlag{
|
&cli.StringFlag{
|
||||||
Name: "ret",
|
Name: "ret",
|
||||||
Usage: "specify how to parse output (auto, raw, addr, big)",
|
Usage: "specify how to parse output (raw, decoded, base64, hex)",
|
||||||
Value: "auto",
|
Value: "decoded",
|
||||||
|
},
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "encoding",
|
||||||
|
Value: "base64",
|
||||||
|
Usage: "specify params encoding to parse (base64, hex)",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Action: func(cctx *cli.Context) error {
|
Action: func(cctx *cli.Context) error {
|
||||||
@ -1577,14 +1582,23 @@ var StateCallCmd = &cli.Command{
|
|||||||
return fmt.Errorf("failed to parse 'value': %s", err)
|
return fmt.Errorf("failed to parse 'value': %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
act, err := api.StateGetActor(ctx, toa, ts.Key())
|
var params []byte
|
||||||
if err != nil {
|
// If params were passed in, decode them
|
||||||
return fmt.Errorf("failed to lookup target actor: %s", err)
|
if cctx.Args().Len() > 2 {
|
||||||
}
|
switch cctx.String("encoding") {
|
||||||
|
case "base64":
|
||||||
params, err := parseParamsForMethod(act.Code, method, cctx.Args().Slice()[2:])
|
params, err = base64.StdEncoding.DecodeString(cctx.Args().Get(2))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to parse params: %s", err)
|
return xerrors.Errorf("decoding base64 value: %w", err)
|
||||||
|
}
|
||||||
|
case "hex":
|
||||||
|
params, err = hex.DecodeString(cctx.Args().Get(2))
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("decoding hex value: %w", err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding"))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ret, err := api.StateCall(ctx, &types.Message{
|
ret, err := api.StateCall(ctx, &types.Message{
|
||||||
@@ -1595,137 +1609,42 @@ var StateCallCmd = &cli.Command{
 			Params: params,
 		}, ts.Key())
 		if err != nil {
-			return fmt.Errorf("state call failed: %s", err)
+			return fmt.Errorf("state call failed: %w", err)
 		}
 
 		if ret.MsgRct.ExitCode != 0 {
 			return fmt.Errorf("invocation failed (exit: %d, gasUsed: %d): %s", ret.MsgRct.ExitCode, ret.MsgRct.GasUsed, ret.Error)
 		}
 
-		s, err := formatOutput(cctx.String("ret"), ret.MsgRct.Return)
-		if err != nil {
-			return fmt.Errorf("failed to format output: %s", err)
-		}
-
-		fmt.Printf("gas used: %d\n", ret.MsgRct.GasUsed)
-		fmt.Printf("return: %s\n", s)
+		fmt.Println("Call receipt:")
+		fmt.Printf("Exit code: %d\n", ret.MsgRct.ExitCode)
+		fmt.Printf("Gas Used: %d\n", ret.MsgRct.GasUsed)
+
+		switch cctx.String("ret") {
+		case "decoded":
+			act, err := api.StateGetActor(ctx, toa, ts.Key())
+			if err != nil {
+				return xerrors.Errorf("getting actor: %w", err)
+			}
+
+			retStr, err := jsonReturn(act.Code, abi.MethodNum(method), ret.MsgRct.Return)
+			if err != nil {
+				return xerrors.Errorf("decoding return: %w", err)
+			}
+
+			fmt.Printf("Return:\n%s\n", retStr)
+		case "raw":
+			fmt.Printf("Return: \n%s\n", ret.MsgRct.Return)
+		case "hex":
+			fmt.Printf("Return: \n%x\n", ret.MsgRct.Return)
+		case "base64":
+			fmt.Printf("Return: \n%s\n", base64.StdEncoding.EncodeToString(ret.MsgRct.Return))
+		}
 
 		return nil
 	},
 }
 
-func formatOutput(t string, val []byte) (string, error) {
-	switch t {
-	case "raw", "hex":
-		return fmt.Sprintf("%x", val), nil
-	case "address", "addr", "a":
-		a, err := address.NewFromBytes(val)
-		if err != nil {
-			return "", err
-		}
-		return a.String(), nil
-	case "big", "int", "bigint":
-		bi := types.BigFromBytes(val)
-		return bi.String(), nil
-	case "fil":
-		bi := types.FIL(types.BigFromBytes(val))
-		return bi.String(), nil
-	case "pid", "peerid", "peer":
-		pid, err := peer.IDFromBytes(val)
-		if err != nil {
-			return "", err
-		}
-
-		return pid.Pretty(), nil
-	case "auto":
-		if len(val) == 0 {
-			return "", nil
-		}
-
-		a, err := address.NewFromBytes(val)
-		if err == nil {
-			return "address: " + a.String(), nil
-		}
-
-		pid, err := peer.IDFromBytes(val)
-		if err == nil {
-			return "peerID: " + pid.Pretty(), nil
-		}
-
-		bi := types.BigFromBytes(val)
-		return "bigint: " + bi.String(), nil
-	default:
-		return "", fmt.Errorf("unrecognized output type: %q", t)
-	}
-}
-
-func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, error) {
-	if len(args) == 0 {
-		return nil, nil
-	}
-
-	// TODO: consider moving this to a dedicated helper
-	actMeta, ok := stmgr.MethodsMap[act]
-	if !ok {
-		return nil, fmt.Errorf("unknown actor %s", act)
-	}
-
-	methodMeta, ok := actMeta[abi.MethodNum(method)]
-	if !ok {
-		return nil, fmt.Errorf("unknown method %d for actor %s", method, act)
-	}
-
-	paramObj := methodMeta.Params.Elem()
-	if paramObj.NumField() != len(args) {
-		return nil, fmt.Errorf("not enough arguments given to call that method (expecting %d)", paramObj.NumField())
-	}
-
-	p := reflect.New(paramObj)
-	for i := 0; i < len(args); i++ {
-		switch paramObj.Field(i).Type {
-		case reflect.TypeOf(address.Address{}):
-			a, err := address.NewFromString(args[i])
-			if err != nil {
-				return nil, fmt.Errorf("failed to parse address: %s", err)
-			}
-			p.Elem().Field(i).Set(reflect.ValueOf(a))
-		case reflect.TypeOf(uint64(0)):
-			val, err := strconv.ParseUint(args[i], 10, 64)
-			if err != nil {
-				return nil, err
-			}
-			p.Elem().Field(i).Set(reflect.ValueOf(val))
-		case reflect.TypeOf(abi.ChainEpoch(0)):
-			val, err := strconv.ParseInt(args[i], 10, 64)
-			if err != nil {
-				return nil, err
-			}
-			p.Elem().Field(i).Set(reflect.ValueOf(abi.ChainEpoch(val)))
-		case reflect.TypeOf(big.Int{}):
-			val, err := big.FromString(args[i])
-			if err != nil {
-				return nil, err
-			}
-			p.Elem().Field(i).Set(reflect.ValueOf(val))
-		case reflect.TypeOf(peer.ID("")):
-			pid, err := peer.Decode(args[i])
-			if err != nil {
-				return nil, fmt.Errorf("failed to parse peer ID: %s", err)
-			}
-			p.Elem().Field(i).Set(reflect.ValueOf(pid))
-		default:
-			return nil, fmt.Errorf("unsupported type for call (TODO): %s", paramObj.Field(i).Type)
-		}
-	}
-
-	m := p.Interface().(cbg.CBORMarshaler)
-	buf := new(bytes.Buffer)
-	if err := m.MarshalCBOR(buf); err != nil {
-		return nil, fmt.Errorf("failed to marshal param object: %s", err)
-	}
-	return buf.Bytes(), nil
-}
-
 var StateCircSupplyCmd = &cli.Command{
 	Name:  "circulating-supply",
 	Usage: "Get the exact current circulating supply of Filecoin",
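Note: with the reworked `lotus state call`, raw message params are now supplied pre-encoded and decoded according to `--encoding`, while the return value is printed according to `--ret` (decoded, raw, hex, or base64). A small standard-library-only sketch of the decoding step — the `decodeParams` helper is illustrative, not the actual Lotus function:

```go
package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

// decodeParams mirrors the idea behind the --encoding flag: params arrive as
// base64 (the default) or hex and are decoded to raw bytes before being
// placed into the message.
func decodeParams(encoding, arg string) ([]byte, error) {
	switch encoding {
	case "base64":
		return base64.StdEncoding.DecodeString(arg)
	case "hex":
		return hex.DecodeString(arg)
	default:
		return nil, fmt.Errorf("unrecognized encoding: %s", encoding)
	}
}

func main() {
	// Example params bytes, once base64- and once hex-encoded.
	b, err := decodeParams("base64", "GUQA")
	fmt.Println(b, err) // [25 68 0] <nil>

	h, err := decodeParams("hex", "194400")
	fmt.Println(h, err) // [25 68 0] <nil>
}
```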
@@ -44,7 +44,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
 
 	// Create a deal (non-interactive)
 	// client deal --start-epoch=<start epoch> <cid> <miner addr> 1000000attofil <duration>
-	res, _, err := test.CreateClientFile(ctx, clientNode, 1)
+	res, _, err := test.CreateClientFile(ctx, clientNode, 1, 0)
 	require.NoError(t, err)
 	startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12)
 	dataCid := res.Root
@@ -60,7 +60,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
 	// <miner addr>
 	// "no" (verified client)
 	// "yes" (confirm deal)
-	res, _, err = test.CreateClientFile(ctx, clientNode, 2)
+	res, _, err = test.CreateClientFile(ctx, clientNode, 2, 0)
 	require.NoError(t, err)
 	dataCid2 := res.Root
 	duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay)
@@ -2,27 +2,33 @@ package main
 
 import (
 	"context"
+	"fmt"
 	"net"
 	"net/http"
 	"os"
 
 	"github.com/filecoin-project/lotus/api/v0api"
 
+	"github.com/gbrlsnchs/jwt/v3"
 	"github.com/gorilla/mux"
 	logging "github.com/ipfs/go-log/v2"
 	"github.com/urfave/cli/v2"
 	"go.opencensus.io/stats/view"
 	"go.opencensus.io/tag"
+	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-jsonrpc"
+	"github.com/filecoin-project/go-jsonrpc/auth"
 
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/wallet"
 	ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger"
 	lcli "github.com/filecoin-project/lotus/cli"
 	"github.com/filecoin-project/lotus/lib/lotuslog"
 	"github.com/filecoin-project/lotus/metrics"
+	"github.com/filecoin-project/lotus/node/modules"
 	"github.com/filecoin-project/lotus/node/repo"
 )
 
@@ -30,17 +36,33 @@ var log = logging.Logger("main")
 
 const FlagWalletRepo = "wallet-repo"
 
+type jwtPayload struct {
+	Allow []auth.Permission
+}
+
 func main() {
 	lotuslog.SetupLogLevels()
 
 	local := []*cli.Command{
 		runCmd,
+		getApiKeyCmd,
 	}
 
 	app := &cli.App{
 		Name:    "lotus-wallet",
 		Usage:   "Basic external wallet",
 		Version: build.UserVersion(),
+		Description: `
+lotus-wallet provides a remote wallet service for lotus.
+
+To configure your lotus node to use a remote wallet:
+* Run 'lotus-wallet get-api-key' to generate API key
+* Start lotus-wallet using 'lotus-wallet run' (see --help for additional flags)
+* Edit lotus config (~/.lotus/config.toml)
+  * Find the '[Wallet]' section
+  * Set 'RemoteBackend' to '[api key]:http://[wallet ip]:[wallet port]'
+    (the default port is 1777)
+* Start (or restart) the lotus daemon`,
 		Flags: []cli.Flag{
 			&cli.StringFlag{
 				Name: FlagWalletRepo,
@@ -65,6 +87,35 @@ func main() {
 	}
 }
 
+var getApiKeyCmd = &cli.Command{
+	Name:  "get-api-key",
+	Usage: "Generate API Key",
+	Action: func(cctx *cli.Context) error {
+		lr, ks, err := openRepo(cctx)
+		if err != nil {
+			return err
+		}
+		defer lr.Close() // nolint
+
+		p := jwtPayload{
+			Allow: []auth.Permission{api.PermAdmin},
+		}
+
+		authKey, err := modules.APISecret(ks, lr)
+		if err != nil {
+			return xerrors.Errorf("setting up api secret: %w", err)
+		}
+
+		k, err := jwt.Sign(&p, (*jwt.HMACSHA)(authKey))
+		if err != nil {
+			return xerrors.Errorf("jwt sign: %w", err)
+		}
+
+		fmt.Println(string(k))
+		return nil
+	},
+}
+
 var runCmd = &cli.Command{
 	Name:  "run",
 	Usage: "Start lotus wallet",
@@ -86,7 +137,13 @@ var runCmd = &cli.Command{
 			Name:  "offline",
 			Usage: "don't query chain state in interactive mode",
 		},
+		&cli.BoolFlag{
+			Name:   "disable-auth",
+			Usage:  "(insecure) disable api auth",
+			Hidden: true,
+		},
 	},
+	Description: "For setup instructions see 'lotus-wallet --help'",
 	Action: func(cctx *cli.Context) error {
 		log.Info("Starting lotus wallet")
 
@@ -101,31 +158,11 @@ var runCmd = &cli.Command{
 			log.Fatalf("Cannot register the view: %v", err)
 		}
 
-		repoPath := cctx.String(FlagWalletRepo)
-		r, err := repo.NewFS(repoPath)
-		if err != nil {
-			return err
-		}
-
-		ok, err := r.Exists()
-		if err != nil {
-			return err
-		}
-		if !ok {
-			if err := r.Init(repo.Worker); err != nil {
-				return err
-			}
-		}
-
-		lr, err := r.Lock(repo.Wallet)
-		if err != nil {
-			return err
-		}
-
-		ks, err := lr.KeyStore()
+		lr, ks, err := openRepo(cctx)
 		if err != nil {
 			return err
 		}
+		defer lr.Close() // nolint
 
 		lw, err := wallet.NewWallet(ks)
 		if err != nil {
@@ -167,19 +204,43 @@ var runCmd = &cli.Command{
 			w = &LoggedWallet{under: w}
 		}
 
+		rpcApi := metrics.MetricedWalletAPI(w)
+		if !cctx.Bool("disable-auth") {
+			rpcApi = api.PermissionedWalletAPI(rpcApi)
+		}
+
 		rpcServer := jsonrpc.NewServer()
-		rpcServer.Register("Filecoin", metrics.MetricedWalletAPI(w))
+		rpcServer.Register("Filecoin", rpcApi)
 
 		mux.Handle("/rpc/v0", rpcServer)
 		mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
 
-		/*ah := &auth.Handler{
-			Verify: nodeApi.AuthVerify,
-			Next:   mux.ServeHTTP,
-		}*/
+		var handler http.Handler = mux
+
+		if !cctx.Bool("disable-auth") {
+			authKey, err := modules.APISecret(ks, lr)
+			if err != nil {
+				return xerrors.Errorf("setting up api secret: %w", err)
+			}
+
+			authVerify := func(ctx context.Context, token string) ([]auth.Permission, error) {
+				var payload jwtPayload
+				if _, err := jwt.Verify([]byte(token), (*jwt.HMACSHA)(authKey), &payload); err != nil {
+					return nil, xerrors.Errorf("JWT Verification failed: %w", err)
+				}
+
+				return payload.Allow, nil
+			}
+
+			log.Info("API auth enabled, use 'lotus-wallet get-api-key' to get API key")
+			handler = &auth.Handler{
+				Verify: authVerify,
+				Next:   mux.ServeHTTP,
+			}
+		}
+
 		srv := &http.Server{
-			Handler: mux,
+			Handler: handler,
 			BaseContext: func(listener net.Listener) context.Context {
 				ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-wallet"))
 				return ctx
@@ -203,3 +264,33 @@ var runCmd = &cli.Command{
 		return srv.Serve(nl)
 	},
 }
+
+func openRepo(cctx *cli.Context) (repo.LockedRepo, types.KeyStore, error) {
+	repoPath := cctx.String(FlagWalletRepo)
+	r, err := repo.NewFS(repoPath)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ok, err := r.Exists()
+	if err != nil {
+		return nil, nil, err
+	}
+	if !ok {
+		if err := r.Init(repo.Worker); err != nil {
+			return nil, nil, err
+		}
+	}
+
+	lr, err := r.Lock(repo.Wallet)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ks, err := lr.KeyStore()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return lr, ks, nil
+}
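Note: the auth flow added to `lotus-wallet` is symmetric — `get-api-key` signs a token carrying the allowed permissions with the repo's API secret, and the `auth.Handler` wrapper verifies incoming tokens with the same key before dispatching to the RPC mux. A self-contained round-trip sketch with the same gbrlsnchs/jwt/v3 library; the secret and permission strings below are placeholders, not the values Lotus derives from the keystore:

```go
package main

import (
	"fmt"

	"github.com/gbrlsnchs/jwt/v3"
)

type jwtPayload struct {
	Allow []string `json:"allow"`
}

func main() {
	// Placeholder secret; Lotus derives this key from the wallet repo keystore
	// via modules.APISecret.
	alg := jwt.NewHS256([]byte("example-secret"))

	// Sign: roughly what `lotus-wallet get-api-key` does with the admin permission set.
	token, err := jwt.Sign(&jwtPayload{Allow: []string{"admin"}}, alg)
	if err != nil {
		panic(err)
	}
	fmt.Println("API key:", string(token))

	// Verify: roughly what the auth.Handler wrapper does for each incoming request.
	var payload jwtPayload
	if _, err := jwt.Verify(token, alg, &payload); err != nil {
		panic(err)
	}
	fmt.Println("allowed perms:", payload.Allow)
}
```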
@@ -1756,13 +1756,14 @@ NAME:
    lotus state call - Invoke a method on an actor locally
 
 USAGE:
-   lotus state call [command options] [toAddress methodId <param1 param2 ...> (optional)]
+   lotus state call [command options] [toAddress methodId params (optional)]
 
 OPTIONS:
    --from value       (default: "f00")
    --value value      specify value field for invocation (default: "0")
-   --ret value        specify how to parse output (auto, raw, addr, big) (default: "auto")
-   --help, -h         show help (default: false)
+   --ret value        specify how to parse output (raw, decoded, base64, hex) (default: "decoded")
+   --encoding value   specify params encoding to parse (base64, hex) (default: "base64")
+   --help, -h         show help (default: false)
 
 ```
 
|
@ -20,6 +20,8 @@ We're happy to announce Lotus X.Y.Z...
|
|||||||
|
|
||||||
## ✅ Release Checklist
|
## ✅ Release Checklist
|
||||||
|
|
||||||
|
**Note for whomever is owning the release:** please capture notes as comments in this issue for anything you noticed that could be improved for future releases. There is a *Post Release* step below for incorporating changes back into the [RELEASE_ISSUE_TEMPLATE](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md), and this is easier done by collecting notes from along the way rather than just thinking about it at the end.
|
||||||
|
|
||||||
First steps:
|
First steps:
|
||||||
|
|
||||||
- [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage.
|
- [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage.
|
||||||
@ -40,6 +42,9 @@ Testing an RC:
|
|||||||
- [ ] Testground tests
|
- [ ] Testground tests
|
||||||
|
|
||||||
- [ ] **Stage 1 - Internal Testing**
|
- [ ] **Stage 1 - Internal Testing**
|
||||||
|
- Binaries
|
||||||
|
- [ ] Ensure the RC release has downloadable binaries
|
||||||
|
- [ ] Validate the binary is able to run on at least one platform
|
||||||
- Upgrade our testnet infra
|
- Upgrade our testnet infra
|
||||||
- [ ] 1 bootstrap node
|
- [ ] 1 bootstrap node
|
||||||
- [ ] 1 miner
|
- [ ] 1 miner
|
||||||
@ -100,7 +105,8 @@ Testing an RC:
|
|||||||
|
|
||||||
- [ ] **Post-Release**
|
- [ ] **Post-Release**
|
||||||
- [ ] Merge the `releases` branch back into `master`, ignoring the changes to `version.go` (keep the `-dev` version from master).
|
- [ ] Merge the `releases` branch back into `master`, ignoring the changes to `version.go` (keep the `-dev` version from master).
|
||||||
- [ ] Create an issue using this release issue template for the _next_ release.
|
- [ ] Update [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) with any improvements determined from this latest release iteration.
|
||||||
|
- [ ] Create an issue using [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) for the _next_ release.
|
||||||
|
|
||||||
## ❤️ Contributors
|
## ❤️ Contributors
|
||||||
|
|
||||||
|

extern/storage-sealing/fsm.go (vendored, 10 lines changed)
@@ -51,6 +51,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
 	AddPiece: planOne(
 		on(SectorPieceAdded{}, WaitDeals),
 		apply(SectorStartPacking{}),
+		apply(SectorAddPiece{}),
 		on(SectorAddPieceFailed{}, AddPieceFailed),
 	),
 	Packing: planOne(on(SectorPacked{}, GetTicket)),
@@ -217,6 +218,8 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
 
 func (m *Sealing) logEvents(events []statemachine.Event, state *SectorInfo) {
 	for _, event := range events {
+		log.Debugw("sector event", "sector", state.SectorNumber, "type", fmt.Sprintf("%T", event.User), "event", event.User)
+
 		e, err := json.Marshal(event)
 		if err != nil {
 			log.Errorf("marshaling event for logging: %+v", err)
@@ -227,6 +230,10 @@ func (m *Sealing) logEvents(events []statemachine.Event, state *SectorInfo) {
 			continue // don't log on every fsm restart
 		}
 
+		if len(e) > 8000 {
+			e = []byte(string(e[:8000]) + "... truncated")
+		}
+
 		l := Log{
 			Timestamp: uint64(time.Now().Unix()),
 			Message:   string(e),
@@ -566,6 +573,7 @@ func onReturning(mut mutator) func() (mutator, func(*SectorInfo) (bool, error))
 
 func planOne(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err error))) func(events []statemachine.Event, state *SectorInfo) (uint64, error) {
 	return func(events []statemachine.Event, state *SectorInfo) (uint64, error) {
+	eloop:
 		for i, event := range events {
 			if gm, ok := event.User.(globalMutator); ok {
 				gm.applyGlobal(state)
@@ -588,6 +596,8 @@ func planOne(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err e
 				if err != nil || !more {
 					return uint64(i + 1), err
 				}
+
+				continue eloop
 			}
 
 			_, ok := event.User.(Ignorable)

extern/storage-sealing/input.go (vendored, 47 lines changed)
@@ -27,6 +27,18 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e
 
 	m.inputLk.Lock()
 
+	if m.creating != nil && *m.creating == sector.SectorNumber {
+		m.creating = nil
+	}
+
+	sid := m.minerSectorID(sector.SectorNumber)
+
+	if len(m.assignedPieces[sid]) > 0 {
+		m.inputLk.Unlock()
+		// got assigned more pieces in the AddPiece state
+		return ctx.Send(SectorAddPiece{})
+	}
+
 	started, err := m.maybeStartSealing(ctx, sector, used)
 	if err != nil || started {
 		delete(m.openSectors, m.minerSectorID(sector.SectorNumber))
@@ -36,16 +48,16 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e
 		return err
 	}
 
-	m.openSectors[m.minerSectorID(sector.SectorNumber)] = &openSector{
-		used: used,
-		maybeAccept: func(cid cid.Cid) error {
-			// todo check deal start deadline (configurable)
-
-			sid := m.minerSectorID(sector.SectorNumber)
-			m.assignedPieces[sid] = append(m.assignedPieces[sid], cid)
-
-			return ctx.Send(SectorAddPiece{})
-		},
+	if _, has := m.openSectors[sid]; !has {
+		m.openSectors[sid] = &openSector{
+			used: used,
+			maybeAccept: func(cid cid.Cid) error {
+				// todo check deal start deadline (configurable)
+				m.assignedPieces[sid] = append(m.assignedPieces[sid], cid)
+
+				return ctx.Send(SectorAddPiece{})
+			},
+		}
 	}
 
 	go func() {
@@ -350,11 +362,19 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e
 			continue
 		}
 
+		avail := abi.PaddedPieceSize(ssize).Unpadded() - m.openSectors[mt.sector].used
+
+		if mt.size > avail {
+			continue
+		}
+
 		err := m.openSectors[mt.sector].maybeAccept(mt.deal)
 		if err != nil {
 			m.pendingPieces[mt.deal].accepted(mt.sector.Number, 0, err) // non-error case in handleAddPiece
 		}
 
+		m.openSectors[mt.sector].used += mt.padding + mt.size
+
 		m.pendingPieces[mt.deal].assigned = true
 		delete(toAssign, mt.deal)
 
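Note: the new `avail` guard compares a piece's size against the unpadded space left in an open sector before assigning the deal. As a reminder of the arithmetic, Filecoin's unpadded size is the padded size minus 1/128th of it (which is what `abi.PaddedPieceSize.Unpadded()` computes). A tiny standalone illustration with plain integers rather than the abi helpers:

```go
package main

import "fmt"

// unpadded returns the usable (unpadded) bytes of a padded sector or piece
// size: padded - padded/128, i.e. padded * 127/128.
func unpadded(padded uint64) uint64 {
	return padded - padded/128
}

func main() {
	ssize := uint64(32) << 30 // 32 GiB sector
	used := uint64(0)         // nothing assigned yet

	avail := unpadded(ssize) - used
	fmt.Println("available unpadded bytes:", avail) // 34091302912

	pieceSize := uint64(16) << 30
	fmt.Println("piece fits:", pieceSize <= avail) // true
}
```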
@@ -362,8 +382,6 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e
 			log.Errorf("sector %d rejected deal %s: %+v", mt.sector, mt.deal, err)
 			continue
 		}
-
-		delete(m.openSectors, mt.sector)
 	}
 
 	if len(toAssign) > 0 {
@@ -376,6 +394,10 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e
 }
 
 func (m *Sealing) tryCreateDealSector(ctx context.Context, sp abi.RegisteredSealProof) error {
+	if m.creating != nil {
+		return nil // new sector is being created right now
+	}
+
 	cfg, err := m.getConfig()
 	if err != nil {
 		return xerrors.Errorf("getting storage config: %w", err)
@@ -394,6 +416,8 @@ func (m *Sealing) tryCreateDealSector(ctx context.Context, sp abi.RegisteredSeal
 		return err
 	}
 
+	m.creating = &sid
+
 	log.Infow("Creating sector", "number", sid, "type", "deal", "proofType", sp)
 	return m.sectors.Send(uint64(sid), SectorStart{
 		ID: sid,
@@ -422,6 +446,7 @@ func (m *Sealing) createSector(ctx context.Context, cfg sealiface.Config, sp abi
 }
 
 func (m *Sealing) StartPacking(sid abi.SectorNumber) error {
+	log.Infow("starting to seal deal sector", "sector", sid, "trigger", "user")
 	return m.sectors.Send(uint64(sid), SectorStartPacking{})
 }
 

extern/storage-sealing/sealing.go (vendored, 1 line changed)
@@ -93,6 +93,7 @@ type Sealing struct {
 	sectorTimers   map[abi.SectorID]*time.Timer
 	pendingPieces  map[cid.Cid]*pendingPiece
 	assignedPieces map[abi.SectorID][]cid.Cid
+	creating       *abi.SectorNumber // used to prevent a race where we could create a new sector more than once
 
 	upgradeLk sync.Mutex
 	toUpgrade map[abi.SectorNumber]struct{}

extern/storage-sealing/states_sealing.go (vendored, 2 lines changed)
@@ -37,7 +37,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err
 		}
 
 		// todo: return to the sealing queue (this is extremely unlikely to happen)
-		pp.accepted(sector.SectorNumber, 0, xerrors.Errorf("sector entered packing state early"))
+		pp.accepted(sector.SectorNumber, 0, xerrors.Errorf("sector %d entered packing state early", sector.SectorNumber))
 	}
 
 	delete(m.openSectors, m.minerSectorID(sector.SectorNumber))
@@ -419,7 +419,7 @@ func (m *Miner) GetBestMiningCandidate(ctx context.Context) (*MiningBase, error)
 // 1.
 func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *types.BlockMsg, err error) {
 	log.Debugw("attempting to mine a block", "tipset", types.LogCids(base.TipSet.Cids()))
-	start := build.Clock.Now()
+	tStart := build.Clock.Now()
 
 	round := base.TipSet.Height() + base.NullRounds + 1
 
@ -428,6 +428,9 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
	var mbi *api.MiningBaseInfo
	var rbase types.BeaconEntry
	defer func() {
+
+		var hasMinPower bool
+
		// mbi can be nil if we are deep in penalty and there are 0 eligible sectors
		// in the current deadline. If this case - put together a dummy one for reporting
		// https://github.com/filecoin-project/lotus/blob/v1.9.0/chain/stmgr/utils.go#L500-L502
@ -435,17 +438,24 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
		mbi = &api.MiningBaseInfo{
			NetworkPower:      big.NewInt(-1), // we do not know how big the network is at this point
			EligibleForMining: false,
-			MinerPower:        big.NewInt(0), // but we do know we do not have anything
+			MinerPower:        big.NewInt(0), // but we do know we do not have anything eligible
		}
+
+		// try to opportunistically pull actual power and plug it into the fake mbi
+		if pow, err := m.api.StateMinerPower(ctx, m.address, base.TipSet.Key()); err == nil && pow != nil {
+			hasMinPower = pow.HasMinPower
+			mbi.MinerPower = pow.MinerPower.QualityAdjPower
+			mbi.NetworkPower = pow.TotalPower.QualityAdjPower
+		}
	}

-	isLate := uint64(start.Unix()) > (base.TipSet.MinTimestamp() + uint64(base.NullRounds*builtin.EpochDurationSeconds) + build.PropagationDelaySecs)
+	isLate := uint64(tStart.Unix()) > (base.TipSet.MinTimestamp() + uint64(base.NullRounds*builtin.EpochDurationSeconds) + build.PropagationDelaySecs)

	logStruct := []interface{}{
-		"tookMilliseconds", (build.Clock.Now().UnixNano() - start.UnixNano()) / 1_000_000,
+		"tookMilliseconds", (build.Clock.Now().UnixNano() - tStart.UnixNano()) / 1_000_000,
		"forRound", int64(round),
		"baseEpoch", int64(base.TipSet.Height()),
-		"baseDeltaSeconds", uint64(start.Unix()) - base.TipSet.MinTimestamp(),
+		"baseDeltaSeconds", uint64(tStart.Unix()) - base.TipSet.MinTimestamp(),
		"nullRounds", int64(base.NullRounds),
		"lateStart", isLate,
		"beaconEpoch", rbase.Round,
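The hunk above builds a placeholder MiningBaseInfo when mbi is nil and then opportunistically overwrites its power figures via StateMinerPower, keeping the dummy values if the lookup fails. Below is a self-contained sketch of that fallback-fill pattern; the types and the lookupPower function are stand-ins for illustration, not the lotus API.

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// powerClaim is a stand-in for the fields of interest.
type powerClaim struct {
	MinerPower   *big.Int
	NetworkPower *big.Int
	HasMinPower  bool
}

// lookupPower plays the role of the state lookup and may fail.
func lookupPower(ok bool) (*powerClaim, error) {
	if !ok {
		return nil, errors.New("state lookup failed")
	}
	return &powerClaim{MinerPower: big.NewInt(42), NetworkPower: big.NewInt(1000), HasMinPower: true}, nil
}

func main() {
	// Start from the dummy report: network power unknown (-1), own power zero.
	minerPower, networkPower := big.NewInt(0), big.NewInt(-1)
	hasMinPower := false

	// Opportunistically overwrite the placeholders; on error the dummies survive.
	if pow, err := lookupPower(true); err == nil && pow != nil {
		hasMinPower = pow.HasMinPower
		minerPower = pow.MinerPower
		networkPower = pow.NetworkPower
	}
	fmt.Println(minerPower, networkPower, hasMinPower) // 42 1000 true
}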
@ -459,7 +469,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type

		if err != nil {
			log.Errorw("completed mineOne", logStruct...)
-		} else if isLate {
+		} else if isLate || (hasMinPower && !mbi.EligibleForMining) {
			log.Warnw("completed mineOne", logStruct...)
		} else {
			log.Infow("completed mineOne", logStruct...)
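With hasMinPower in hand, the completed-mineOne line is escalated to a warning not only when the attempt started late but also when a miner that has min power was not eligible this round. A tiny sketch of that severity selection (the error branch is omitted; the helper name is illustrative):

package main

import "fmt"

// pickSeverity mirrors the decision above: warn when the attempt started late,
// or when a miner with min power was not eligible this round; info otherwise.
func pickSeverity(isLate, hasMinPower, eligible bool) string {
	if isLate || (hasMinPower && !eligible) {
		return "WARN"
	}
	return "INFO"
}

func main() {
	fmt.Println(pickSeverity(false, true, false)) // WARN: has power but was not eligible
	fmt.Println(pickSeverity(false, true, true))  // INFO: normal completed round
}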
@ -480,16 +490,10 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
		return nil, nil
	}

-	tMBI := build.Clock.Now()
-
-	beaconPrev := mbi.PrevBeaconEntry
-
-	tDrand := build.Clock.Now()
-	bvals := mbi.BeaconEntries
-
	tPowercheck := build.Clock.Now()

-	rbase = beaconPrev
+	bvals := mbi.BeaconEntries
+	rbase = mbi.PrevBeaconEntry
	if len(bvals) > 0 {
		rbase = bvals[len(bvals)-1]
	}
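The hunk above drops the intermediate tMBI and tDrand checkpoints and restructures the beacon selection: rbase starts from mbi.PrevBeaconEntry and is replaced by the last of mbi.BeaconEntries when any are present. A self-contained sketch of that selection (beaconEntry and latestBeacon are illustrative stand-ins):

package main

import "fmt"

type beaconEntry struct{ Round uint64 }

// latestBeacon mirrors the selection above: take the last entry of the
// current set if there is one, otherwise fall back to the previous entry.
func latestBeacon(prev beaconEntry, entries []beaconEntry) beaconEntry {
	rbase := prev
	if len(entries) > 0 {
		rbase = entries[len(entries)-1]
	}
	return rbase
}

func main() {
	prev := beaconEntry{Round: 10}
	fmt.Println(latestBeacon(prev, nil))                                      // {10}: falls back to prev
	fmt.Println(latestBeacon(prev, []beaconEntry{{Round: 11}, {Round: 12}})) // {12}: last entry wins
}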
@ -553,7 +557,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
	}

	tCreateBlock := build.Clock.Now()
-	dur := tCreateBlock.Sub(start)
+	dur := tCreateBlock.Sub(tStart)
	parentMiners := make([]address.Address, len(base.TipSet.Blocks()))
	for i, header := range base.TipSet.Blocks() {
		parentMiners[i] = header.Miner
@ -561,9 +565,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
	log.Infow("mined new block", "cid", minedBlock.Cid(), "height", int64(minedBlock.Header.Height), "miner", minedBlock.Header.Miner, "parents", parentMiners, "parentTipset", base.TipSet.Key().String(), "took", dur)
	if dur > time.Second*time.Duration(build.BlockDelaySecs) {
		log.Warnw("CAUTION: block production took longer than the block delay. Your computer may not be fast enough to keep up",
-			"tMinerBaseInfo ", tMBI.Sub(start),
-			"tDrand ", tDrand.Sub(tMBI),
-			"tPowercheck ", tPowercheck.Sub(tDrand),
+			"tPowercheck ", tPowercheck.Sub(tStart),
			"tTicket ", tTicket.Sub(tPowercheck),
			"tSeed ", tSeed.Sub(tTicket),
			"tProof ", tProof.Sub(tSeed),
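Because the tMBI and tDrand checkpoints are gone, the slow-block warning now reports tPowercheck as its first interval, measured from tStart; each subsequent field is the delta from the previous checkpoint, so the intervals still add up to the total production time. A runnable sketch with made-up durations (the checkpoint values here are assumptions for illustration only):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Checkpoints as in the logging above; each reported field is the delta
	// from the previous checkpoint, so they sum to the total elapsed time.
	tStart := time.Now()
	tPowercheck := tStart.Add(50 * time.Millisecond)
	tTicket := tPowercheck.Add(20 * time.Millisecond)
	tSeed := tTicket.Add(5 * time.Millisecond)
	tProof := tSeed.Add(300 * time.Millisecond)

	fmt.Println("tPowercheck", tPowercheck.Sub(tStart))
	fmt.Println("tTicket    ", tTicket.Sub(tPowercheck))
	fmt.Println("tSeed      ", tSeed.Sub(tTicket))
	fmt.Println("tProof     ", tProof.Sub(tSeed))
}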
@ -77,7 +77,7 @@ func (hs *Service) HandleStream(s inet.Stream) {
		"hash", hmsg.GenesisHash)

	if hmsg.GenesisHash != hs.syncer.Genesis.Cids()[0] {
-		log.Warnf("other peer has different genesis! (%s)", hmsg.GenesisHash)
+		log.Debugf("other peer has different genesis! (%s)", hmsg.GenesisHash)
		_ = s.Conn().Close()
		return
	}
@ -244,13 +244,13 @@ func (sm *StorageMinerAPI) SectorsList(context.Context) ([]abi.SectorNumber, err
		return nil, err
	}

-	out := make([]abi.SectorNumber, len(sectors))
-	for i, sector := range sectors {
+	out := make([]abi.SectorNumber, 0, len(sectors))
+	for _, sector := range sectors {
		if sector.State == sealing.UndefinedSectorState {
			continue // sector ID not set yet
		}

-		out[i] = sector.SectorNumber
+		out = append(out, sector.SectorNumber)
	}
	return out, nil
}
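The SectorsList fix addresses a classic preallocation bug: the old code made a slice of length len(sectors) and wrote out[i], so every skipped (undefined) sector left a zero-valued entry in the result. Allocating with capacity only and appending avoids that. A self-contained sketch of the corrected pattern (sectorSketch is an illustrative stand-in for the real sector info):

package main

import "fmt"

type sectorSketch struct {
	Number  uint64
	Defined bool
}

// filterSectors mirrors the fixed loop: allocate with capacity only and
// append, so skipped sectors do not leave zero-valued entries behind.
func filterSectors(sectors []sectorSketch) []uint64 {
	out := make([]uint64, 0, len(sectors))
	for _, s := range sectors {
		if !s.Defined {
			continue // sector ID not set yet
		}
		out = append(out, s.Number)
	}
	return out
}

func main() {
	in := []sectorSketch{{1, true}, {0, false}, {3, true}}
	fmt.Println(filterSectors(in)) // [1 3]: no spurious zero for the skipped sector
}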
@ -66,6 +66,7 @@ func TestBatchDealInput(t *testing.T) {
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")
+	logging.SetLogLevel("sectors", "DEBUG")

	blockTime := 10 * time.Millisecond