Merge branch 'master' into next
commit cfdbc2312d

Makefile (6 changes)
@@ -191,6 +191,12 @@ health:
 .PHONY: health
 BINS+=health
 
+testground:
+	go build -tags testground -o /dev/null ./cmd/lotus
+
+.PHONY: testground
+BINS+=testground
+
 # MISC
 
 buildall: $(BINS)
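Note (illustrative, not part of the commit): the new testground target only type-checks ./cmd/lotus with the testground build tag enabled and discards the binary (-o /dev/null). A minimal sketch of how such a tag gates a parameter file at compile time, assuming a file in the build package; the value shown is made up for the example:

	// +build testground

	package build

	// Compiled only when `go build -tags testground` is used; normal builds
	// use the const-based parameter files instead.
	var BlockDelaySecs = uint64(2)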
@@ -59,10 +59,16 @@ type StorageMiner interface {
 
 	DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
 	DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error)
-	DealsSetAcceptingStorageDeals(context.Context, bool) error
-	DealsSetAcceptingRetrievalDeals(context.Context, bool) error
+	DealsConsiderOnlineStorageDeals(context.Context) (bool, error)
+	DealsSetConsiderOnlineStorageDeals(context.Context, bool) error
+	DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error)
+	DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error
 	DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error)
 	DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error
+	DealsConsiderOfflineStorageDeals(context.Context) (bool, error)
+	DealsSetConsiderOfflineStorageDeals(context.Context, bool) error
+	DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error)
+	DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error
 
 	StorageAddLocal(ctx context.Context, path string) error
 }
@@ -225,12 +225,18 @@ type StorageMinerStruct struct {
 		StorageLock    func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error `perm:"admin"`
 		StorageTryLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) `perm:"admin"`
 
 		DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"`
 		DealsList       func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
-		DealsSetAcceptingStorageDeals   func(context.Context, bool) error        `perm:"admin"`
-		DealsSetAcceptingRetrievalDeals func(context.Context, bool) error        `perm:"admin"`
-		DealsPieceCidBlocklist          func(context.Context) ([]cid.Cid, error) `perm:"admin"`
-		DealsSetPieceCidBlocklist       func(context.Context, []cid.Cid) error   `perm:"read"`
+		DealsConsiderOnlineStorageDeals       func(context.Context) (bool, error)      `perm:"read"`
+		DealsSetConsiderOnlineStorageDeals    func(context.Context, bool) error        `perm:"admin"`
+		DealsConsiderOnlineRetrievalDeals     func(context.Context) (bool, error)      `perm:"read"`
+		DealsSetConsiderOnlineRetrievalDeals  func(context.Context, bool) error        `perm:"admin"`
+		DealsConsiderOfflineStorageDeals      func(context.Context) (bool, error)      `perm:"read"`
+		DealsSetConsiderOfflineStorageDeals   func(context.Context, bool) error        `perm:"admin"`
+		DealsConsiderOfflineRetrievalDeals    func(context.Context) (bool, error)      `perm:"read"`
+		DealsSetConsiderOfflineRetrievalDeals func(context.Context, bool) error        `perm:"admin"`
+		DealsPieceCidBlocklist                func(context.Context) ([]cid.Cid, error) `perm:"read"`
+		DealsSetPieceCidBlocklist             func(context.Context, []cid.Cid) error   `perm:"admin"`
 
 		StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
 	}
@@ -883,12 +889,20 @@ func (c *StorageMinerStruct) DealsList(ctx context.Context) ([]storagemarket.Sto
 	return c.Internal.DealsList(ctx)
 }
 
-func (c *StorageMinerStruct) DealsSetAcceptingStorageDeals(ctx context.Context, b bool) error {
-	return c.Internal.DealsSetAcceptingStorageDeals(ctx, b)
+func (c *StorageMinerStruct) DealsConsiderOnlineStorageDeals(ctx context.Context) (bool, error) {
+	return c.Internal.DealsConsiderOnlineStorageDeals(ctx)
 }
 
-func (c *StorageMinerStruct) DealsSetAcceptingRetrievalDeals(ctx context.Context, b bool) error {
-	return c.Internal.DealsSetAcceptingRetrievalDeals(ctx, b)
+func (c *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(ctx context.Context, b bool) error {
+	return c.Internal.DealsSetConsiderOnlineStorageDeals(ctx, b)
+}
+
+func (c *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(ctx context.Context) (bool, error) {
+	return c.Internal.DealsConsiderOnlineRetrievalDeals(ctx)
+}
+
+func (c *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(ctx context.Context, b bool) error {
+	return c.Internal.DealsSetConsiderOnlineRetrievalDeals(ctx, b)
 }
 
 func (c *StorageMinerStruct) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) {
@@ -899,6 +913,22 @@ func (c *StorageMinerStruct) DealsSetPieceCidBlocklist(ctx context.Context, cids
 	return c.Internal.DealsSetPieceCidBlocklist(ctx, cids)
 }
 
+func (c *StorageMinerStruct) DealsConsiderOfflineStorageDeals(ctx context.Context) (bool, error) {
+	return c.Internal.DealsConsiderOfflineStorageDeals(ctx)
+}
+
+func (c *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(ctx context.Context, b bool) error {
+	return c.Internal.DealsSetConsiderOfflineStorageDeals(ctx, b)
+}
+
+func (c *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(ctx context.Context) (bool, error) {
+	return c.Internal.DealsConsiderOfflineRetrievalDeals(ctx)
+}
+
+func (c *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(ctx context.Context, b bool) error {
+	return c.Internal.DealsSetConsiderOfflineRetrievalDeals(ctx, b)
+}
+
 func (c *StorageMinerStruct) StorageAddLocal(ctx context.Context, path string) error {
 	return c.Internal.StorageAddLocal(ctx, path)
 }
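Note (illustrative, not part of the commit): a hedged sketch of how a caller might drive the renamed deal-acceptance API through the StorageMiner interface above; "api" is assumed to be a connected StorageMiner client, and the flow shown is an example rather than code from this change:

	// Read the current setting for online storage deals, then turn them off.
	accepting, err := api.DealsConsiderOnlineStorageDeals(ctx)
	if err != nil {
		return err
	}
	if accepting {
		// Requires the admin permission per the struct tags above.
		if err := api.DealsSetConsiderOnlineStorageDeals(ctx, false); err != nil {
			return err
		}
	}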
@@ -20,10 +20,9 @@ func init() {
 	BuildType |= Build2k
 }
 
-// Seconds
-const BlockDelay = 2
+const BlockDelaySecs = uint64(2)
 
-const PropagationDelay = 3
+const PropagationDelaySecs = uint64(3)
 
 // SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after
 // which the miner is slashed
build/params_shared_funcs.go (new file, 38 lines)
@@ -0,0 +1,38 @@
+package build
+
+import (
+	"sort"
+
+	"github.com/libp2p/go-libp2p-core/protocol"
+
+	"github.com/filecoin-project/specs-actors/actors/abi"
+	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
+
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
+)
+
+func DefaultSectorSize() abi.SectorSize {
+	szs := make([]abi.SectorSize, 0, len(miner.SupportedProofTypes))
+	for spt := range miner.SupportedProofTypes {
+		ss, err := spt.SectorSize()
+		if err != nil {
+			panic(err)
+		}
+
+		szs = append(szs, ss)
+	}
+
+	sort.Slice(szs, func(i, j int) bool {
+		return szs[i] < szs[j]
+	})
+
+	return szs[0]
+}
+
+// Core network constants
+
+func BlocksTopic(netName dtypes.NetworkName) string   { return "/fil/blocks/" + string(netName) }
+func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + string(netName) }
+func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
+	return protocol.ID("/fil/kad/" + string(netName))
+}
@@ -1,10 +1,9 @@
+// +build !testground
+
 package build
 
 import (
 	"math/big"
-	"sort"
 
-	"github.com/libp2p/go-libp2p-core/protocol"
-
 	"github.com/filecoin-project/specs-actors/actors/abi"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
@@ -13,32 +12,6 @@ import (
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )
 
-func DefaultSectorSize() abi.SectorSize {
-	szs := make([]abi.SectorSize, 0, len(miner.SupportedProofTypes))
-	for spt := range miner.SupportedProofTypes {
-		ss, err := spt.SectorSize()
-		if err != nil {
-			panic(err)
-		}
-
-		szs = append(szs, ss)
-	}
-
-	sort.Slice(szs, func(i, j int) bool {
-		return szs[i] < szs[j]
-	})
-
-	return szs[0]
-}
-
-// Core network constants
-
-func BlocksTopic(netName dtypes.NetworkName) string   { return "/fil/blocks/" + string(netName) }
-func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + string(netName) }
-func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
-	return protocol.ID("/fil/kad/" + string(netName))
-}
-
 // /////
 // Storage
 
@@ -48,8 +21,7 @@ const UnixfsLinksPerLevel = 1024
 // /////
 // Consensus / Network
 
-// Seconds
-const AllowableClockDrift = 1
+const AllowableClockDriftSecs = uint64(1)
 
 // Epochs
 const ForkLengthThreshold = Finality
@@ -59,12 +31,12 @@ var BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch)
 
 // Epochs
 const Finality = miner.ChainFinality
-const MessageConfidence = 5
+const MessageConfidence = uint64(5)
 
 // constants for Weight calculation
 // The ratio of weight contributed by short-term vs long-term factors in a given round
 const WRatioNum = int64(1)
-const WRatioDen = 2
+const WRatioDen = uint64(2)
 
 // /////
 // Proofs
@@ -82,25 +54,25 @@ const MaxSealLookback = SealRandomnessLookbackLimit + 2000 // TODO: Get from spe
 // Mining
 
 // Epochs
-const TicketRandomnessLookback = 1
+const TicketRandomnessLookback = abi.ChainEpoch(1)
 
-const WinningPoStSectorSetLookback = 10
+const WinningPoStSectorSetLookback = abi.ChainEpoch(10)
 
 // /////
 // Devnet settings
 
-const TotalFilecoin = 2_000_000_000
-const MiningRewardTotal = 1_400_000_000
+const TotalFilecoin = uint64(2_000_000_000)
+const MiningRewardTotal = uint64(1_400_000_000)
 
-const FilecoinPrecision = 1_000_000_000_000_000_000
+const FilecoinPrecision = uint64(1_000_000_000_000_000_000)
 
 var InitialRewardBalance *big.Int
 
 // TODO: Move other important consts here
 
 func init() {
-	InitialRewardBalance = big.NewInt(MiningRewardTotal)
-	InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(FilecoinPrecision))
+	InitialRewardBalance = big.NewInt(int64(MiningRewardTotal))
+	InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision)))
 }
 
 // Sync
build/params_testground.go (new file, 73 lines)
@@ -0,0 +1,73 @@
+// +build testground
+
+// This file makes hardcoded parameters (const) configurable as vars.
+//
+// Its purpose is to unlock various degrees of flexibility and parametrization
+// when writing Testground plans for Lotus.
+//
+package build
+
+import (
+	"math/big"
+
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
+
+	"github.com/filecoin-project/specs-actors/actors/abi"
+	"github.com/filecoin-project/specs-actors/actors/builtin"
+	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
+)
+
+var (
+	UnixfsChunkSize     = uint64(1 << 20)
+	UnixfsLinksPerLevel = 1024
+
+	BlocksPerEpoch       = uint64(builtin.ExpectedLeadersPerEpoch)
+	BlockMessageLimit    = 512
+	BlockGasLimit        = int64(100_000_000_000)
+	BlockDelaySecs       = uint64(builtin.EpochDurationSeconds)
+	PropagationDelaySecs = uint64(6)
+
+	AllowableClockDriftSecs = uint64(1)
+
+	Finality            = miner.ChainFinalityish
+	ForkLengthThreshold = Finality
+
+	SlashablePowerDelay        = 20
+	InteractivePoRepConfidence = 6
+
+	MessageConfidence uint64 = 5
+
+	WRatioNum = int64(1)
+	WRatioDen = uint64(2)
+
+	BadBlockCacheSize     = 1 << 15
+	BlsSignatureCacheSize = 40000
+	VerifSigCacheSize     = 32000
+
+	SealRandomnessLookback      = Finality
+	SealRandomnessLookbackLimit = SealRandomnessLookback + 2000
+	MaxSealLookback             = SealRandomnessLookbackLimit + 2000
+
+	TicketRandomnessLookback     = abi.ChainEpoch(1)
+	WinningPoStSectorSetLookback = abi.ChainEpoch(10)
+
+	TotalFilecoin     uint64 = 2_000_000_000
+	MiningRewardTotal uint64 = 1_400_000_000
+
+	FilecoinPrecision uint64 = 1_000_000_000_000_000_000
+
+	InitialRewardBalance = func() *big.Int {
+		v := big.NewInt(int64(MiningRewardTotal))
+		v = v.Mul(v, big.NewInt(int64(FilecoinPrecision)))
+		return v
+	}()
+
+	DrandConfig = dtypes.DrandConfig{
+		Servers: []string{
+			"https://pl-eu.testnet.drand.sh",
+			"https://pl-us.testnet.drand.sh",
+			"https://pl-sin.testnet.drand.sh",
+		},
+		ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"138a324aa6540f93d0dad002aa89454b1bec2b6e948682cde6bd4db40f4b7c9b"}`,
+	}
+)
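Note (illustrative, not part of the commit): because params_testground.go declares these as vars rather than consts, a Testground plan can overwrite them before the node starts. A minimal sketch under that assumption; the override values and the plan's package name are made up for the example:

	// +build testground

	package main // hypothetical test-plan entry point

	import "github.com/filecoin-project/lotus/build"

	func init() {
		build.BlockDelaySecs = 2       // speed up block production for the plan
		build.PropagationDelaySecs = 1 // illustrative value only
	}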
@@ -1,5 +1,6 @@
 // +build !debug
 // +build !2k
+// +build !testground
 
 package build
 
@@ -19,7 +20,6 @@ func init() {
 	}
 }
 
-// Seconds
-const BlockDelay = builtin.EpochDurationSeconds
+const BlockDelaySecs = uint64(builtin.EpochDurationSeconds)
 
-const PropagationDelay = 6
+const PropagationDelaySecs = uint64(6)
@@ -443,8 +443,8 @@ func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler,
 	}
 
 	we.lk.Lock()
-	we.matchers[id] = mf
 	defer we.lk.Unlock()
+	we.matchers[id] = mf
 
 	return nil
 }
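Note (illustrative, not part of the commit): the reordering above sets up the deferred unlock before touching shared state, so any later return path releases the mutex. The general pattern, sketched with hypothetical names:

	mu.Lock()
	defer mu.Unlock() // arrange the unlock first
	matchers[id] = mf // then mutate shared state under the lock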
@@ -476,10 +476,11 @@ func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventDat
 		return nil, err
 	}
 
+	me.lk.RLock()
+	defer me.lk.RUnlock()
+
 	res := make(map[triggerID]eventData)
 	me.messagesForTs(pts, func(msg *types.Message) {
-		me.lk.RLock()
-		defer me.lk.RUnlock()
 		// TODO: provide receipts
 
 		for tid, matchFns := range me.matchers {
@@ -64,7 +64,6 @@ func (m mockAPI) setActor(tsk types.TipSetKey, act *types.Actor) {
 
 func TestPredicates(t *testing.T) {
 	ctx := context.Background()
-
 	bs := bstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
 	store := cbornode.NewCborStore(bs)
 
@@ -133,8 +132,29 @@ func TestPredicates(t *testing.T) {
 	}
 	deal2 := changedDeals[abi.DealID(2)]
 	if deal2.From.SlashEpoch != 0 || deal2.To.SlashEpoch != 6 {
-		t.Fatal("Unexpected change to LastUpdatedEpoch")
+		t.Fatal("Unexpected change to SlashEpoch")
 	}
+
+	// Test that OnActorStateChanged does not call the callback if the state has not changed
+	mockAddr, err := address.NewFromString("t01")
+	require.NoError(t, err)
+	actorDiffFn := preds.OnActorStateChanged(mockAddr, func(context.Context, cid.Cid, cid.Cid) (bool, UserData, error) {
+		t.Fatal("No state change so this should not be called")
+		return false, nil, nil
+	})
+	changed, _, err = actorDiffFn(ctx, oldState, oldState)
+	require.NoError(t, err)
+	require.False(t, changed)
+
+	// Test that OnDealStateChanged does not call the callback if the state has not changed
+	diffDealStateFn := preds.OnDealStateChanged(func(context.Context, *amt.Root, *amt.Root) (bool, UserData, error) {
+		t.Fatal("No state change so this should not be called")
+		return false, nil, nil
+	})
+	marketState := createEmptyMarketState(t, store)
+	changed, _, err = diffDealStateFn(ctx, marketState, marketState)
+	require.NoError(t, err)
+	require.False(t, changed)
 }
 
 func mockTipset(miner address.Address, timestamp uint64) (*types.TipSet, error) {
@@ -153,11 +173,7 @@ func mockTipset(miner address.Address, timestamp uint64) (*types.TipSet, error)
 func createMarketState(ctx context.Context, t *testing.T, store *cbornode.BasicIpldStore, deals map[abi.DealID]*market.DealState) cid.Cid {
 	rootCid := createAMT(ctx, t, store, deals)
 
-	emptyArrayCid, err := amt.NewAMT(store).Flush(context.TODO())
-	require.NoError(t, err)
-	emptyMap, err := store.Put(context.TODO(), hamt.NewNode(store, hamt.UseTreeBitWidth(5)))
-	require.NoError(t, err)
-	state := market.ConstructState(emptyArrayCid, emptyMap, emptyMap)
+	state := createEmptyMarketState(t, store)
 	state.States = rootCid
 
 	stateC, err := store.Put(ctx, state)
@@ -165,6 +181,14 @@ func createMarketState(ctx context.Context, t *testing.T, store *cbornode.BasicI
 	return stateC
 }
 
+func createEmptyMarketState(t *testing.T, store *cbornode.BasicIpldStore) *market.State {
+	emptyArrayCid, err := amt.NewAMT(store).Flush(context.TODO())
+	require.NoError(t, err)
+	emptyMap, err := store.Put(context.TODO(), hamt.NewNode(store, hamt.UseTreeBitWidth(5)))
+	require.NoError(t, err)
+	return market.ConstructState(emptyArrayCid, emptyMap, emptyMap)
+}
+
 func createAMT(ctx context.Context, t *testing.T, store *cbornode.BasicIpldStore, deals map[abi.DealID]*market.DealState) cid.Cid {
 	root := amt.NewAMT(store)
 	for dealID, dealState := range deals {
@@ -196,7 +196,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
 			*genm2,
 		},
 		NetworkName: "",
-		Timestamp:   uint64(time.Now().Add(-500 * build.BlockDelay * time.Second).Unix()),
+		Timestamp:   uint64(time.Now().Add(-500 * time.Duration(build.BlockDelaySecs) * time.Second).Unix()),
 	}
 
 	genb, err := genesis2.MakeGenesisBlock(context.TODO(), bs, sys, tpl)
@@ -223,7 +223,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
 	miners := []address.Address{maddr1, maddr2}
 
 	beac := beacon.NewMockBeacon(time.Second)
-	//beac, err := drand.NewDrandBeacon(tpl.Timestamp, build.BlockDelay)
+	//beac, err := drand.NewDrandBeacon(tpl.Timestamp, build.BlockDelaySecs)
 	//if err != nil {
 	//return nil, xerrors.Errorf("creating drand beacon: %w", err)
 	//}
@@ -414,7 +414,7 @@ func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticke
 	if cg.Timestamper != nil {
 		ts = cg.Timestamper(parents, height-parents.Height())
 	} else {
-		ts = parents.MinTimestamp() + uint64((height-parents.Height())*build.BlockDelay)
+		ts = parents.MinTimestamp() + uint64(height-parents.Height())*build.BlockDelaySecs
 	}
 
 	fblk, err := MinerCreateBlock(context.TODO(), cg.sm, cg.w, &api.BlockTemplate{
@@ -150,6 +150,17 @@ func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) {
 	}
 
 	aggSig := bls.Aggregate(blsSigs)
+	if aggSig == nil {
+		if len(sigs) > 0 {
+			return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs))
+		}
+
+		return &crypto.Signature{
+			Type: crypto.SigTypeBLS,
+			Data: new(bls.Signature)[:],
+		}, nil
+	}
+
 	return &crypto.Signature{
 		Type: crypto.SigTypeBLS,
 		Data: aggSig[:],
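Note (illustrative, not part of the commit): the added guard treats a nil aggregate over a non-empty input as an error, and turns "no signatures" into a zero-valued BLS signature. new(bls.Signature)[:] works because bls.Signature is assumed here to be a fixed-size byte array, so slicing a freshly allocated value yields an all-zero slice of the right length; roughly:

	var zero [96]byte // 96 bytes is the usual BLS12-381 signature size (assumption)
	data := zero[:]   // all-zero slice, same idea as new(bls.Signature)[:]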
@@ -187,7 +187,7 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*Messa
 
 	mp := &MessagePool{
 		closer:        make(chan struct{}),
-		repubTk:       time.NewTicker(build.BlockDelay * 10 * time.Second),
+		repubTk:       time.NewTicker(time.Duration(build.BlockDelaySecs) * 10 * time.Second),
 		localAddrs:    make(map[address.Address]struct{}),
 		pending:       make(map[address.Address]*msgSet),
 		minGasPrice:   types.NewInt(0),
@@ -13,6 +13,7 @@ import (
 
 	"github.com/filecoin-project/go-address"
 	amt "github.com/filecoin-project/go-amt-ipld/v2"
+	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/sector-storage/ffiwrapper"
 	"github.com/filecoin-project/specs-actors/actors/abi"
 	"github.com/filecoin-project/specs-actors/actors/abi/big"
@@ -174,13 +175,34 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
 		return nil, xerrors.Errorf("(get sectors) failed to load miner actor state: %w", err)
 	}
 
-	// TODO: Optimization: we could avoid loaditg the whole proving set here if we had AMT.GetNth with bitfield filtering
-	sectorSet, err := GetProvingSetRaw(ctx, sm, mas)
-	if err != nil {
-		return nil, xerrors.Errorf("getting proving set: %w", err)
+	cst := cbor.NewCborStore(sm.cs.Blockstore())
+	var deadlines miner.Deadlines
+	if err := cst.Get(ctx, mas.Deadlines, &deadlines); err != nil {
+		return nil, xerrors.Errorf("failed to load deadlines: %w", err)
 	}
 
-	if len(sectorSet) == 0 {
+	notProving, err := abi.BitFieldUnion(mas.Faults, mas.Recoveries)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to union faults and recoveries: %w", err)
+	}
+
+	allSectors, err := bitfield.MultiMerge(append(deadlines.Due[:], mas.NewSectors)...)
+	if err != nil {
+		return nil, xerrors.Errorf("merging deadline bitfields failed: %w", err)
+	}
+
+	provingSectors, err := bitfield.SubtractBitField(allSectors, notProving)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to subtract non-proving sectors from set: %w", err)
+	}
+
+	numProvSect, err := provingSectors.Count()
+	if err != nil {
+		return nil, xerrors.Errorf("failed to count bits: %w", err)
+	}
+
+	// TODO(review): is this right? feels fishy to me
+	if numProvSect == 0 {
 		return nil, nil
 	}
 
@@ -199,17 +221,34 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
 		return nil, xerrors.Errorf("getting miner ID: %w", err)
 	}
 
-	ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, wpt, abi.ActorID(mid), rand, uint64(len(sectorSet)))
+	ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, wpt, abi.ActorID(mid), rand, numProvSect)
 	if err != nil {
 		return nil, xerrors.Errorf("generating winning post challenges: %w", err)
 	}
 
+	sectors, err := provingSectors.All(miner.SectorsMax)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to enumerate all sector IDs: %w", err)
+	}
+
+	sectorAmt, err := amt.LoadAMT(ctx, cst, mas.Sectors)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to load sectors amt: %w", err)
+	}
+
 	out := make([]abi.SectorInfo, len(ids))
 	for i, n := range ids {
+		sid := sectors[n]
+
+		var sinfo miner.SectorOnChainInfo
+		if err := sectorAmt.Get(ctx, sid, &sinfo); err != nil {
+			return nil, xerrors.Errorf("failed to get sector %d: %w", sid, err)
+		}
+
 		out[i] = abi.SectorInfo{
 			SealProof:    spt,
-			SectorNumber: sectorSet[n].ID,
-			SealedCID:    sectorSet[n].Info.SealedCID,
+			SectorNumber: sinfo.SectorNumber,
+			SealedCID:    sinfo.SealedCID,
 		}
 	}
 
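Note (illustrative, not part of the commit): the reworked GetSectorsForWinningPoSt derives the proving set from on-chain bitfields instead of loading a proving-set array: union the faulty and recovering sectors, merge every deadline's sectors with the new sectors, then subtract. A condensed sketch of the same flow with names taken from the diff (error handling elided to keep it short):

	notProving, _ := abi.BitFieldUnion(mas.Faults, mas.Recoveries)
	allSectors, _ := bitfield.MultiMerge(append(deadlines.Due[:], mas.NewSectors)...)
	proving, _ := bitfield.SubtractBitField(allSectors, notProving)
	count, _ := proving.Count() // number of sectors eligible for Winning PoSt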
@@ -49,6 +49,10 @@ import (
 	"github.com/filecoin-project/lotus/metrics"
 )
 
+// Blocks that are more than MaxHeightDrift epochs above
+//the theoretical max height based on systime are quickly rejected
+const MaxHeightDrift = 5
+
 var log = logging.Logger("chain")
 
 var LocalIncoming = "incoming"
@@ -157,6 +161,11 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
 		return false
 	}
 
+	if syncer.IsEpochBeyondCurrMax(fts.TipSet().Height()) {
+		log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height())
+		return false
+	}
+
 	for _, b := range fts.Blocks {
 		if reason, ok := syncer.bad.Has(b.Cid()); ok {
 			log.Warnf("InformNewHead called on block marked as bad: %s (reason: %s)", b.Cid(), reason)
@@ -657,18 +666,18 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
 	// fast checks first
 
 	now := uint64(time.Now().Unix())
-	if h.Timestamp > now+build.AllowableClockDrift {
+	if h.Timestamp > now+build.AllowableClockDriftSecs {
 		return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, ErrTemporal)
 	}
 	if h.Timestamp > now {
 		log.Warn("Got block from the future, but within threshold", h.Timestamp, time.Now().Unix())
 	}
 
-	if h.Timestamp < baseTs.MinTimestamp()+(build.BlockDelay*uint64(h.Height-baseTs.Height())) {
+	if h.Timestamp < baseTs.MinTimestamp()+(build.BlockDelaySecs*uint64(h.Height-baseTs.Height())) {
 		log.Warn("timestamp funtimes: ", h.Timestamp, baseTs.MinTimestamp(), h.Height, baseTs.Height())
-		diff := (baseTs.MinTimestamp() + (build.BlockDelay * uint64(h.Height-baseTs.Height()))) - h.Timestamp
+		diff := (baseTs.MinTimestamp() + (build.BlockDelaySecs * uint64(h.Height-baseTs.Height()))) - h.Timestamp
 
-		return xerrors.Errorf("block was generated too soon (h.ts:%d < base.mints:%d + BLOCK_DELAY:%d * deltaH:%d; diff %d)", h.Timestamp, baseTs.MinTimestamp(), build.BlockDelay, h.Height-baseTs.Height(), diff)
+		return xerrors.Errorf("block was generated too soon (h.ts:%d < base.mints:%d + BLOCK_DELAY:%d * deltaH:%d; diff %d)", h.Timestamp, baseTs.MinTimestamp(), build.BlockDelaySecs, h.Height-baseTs.Height(), diff)
 	}
 
 	msgsCheck := async.Err(func() error {
@@ -1521,3 +1530,13 @@ func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet)
 
 	return nil, xerrors.Errorf("found NO beacon entries in the 20 blocks prior to given tipset")
 }
+
+func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
+	g, err := syncer.store.GetGenesis()
+	if err != nil {
+		return false
+	}
+
+	now := uint64(time.Now().Unix())
+	return epoch > (abi.ChainEpoch((now-g.Timestamp)/build.BlockDelaySecs) + MaxHeightDrift)
+}
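Note (illustrative, not part of the commit): the height ceiling is derived from wall-clock time. With genesis timestamp g.Timestamp and BlockDelaySecs seconds per epoch, the theoretical max epoch is (now - g.Timestamp) / BlockDelaySecs, and anything more than MaxHeightDrift (5) epochs above that is rejected before sync. A worked example with made-up numbers:

	// If 2500 seconds have passed since genesis and BlockDelaySecs is 25,
	// the cut-off is 2500/25 + 5 = 105, so a tipset claiming height 106 is dropped.
	maxEpoch := abi.ChainEpoch((now-genesisTimestamp)/build.BlockDelaySecs) + MaxHeightDrift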
@@ -408,7 +408,7 @@ func TestSyncBadTimestamp(t *testing.T) {
 
 	base := tu.g.CurTipset
 	tu.g.Timestamper = func(pts *types.TipSet, tl abi.ChainEpoch) uint64 {
-		return pts.MinTimestamp() + (build.BlockDelay / 2)
+		return pts.MinTimestamp() + (build.BlockDelaySecs / 2)
 	}
 
 	fmt.Println("BASE: ", base.Cids())
@@ -11,7 +11,7 @@ import (
 type FIL BigInt
 
 func (f FIL) String() string {
-	r := new(big.Rat).SetFrac(f.Int, big.NewInt(build.FilecoinPrecision))
+	r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(build.FilecoinPrecision)))
 	if r.Sign() == 0 {
 		return "0"
 	}
@@ -33,7 +33,7 @@ func ParseFIL(s string) (FIL, error) {
 		return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s)
 	}
 
-	r = r.Mul(r, big.NewRat(build.FilecoinPrecision, 1))
+	r = r.Mul(r, big.NewRat(int64(build.FilecoinPrecision), 1))
 	if !r.IsInt() {
 		return FIL{}, fmt.Errorf("invalid FIL value: %q", s)
 	}
@@ -186,7 +186,7 @@ func SyncWait(ctx context.Context, napi api.FullNode) error {
 
 		fmt.Printf("\r\x1b[2KWorker %d: Target: %s\tState: %s\tHeight: %d", working, target, chain.SyncStageString(ss.Stage), ss.Height)
 
-		if time.Now().Unix()-int64(head.MinTimestamp()) < build.BlockDelay {
+		if time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs) {
 			fmt.Println("\nDone!")
 			return nil
 		}
@@ -18,6 +18,9 @@ var log = logging.Logger("chainwatch")
 
 func main() {
 	_ = logging.SetLogLevel("*", "INFO")
+	if err := logging.SetLogLevel("rpc", "error"); err != nil {
+		panic(err)
+	}
 
 	log.Info("Starting chainwatch")
 
@@ -46,7 +46,7 @@ func subMpool(ctx context.Context, api aapi.FullNode, st *storage) {
 			msgs[v.Message.Message.Cid()] = &v.Message.Message
 		}
 
-		log.Infof("Processing %d mpool updates", len(msgs))
+		log.Debugf("Processing %d mpool updates", len(msgs))
 
 		err := st.storeMessages(msgs)
 		if err != nil {
@@ -1,11 +1,17 @@
 package main
 
 import (
+	"context"
 	"database/sql"
+	"fmt"
+	"github.com/filecoin-project/specs-actors/actors/util/adt"
+	"github.com/libp2p/go-libp2p-core/peer"
 	"sync"
 	"time"
 
 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/specs-actors/actors/abi"
+	miner_spec "github.com/filecoin-project/specs-actors/actors/builtin/miner"
 	"github.com/ipfs/go-cid"
 	_ "github.com/lib/pq"
 	"golang.org/x/xerrors"
@@ -18,6 +24,9 @@ type storage struct {
 	db *sql.DB
 
 	headerLk sync.Mutex
+
+	// stateful miner data
+	minerSectors map[cid.Cid]struct{}
 }
 
 func openStorage(dbSource string) (*storage, error) {
@@ -28,7 +37,10 @@ func openStorage(dbSource string) (*storage, error) {
 
 	db.SetMaxOpenConns(1350)
 
-	st := &storage{db: db}
+	ms := make(map[cid.Cid]struct{})
+	ms[cid.Undef] = struct{}{}
+
+	st := &storage{db: db, minerSectors: ms}
 
 	return st, st.setup()
 }
@@ -252,31 +264,56 @@ create table if not exists receipts
 
 create index if not exists receipts_msg_state_index
 	on receipts (msg, state);
-/*
-create table if not exists miner_heads
+
+create table if not exists miner_sectors
 (
-	head text not null,
-	addr text not null,
-	stateroot text not null,
-	sectorset text not null,
-	setsize decimal not null,
-	provingset text not null,
-	provingsize decimal not null,
-	owner text not null,
-	worker text not null,
-	peerid text not null,
-	sectorsize bigint not null,
-	power decimal not null,
-	active bool,
-	ppe bigint not null,
-	slashed_at bigint not null,
-	constraint miner_heads_pk
-		primary key (head, addr)
+	miner_id text not null,
+	sector_id bigint not null,
+	activation_epoch bigint not null,
+	expiration_epoch bigint not null,
+	termination_epoch bigint,
+	deal_weight text not null,
+	verified_deal_weight text not null,
+	seal_cid text not null,
+	seal_rand_epoch bigint not null,
+	constraint miner_sectors_pk
+		primary key (miner_id, sector_id)
 );
 
-create index if not exists miner_heads_stateroot_index
-	on miner_heads (stateroot);
+create index if not exists miner_sectors_miner_sectorid_index
+	on miner_sectors (miner_id, sector_id);
 
+create table if not exists miner_info
+(
+	miner_id text not null,
+	owner_addr text not null,
+	worker_addr text not null,
+	peer_id text,
+	sector_size text not null,
+
+	precommit_deposits text not null,
+	locked_funds text not null,
+	next_deadline_process_faults bigint not null,
+	constraint miner_info_pk
+		primary key (miner_id)
+);
+
+/* used to tell when a miners sectors (proven-not-yet-expired) changed if the miner_sectors_cid's are different a new sector was added or removed (terminated/expired) */
+create table if not exists miner_sectors_heads
+(
+	miner_id text not null,
+	miner_sectors_cid text not null,
+
+	state_root text not null,
+
+	constraint miner_sectors_heads_pk
+		primary key (miner_id,miner_sectors_cid)
+
+);
+
+/*
 create or replace function miner_tips(epoch bigint)
     returns table (head text,
                    addr text,
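Note (illustrative, not part of the commit): a hedged sketch of how the new tables might be queried once populated; the query text is an assumption built from the schema above, not code from the change:

	// Count sectors that chainwatch has marked as terminated (not merely expired).
	row := db.QueryRow(`SELECT count(*) FROM miner_sectors WHERE miner_id = $1 AND termination_epoch IS NOT NULL`, minerID)
	var terminated int
	if err := row.Scan(&terminated); err != nil {
		return err
	}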
@@ -456,54 +493,261 @@ func (st *storage) storeActors(actors map[address.Address]map[types.Actor]actorI
 	return nil
 }
 
-func (st *storage) storeMiners(miners map[minerKey]*minerInfo) error {
-	/*tx, err := st.db.Begin()
-	if err != nil {
-		return err
-	}
-
-	if _, err := tx.Exec(`
-
-create temp table mh (like miner_heads excluding constraints) on commit drop;
-
-`); err != nil {
-		return xerrors.Errorf("prep temp: %w", err)
-	}
-
-	stmt, err := tx.Prepare(`copy mh (head, addr, stateroot, sectorset, setsize, provingset, provingsize, owner, worker, peerid, sectorsize, power, ppe) from STDIN`)
-	if err != nil {
-		return err
-	}
-	for k, i := range miners {
-		if _, err := stmt.Exec(
-			k.act.Head.String(),
-			k.addr.String(),
-			k.stateroot.String(),
-			i.state.Sectors.String(),
-			fmt.Sprint(i.ssize),
-			i.state.ProvingSet.String(),
-			fmt.Sprint(i.psize),
-			i.info.Owner.String(),
-			i.info.Worker.String(),
-			i.info.PeerId.String(),
-			i.info.SectorSize,
-			i.power.String(), // TODO: SPA
-			i.state.PoStState.ProvingPeriodStart,
-		); err != nil {
-			return err
-		}
-	}
-	if err := stmt.Close(); err != nil {
-		return err
-	}
-
-	if _, err := tx.Exec(`insert into miner_heads select * from mh on conflict do nothing `); err != nil {
-		return xerrors.Errorf("actor put: %w", err)
-	}
-
-	return tx.Commit()*/
-	return nil
+type storeSectorsAPI interface {
+	StateMinerSectors(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error)
+}
+
+func (st *storage) storeSectors(minerTips map[types.TipSetKey][]*minerStateInfo, sectorApi storeSectorsAPI) error {
+	tx, err := st.db.Begin()
+	if err != nil {
+		return err
+	}
+
+	if _, err := tx.Exec(`create temp table ms (like miner_sectors excluding constraints) on commit drop;`); err != nil {
+		return xerrors.Errorf("prep temp: %w", err)
+	}
+
+	stmt, err := tx.Prepare(`copy ms (miner_id, sector_id, activation_epoch, expiration_epoch, deal_weight, verified_deal_weight, seal_cid, seal_rand_epoch) from STDIN`)
+	if err != nil {
+		return err
+	}
+
+	for tipset, miners := range minerTips {
+		for _, miner := range miners {
+			sectors, err := sectorApi.StateMinerSectors(context.TODO(), miner.addr, nil, true, tipset)
+			if err != nil {
+				log.Debugw("Failed to load sectors", "tipset", tipset.String(), "miner", miner.addr.String(), "error", err)
+			}
+
+			for _, sector := range sectors {
+				if _, err := stmt.Exec(
+					miner.addr.String(),
+					uint64(sector.ID),
+					int64(sector.Info.ActivationEpoch),
+					int64(sector.Info.Info.Expiration),
+					sector.Info.DealWeight.String(),
+					sector.Info.VerifiedDealWeight.String(),
+					sector.Info.Info.SealedCID.String(),
+					int64(sector.Info.Info.SealRandEpoch),
+				); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	if err := stmt.Close(); err != nil {
+		return err
+	}
+
+	if _, err := tx.Exec(`insert into miner_sectors select * from ms on conflict do nothing `); err != nil {
+		return xerrors.Errorf("actor put: %w", err)
+	}
+
+	return tx.Commit()
+}
+
+func (st *storage) storeMiners(minerTips map[types.TipSetKey][]*minerStateInfo) error {
+	tx, err := st.db.Begin()
+	if err != nil {
+		return err
+	}
+
+	if _, err := tx.Exec(`create temp table mi (like miner_info excluding constraints) on commit drop;`); err != nil {
+		return xerrors.Errorf("prep temp: %w", err)
+	}
+
+	stmt, err := tx.Prepare(`copy mi (miner_id, owner_addr, worker_addr, peer_id, sector_size, precommit_deposits, locked_funds, next_deadline_process_faults) from STDIN`)
+	if err != nil {
+		return err
+	}
+	for ts, miners := range minerTips {
+		for _, miner := range miners {
+			var pid string
+			if len(miner.info.PeerId) != 0 {
+				peerid, err := peer.IDFromBytes(miner.info.PeerId)
+				if err != nil {
+					// this should "never happen", but if it does we should still store info about the miner.
+					log.Warnw("failed to decode peerID", "peerID (bytes)", miner.info.PeerId, "miner", miner.addr, "tipset", ts.String())
+				} else {
+					pid = peerid.String()
+				}
+			}
+			if _, err := stmt.Exec(
+				miner.addr.String(),
+				miner.info.Owner.String(),
+				miner.info.Worker.String(),
+				pid,
+				miner.info.SectorSize.ShortString(),
+				miner.state.PreCommitDeposits.String(),
+				miner.state.LockedFunds.String(),
+				miner.state.NextDeadlineToProcessFaults,
+			); err != nil {
+				log.Errorw("failed to store miner state", "state", miner.state, "info", miner.info, "error", err)
+				return err
+			}
+
+		}
+	}
+	if err := stmt.Close(); err != nil {
+		return err
+	}
+
+	if _, err := tx.Exec(`insert into miner_info select * from mi on conflict do nothing `); err != nil {
+		return xerrors.Errorf("actor put: %w", err)
+	}
+
+	return tx.Commit()
+}
+
+type minerSectorUpdate struct {
+	minerState *minerStateInfo
+	tskey      types.TipSetKey
+	oldSector  cid.Cid
+}
+
+func (st *storage) storeMinerSectorsHeads(minerTips map[types.TipSetKey][]*minerStateInfo, api api.FullNode) error {
+	tx, err := st.db.Begin()
+	if err != nil {
+		return err
+	}
+
+	if _, err := tx.Exec(`create temp table msh (like miner_sectors_heads excluding constraints) on commit drop;`); err != nil {
+		return xerrors.Errorf("prep temp: %w", err)
+	}
+
+	stmt, err := tx.Prepare(`copy msh (miner_id, miner_sectors_cid, state_root) from STDIN`)
+	if err != nil {
+		return err
+	}
+
+	var updateMiners []*minerSectorUpdate
+	for tsk, miners := range minerTips {
+		for _, miner := range miners {
+			sectorCID, err := st.getLatestMinerSectorCID(context.TODO(), miner.addr)
+			if err != nil {
+				panic(err)
+			}
+			if sectorCID == cid.Undef {
+				continue
+			}
+			if _, found := st.minerSectors[sectorCID]; !found {
+				// schedule miner table update
+				updateMiners = append(updateMiners, &minerSectorUpdate{
+					minerState: miner,
+					tskey:      tsk,
+					oldSector:  sectorCID,
+				})
+			}
+			st.minerSectors[sectorCID] = struct{}{}
+			log.Debugw("got sector CID", "miner", miner.addr, "cid", sectorCID.String())
+			if _, err := stmt.Exec(
+				miner.addr.String(),
+				miner.state.Sectors.String(),
+				miner.stateroot.String(),
+			); err != nil {
+				log.Errorw("failed to store miners sectors head", "state", miner.state, "info", miner.info, "error", err)
+				return err
+			}
+
+		}
+	}
+	if err := stmt.Close(); err != nil {
+		return err
+	}
+
+	if _, err := tx.Exec(`insert into miner_sectors_heads select * from msh on conflict do nothing `); err != nil {
+		return xerrors.Errorf("actor put: %w", err)
+	}
+
+	if err := tx.Commit(); err != nil {
+		return err
+	}
+	return st.updateMinerSectors(updateMiners, api)
+}
+
+type deletedSector struct {
+	deletedSector miner_spec.SectorOnChainInfo
+	miner         address.Address
+	tskey         types.TipSetKey
+}
+
+func (st *storage) updateMinerSectors(miners []*minerSectorUpdate, api api.FullNode) error {
+	log.Info("updating miners constant sector table")
+	var deletedSectors []*deletedSector
+	for _, miner := range miners {
+		s := &apiIpldStore{context.TODO(), api}
+		newSectors, err := adt.AsArray(s, miner.minerState.state.Sectors)
+		if err != nil {
+			log.Warnw("new sectors as array", "error", err, "cid", miner.minerState.state.Sectors)
+			return err
+		}
+
+		oldSectors, err := adt.AsArray(s, miner.oldSector)
+		if err != nil {
+			log.Warnw("old sectors as array", "error", err, "cid", miner.oldSector.String())
+			return err
+		}
+
+		var oldSecInfo miner_spec.SectorOnChainInfo
+		var newSecInfo miner_spec.SectorOnChainInfo
+		// if we cannot find an old sector in the new list then it was removed.
+		if err := oldSectors.ForEach(&oldSecInfo, func(i int64) error {
+			found, err := newSectors.Get(uint64(oldSecInfo.Info.SectorNumber), &newSecInfo)
+			if err != nil {
+				log.Warnw("new sectors get", "error", err)
+				return err
+			}
+			if !found {
+				log.Infow("MINER DELETED SECTOR", "miner", miner.minerState.addr.String(), "sector", oldSecInfo.Info.SectorNumber, "tipset", miner.tskey.String())
+				deletedSectors = append(deletedSectors, &deletedSector{
+					deletedSector: oldSecInfo,
+					miner:         miner.minerState.addr,
+					tskey:         miner.tskey,
+				})
+			}
+			return nil
+		}); err != nil {
+			log.Warnw("old sectors foreach", "error", err)
+			return err
+		}
+		if len(deletedSectors) > 0 {
+			log.Infow("Calculated updates", "miner", miner.minerState.addr, "deleted sectors", len(deletedSectors))
+		}
+	}
+	// now we have all the sectors that were removed, update the database
+	tx, err := st.db.Begin()
+	if err != nil {
+		return err
+	}
+	stmt, err := tx.Prepare(`UPDATE miner_sectors SET termination_epoch=$1 WHERE miner_id=$2 AND sector_id=$3`)
+	if err != nil {
+		return err
+	}
+	for _, ds := range deletedSectors {
+		ts, err := api.ChainGetTipSet(context.TODO(), ds.tskey)
+		if err != nil {
+			log.Warnw("get tipset", "error", err)
+			return err
+		}
+		// TODO validate this shits right
+		if ts.Height() >= ds.deletedSector.Info.Expiration {
+			// means it expired, do nothing
+			log.Infow("expired sector", "miner", ds.miner.String(), "sector", ds.deletedSector.Info.SectorNumber)
+			continue
+		}
+		log.Infow("terminated sector", "miner", ds.miner.String(), "sector", ds.deletedSector.Info.SectorNumber)
+		// means it was terminated.
+		if _, err := stmt.Exec(int64(ts.Height()), ds.miner.String(), int64(ds.deletedSector.Info.SectorNumber)); err != nil {
+			return err
+		}
+	}
+
+	if err := stmt.Close(); err != nil {
+		return err
+	}
+	defer log.Info("update miner sectors complete")
+	return tx.Commit()
 }
 
 func (st *storage) storeHeaders(bhs map[cid.Cid]*types.BlockHeader, sync bool) error {
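Note (illustrative, not part of the commit): updateMinerSectors relies on the AMT comparison above: every sector present in the old sectors array but absent from the new one has been removed, and the tipset height decides whether that removal was an expiry or a termination (only terminations get termination_epoch set). A condensed sketch of the classification step, with names taken from the diff:

	if ts.Height() >= ds.deletedSector.Info.Expiration {
		// expired naturally; leave termination_epoch NULL
		continue
	}
	// otherwise it was terminated early; record the epoch at which it disappeared
	_, err = stmt.Exec(int64(ts.Height()), ds.miner.String(), int64(ds.deletedSector.Info.SectorNumber))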
@@ -1008,3 +1252,27 @@ func (st *storage) refreshViews() error {
 func (st *storage) close() error {
 	return st.db.Close()
 }
+
+func (st *storage) getLatestMinerSectorCID(ctx context.Context, miner address.Address) (cid.Cid, error) {
+	queryStr := fmt.Sprintf(`
+SELECT miner_sectors_cid
+FROM miner_sectors_heads
+LEFT JOIN blocks ON miner_sectors_heads.state_root = blocks.parentstateroot
+WHERE miner_id = '%s'
+ORDER BY blocks.height DESC
+LIMIT 1;
+`,
+		miner.String())
+
+	var cidstr string
+	err := st.db.QueryRowContext(ctx, queryStr).Scan(&cidstr)
+	switch {
+	case err == sql.ErrNoRows:
+		log.Warnf("no miner with miner_id: %s in table", miner)
+		return cid.Undef, nil
+	case err != nil:
+		return cid.Undef, err
+	default:
+		return cid.Decode(cidstr)
+	}
+}
@ -5,17 +5,21 @@ import (
|
|||||||
"container/list"
|
"container/list"
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
cbg "github.com/whyrusleeping/cbor-gen"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/specs-actors/actors/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/chain/store"
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
@ -49,20 +53,21 @@ func runSyncer(ctx context.Context, api api.FullNode, st *storage, maxBatch int)
|
|||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
type minerKey struct {
|
type minerStateInfo struct {
|
||||||
|
// common
|
||||||
addr address.Address
|
addr address.Address
|
||||||
act types.Actor
|
act types.Actor
|
||||||
stateroot cid.Cid
|
stateroot cid.Cid
|
||||||
tsKey types.TipSetKey
|
|
||||||
}
|
|
||||||
|
|
||||||
type minerInfo struct {
|
// miner specific
|
||||||
state miner.State
|
state miner.State
|
||||||
info miner.MinerInfo
|
info miner.MinerInfo
|
||||||
|
|
||||||
power big.Int
|
// tracked by power actor
|
||||||
ssize uint64
|
rawPower big.Int
|
||||||
psize uint64
|
qalPower big.Int
|
||||||
|
ssize uint64
|
||||||
|
psize uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
type actorInfo struct {
|
type actorInfo struct {
|
||||||
@ -80,25 +85,28 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.

	log.Infof("Getting headers / actors")

+	// global list of all blocks that need to be synced
	allToSync := map[cid.Cid]*types.BlockHeader{}
+	// a stack
	toVisit := list.New()

	for _, header := range headTs.Blocks() {
		toVisit.PushBack(header)
	}

+	// TODO consider making a db query to check where syncing left off at in the case of a restart and avoid reprocessing
+	// those entries, or write value to file on shutdown
+	// walk the entire chain starting from headTS
	for toVisit.Len() > 0 {
		bh := toVisit.Remove(toVisit.Back()).(*types.BlockHeader)

		_, has := hazlist[bh.Cid()]
		if _, seen := allToSync[bh.Cid()]; seen || has {
			continue
		}

		allToSync[bh.Cid()] = bh

		if len(allToSync)%500 == 10 {
-			log.Infof("todo: (%d) %s @%d", len(allToSync), bh.Cid(), bh.Height)
+			log.Debugf("to visit: (%d) %s @%d", len(allToSync), bh.Cid(), bh.Height)
		}

		if len(bh.Parents) == 0 {

@ -116,17 +124,25 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.
		}
	}

+	// Main worker loop, this loop runs until all tipse from headTS to genesis have been processed.
	for len(allToSync) > 0 {
+		// first map is addresses -> common actors states (head, code, balance, nonce)
+		// second map common actor states -> chain state (tipset, stateroot) & unique actor state (deserialization of their head CID) represented as json.
		actors := map[address.Address]map[types.Actor]actorInfo{}

+		// map of actor public key address to ID address
		addressToID := map[address.Address]address.Address{}
		minH := abi.ChainEpoch(math.MaxInt64)

+		// find the blockheader with the lowest height
		for _, header := range allToSync {
			if header.Height < minH {
				minH = header.Height
			}
		}

+		// toSync maps block cids to their headers and contains all block headers that will be synced in this batch
+		// `maxBatch` is a tunable parameter to control how many blocks we sync per iteration.
		toSync := map[cid.Cid]*types.BlockHeader{}
		for c, header := range allToSync {
			if header.Height < minH+abi.ChainEpoch(maxBatch) {
@ -134,12 +150,16 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.
				addressToID[header.Miner] = address.Undef
			}
		}
+		// remove everything we are syncing this round from the global list of blocks to sync
		for c := range toSync {
			delete(allToSync, c)
		}

-		log.Infof("Syncing %d blocks", len(toSync))
+		log.Infow("Starting Sync", "height", minH, "numBlocks", len(toSync), "maxBatch", maxBatch)

+		// map of addresses to changed actors
+		var changes map[string]types.Actor
+		// collect all actor state that has changes between block headers
		paDone := 0
		parmap.Par(50, parmap.MapArr(toSync), func(bh *types.BlockHeader) {
			paDone++

@ -155,6 +175,8 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.
				return
			}

+			// TODO suspicious there is not a lot to be gained by doing this in parallel since the genesis state
+			// is unlikely to contain a lot of actors, why not for loop here?
			parmap.Par(50, aadrs, func(addr address.Address) {
				act, err := api.StateGetActor(ctx, addr, genesisTs.Key())
				if err != nil {

@ -196,12 +218,15 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.
				return
			}

-			changes, err := api.StateChangedActors(ctx, pts.ParentState(), bh.ParentStateRoot)
+			// TODO Does this return actors that have been deleted between states?
+			// collect all actors that had state changes between the blockheader parent-state and its grandparent-state.
+			changes, err = api.StateChangedActors(ctx, pts.ParentState(), bh.ParentStateRoot)
			if err != nil {
				log.Error(err)
				return
			}

+			// record the state of all actors that have changed
			for a, act := range changes {
				act := act

@ -229,6 +254,7 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.
				if !ok {
					actors[addr] = map[types.Actor]actorInfo{}
				}
+				// a change occurred for the actor with address `addr` and state `act` at tipset `pts`.
				actors[addr][act] = actorInfo{
					stateroot: bh.ParentStateRoot,
					state:     string(state),

@ -239,6 +265,11 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.
			}
		})

+		// map of tipset to all miners that had a head-change at that tipset.
+		minerTips := make(map[types.TipSetKey][]*minerStateInfo, len(changes))
+		// heads we've seen, im being paranoid
+		headsSeen := make(map[cid.Cid]struct{}, len(actors))

		log.Infof("Getting messages")

		msgs, incls := fetchMessages(ctx, api, toSync)
@ -265,57 +296,90 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.

		log.Infof("Getting miner info")

-		miners := map[minerKey]*minerInfo{}
+		minerChanges := 0

		for addr, m := range actors {
			for actor, c := range m {
				if actor.Code != builtin.StorageMinerActorCodeID {
					continue
				}

-				miners[minerKey{
+				// only want miner actors with head change events
+				if _, found := headsSeen[actor.Head]; found {
+					continue
+				}
+				minerChanges++
+
+				minerTips[c.tsKey] = append(minerTips[c.tsKey], &minerStateInfo{
					addr:      addr,
					act:       actor,
					stateroot: c.stateroot,
-					tsKey:     c.tsKey,
-				}] = &minerInfo{}
+
+					state: miner.State{},
+					info:  miner.MinerInfo{},
+
+					rawPower: big.Zero(),
+					qalPower: big.Zero(),
+				})
+
+				headsSeen[actor.Head] = struct{}{}
			}
		}

-		parmap.Par(50, parmap.KVMapArr(miners), func(it func() (minerKey, *minerInfo)) {
-			k, info := it()
+		minerProcessingStartedAt := time.Now()
+		log.Infow("Processing miners", "numTips", len(minerTips), "numMinerChanges", minerChanges)
+		// extract the power actor state at each tipset, loop over all miners that changed at said tipset and extract their
+		// claims from the power actor state. This ensures we only fetch the power actors state once for each tipset.
+		parmap.Par(50, parmap.KVMapArr(minerTips), func(it func() (types.TipSetKey, []*minerStateInfo)) {
+			tsKey, minerInfo := it()

-			// TODO: get the storage power actors state and and pull the miner power from there, currently this hits the
-			// storage power actor once for each miner for each tipset, we can do better by just getting it for each tipset
-			// and reading each miner power from the result.
-			pow, err := api.StateMinerPower(ctx, k.addr, k.tsKey)
-			if err != nil {
-				log.Error(err)
-				// Not sure why this would fail, but its probably worth continuing
-			}
-			info.power = pow.MinerPower.QualityAdjPower
-
-			sszs, err := api.StateMinerSectorCount(ctx, k.addr, k.tsKey)
+			// get the power actors claims map
+			mp, err := getPowerActorClaimsMap(ctx, api, tsKey)
			if err != nil {
				log.Error(err)
				return
			}
-			info.psize = sszs.Pset
-			info.ssize = sszs.Sset
+			// Get miner raw and quality power
+			for _, mi := range minerInfo {
+				var claim power.Claim
+				// get miner claim from power actors claim map and store if found, else the miner had no claim at
+				// this tipset
+				found, err := mp.Get(adt.AddrKey(mi.addr), &claim)
+				if err != nil {
+					log.Error(err)
+				}
+				if found {
+					mi.qalPower = claim.QualityAdjPower
+					mi.rawPower = claim.RawBytePower
+				}

-			astb, err := api.ChainReadObj(ctx, k.act.Head)
-			if err != nil {
-				log.Error(err)
-				return
-			}
+				// Get the miner state info
+				astb, err := api.ChainReadObj(ctx, mi.act.Head)
+				if err != nil {
+					log.Error(err)
+					return
+				}
+				if err := mi.state.UnmarshalCBOR(bytes.NewReader(astb)); err != nil {
+					log.Error(err)
+					return
+				}
+				mi.info = mi.state.Info
+			}

-			if err := info.state.UnmarshalCBOR(bytes.NewReader(astb)); err != nil {
-				log.Error(err)
-				return
-			}
-
-			info.info = info.state.Info
+			// TODO Get the Sector Count
+			// FIXME this is returning a lot of "address not found" errors, which is strange given that StateChangedActors
+			// retruns all actors that had a state change at tipset `k.tsKey`, maybe its returning deleted miners too??
+			/*
+				sszs, err := api.StateMinerSectorCount(ctx, k.addr, k.tsKey)
+				if err != nil {
+					info.psize = 0
+					info.ssize = 0
+				} else {
+					info.psize = sszs.Pset
+					info.ssize = sszs.Sset
+				}
+			*/
		})
+		log.Infow("Completed Miner Processing", "duration", time.Since(minerProcessingStartedAt).String(), "processed", minerChanges)

		log.Info("Getting receipts")

@ -343,8 +407,21 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.
		}

		log.Info("Storing miners")
+		if err := st.storeMiners(minerTips); err != nil {
+			log.Error(err)
+			return
+		}

-		if err := st.storeMiners(miners); err != nil {
+		log.Info("Storing miner sectors")
+		sectorStart := time.Now()
+		if err := st.storeSectors(minerTips, api); err != nil {
+			log.Error(err)
+			return
+		}
+		log.Infow("Finished storing miner sectors", "duration", time.Since(sectorStart).String())
+
+		log.Info("Storing miner sectors heads")
+		if err := st.storeMinerSectorsHeads(minerTips, api); err != nil {
			log.Error(err)
			return
		}
@ -465,3 +542,55 @@ func fetchParentReceipts(ctx context.Context, api api.FullNode, toSync map[cid.C

	return out
}
+
+// load the power actor state clam as an adt.Map at the tipset `ts`.
+func getPowerActorClaimsMap(ctx context.Context, api api.FullNode, ts types.TipSetKey) (*adt.Map, error) {
+	powerActor, err := api.StateGetActor(ctx, builtin.StoragePowerActorAddr, ts)
+	if err != nil {
+		return nil, err
+	}
+
+	powerRaw, err := api.ChainReadObj(ctx, powerActor.Head)
+	if err != nil {
+		return nil, err
+	}
+
+	var powerActorState power.State
+	if err := powerActorState.UnmarshalCBOR(bytes.NewReader(powerRaw)); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal power actor state: %w", err)
+	}
+
+	s := &apiIpldStore{ctx, api}
+	return adt.AsMap(s, powerActorState.Claims)
+}
+
+// require for AMT and HAMT access
+// TODO extract this to a common location in lotus and reuse the code
+type apiIpldStore struct {
+	ctx context.Context
+	api api.FullNode
+}
+
+func (ht *apiIpldStore) Context() context.Context {
+	return ht.ctx
+}
+
+func (ht *apiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
+	raw, err := ht.api.ChainReadObj(ctx, c)
+	if err != nil {
+		return err
+	}
+
+	cu, ok := out.(cbg.CBORUnmarshaler)
+	if ok {
+		if err := cu.UnmarshalCBOR(bytes.NewReader(raw)); err != nil {
+			return err
+		}
+		return nil
+	}
+	return fmt.Errorf("Object does not implement CBORUnmarshaler: %T", out)
+}
+
+func (ht *apiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
+	return cid.Undef, fmt.Errorf("Put is not implemented on apiIpldStore")
+}
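Outside of the batch loop, the same helper can answer one-off questions such as "what claim did miner X hold at tipset T". A sketch only, using the same package and imports as the code above; `minerClaimAt` is a hypothetical name, and the lookup mirrors the `mp.Get(adt.AddrKey(...), &claim)` call in the worker:

```go
// minerClaimAt loads the power actor's claims HAMT at ts and returns the
// miner's claim, or nil if the miner had no claim registered at that tipset.
func minerClaimAt(ctx context.Context, api api.FullNode, ts types.TipSetKey, maddr address.Address) (*power.Claim, error) {
	mp, err := getPowerActorClaimsMap(ctx, api, ts)
	if err != nil {
		return nil, err
	}
	var claim power.Claim
	found, err := mp.Get(adt.AddrKey(maddr), &claim)
	if err != nil {
		return nil, err
	}
	if !found {
		// the miner had no claim with the power actor at this tipset
		return nil, nil
	}
	return &claim, nil
}
```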
@ -63,7 +63,7 @@ var watchHeadCmd = &cli.Command{
		},
		&cli.IntFlag{
			Name:  "interval",
-			Value: build.BlockDelay,
+			Value: int(build.BlockDelaySecs),
			Usage: "interval in seconds between chain head checks",
		},
		&cli.StringFlag{

@ -72,8 +72,9 @@ var watchHeadCmd = &cli.Command{
			Usage: "systemd unit name to restart on health check failure",
		},
		&cli.IntFlag{
			Name:  "api-timeout",
-			Value: build.BlockDelay,
+			// TODO: this default value seems spurious.
+			Value: int(build.BlockDelaySecs),
			Usage: "timeout between API retries",
		},
		&cli.IntFlag{

@ -236,7 +237,7 @@ func waitForSyncComplete(ctx context.Context, a api.FullNode, r int, t time.Dura
			return err
		}

-		if time.Now().Unix()-int64(head.MinTimestamp()) < build.BlockDelay {
+		if time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs) {
			return nil
		}
	}
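A recurring pattern in this commit is the switch from `build.BlockDelay` to `build.BlockDelaySecs`, a plain unsigned seconds count rather than a ready-made duration, so every call site casts it to whatever type the surrounding expression needs. A minimal, self-contained sketch of the two conversions used above, assuming only that `build.BlockDelaySecs` is a `uint64` as the call sites imply:

```go
package main

import (
	"fmt"
	"time"

	"github.com/filecoin-project/lotus/build"
)

func main() {
	// cli.IntFlag defaults (e.g. "interval", "api-timeout") take a plain int.
	interval := int(build.BlockDelaySecs)

	// Wall-clock comparisons and pretty-printing first convert the seconds
	// count into a time.Duration.
	delay := time.Duration(build.BlockDelaySecs) * time.Second

	fmt.Println(interval, delay)
}
```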
@ -124,7 +124,7 @@ var genesisAddMinerCmd = &cli.Command{
		log.Infof("Giving %s some initial balance", miner.Owner)
		template.Accounts = append(template.Accounts, genesis.Actor{
			Type:    genesis.TAccount,
-			Balance: big.Mul(big.NewInt(50_000_000), big.NewInt(build.FilecoinPrecision)),
+			Balance: big.Mul(big.NewInt(50_000_000), big.NewInt(int64(build.FilecoinPrecision))),
			Meta:    (&genesis.AccountMeta{Owner: miner.Owner}).ActorMeta(),
		})
	}
@ -1,8 +1,10 @@
package main

import (
+	"encoding/base64"
	"encoding/hex"
	"encoding/json"
+	"fmt"
	"io"
	"io/ioutil"
	"os"
@ -11,77 +13,356 @@ import (

	"github.com/urfave/cli/v2"

-	_ "github.com/filecoin-project/lotus/lib/sigs/bls"
-	_ "github.com/filecoin-project/lotus/lib/sigs/secp"
+	"github.com/libp2p/go-libp2p-core/crypto"
+	"github.com/libp2p/go-libp2p-core/peer"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet"
+	"github.com/filecoin-project/lotus/node/modules/lp2p"
+	"github.com/filecoin-project/lotus/node/repo"
+
+	_ "github.com/filecoin-project/lotus/lib/sigs/bls"
+	_ "github.com/filecoin-project/lotus/lib/sigs/secp"
)

-type walletInfo struct {
+var validTypes = []string{wallet.KTBLS, wallet.KTSecp256k1, lp2p.KTLibp2pHost}
+
+type keyInfoOutput struct {
	Type      string
	Address   string
	PublicKey string
}

-func (wi walletInfo) String() string {
-	bs, _ := json.Marshal(wi)
-	return string(bs)
+var keyinfoCmd = &cli.Command{
+	Name:  "keyinfo",
+	Usage: "work with lotus keyinfo files (wallets and libp2p host keys)",
+	Description: `The subcommands of keyinfo provide helpful tools for working with keyinfo files without
+having to run the lotus daemon.`,
+	Subcommands: []*cli.Command{
+		keyinfoNewCmd,
+		keyinfoInfoCmd,
+		keyinfoImportCmd,
+	},
}

-var keyinfoCmd = &cli.Command{
-	Name:        "keyinfo",
-	Description: "decode a keyinfo",
+var keyinfoImportCmd = &cli.Command{
+	Name:  "import",
+	Usage: "import a keyinfo file into a lotus repository",
+	Description: `The import command provides a way to import keyfiles into a lotus repository
+without running the daemon.
+
+Note: The LOTUS_PATH directory must be created. This command will not create this directory for you.
+
+Examples
+
+env LOTUS_PATH=/var/lib/lotus lotus-shed keyinfo import libp2p-host.keyinfo`,
+	Action: func(cctx *cli.Context) error {
+		flagRepo := cctx.String("repo")
+
+		var input io.Reader
+		if cctx.Args().Len() == 0 {
+			input = os.Stdin
+		} else {
+			var err error
+			input, err = os.Open(cctx.Args().First())
+			if err != nil {
+				return err
+			}
+		}
+
+		encoded, err := ioutil.ReadAll(input)
+		if err != nil {
+			return err
+		}
+
+		decoded, err := hex.DecodeString(strings.TrimSpace(string(encoded)))
+		if err != nil {
+			return err
+		}
+
+		var keyInfo types.KeyInfo
+		if err := json.Unmarshal(decoded, &keyInfo); err != nil {
+			return err
+		}
+
+		fsrepo, err := repo.NewFS(flagRepo)
+		if err != nil {
+			return err
+		}
+
+		lkrepo, err := fsrepo.Lock(repo.FullNode)
+		if err != nil {
+			return err
+		}
+
+		defer lkrepo.Close()
+
+		keystore, err := lkrepo.KeyStore()
+		if err != nil {
+			return err
+		}
+
+		switch keyInfo.Type {
+		case lp2p.KTLibp2pHost:
+			if err := keystore.Put(lp2p.KLibp2pHost, keyInfo); err != nil {
+				return err
+			}
+
+			sk, err := crypto.UnmarshalPrivateKey(keyInfo.PrivateKey)
+			if err != nil {
+				return err
+			}
+
+			peerid, err := peer.IDFromPrivateKey(sk)
+			if err != nil {
+				return err
+			}
+
+			fmt.Printf("%s\n", peerid.String())
+
+			break
+		case wallet.KTSecp256k1:
+		case wallet.KTBLS:
+			w, err := wallet.NewWallet(keystore)
+			if err != nil {
+				return err
+			}
+
+			addr, err := w.Import(&keyInfo)
+			if err != nil {
+				return err
+			}
+
+			fmt.Printf("%s\n", addr.String())
+		}
+
+		return nil
+	},
+}
+
+var keyinfoInfoCmd = &cli.Command{
+	Name:  "info",
+	Usage: "print information about a keyinfo file",
+	Description: `The info command prints additional information about a key which can't easily
+be retrieved by inspecting the file itself.
+
+The 'format' flag takes a golang text/template template as its value.
+
+The following fields can be retrived through this command
+Type
+Address
+PublicKey
+
+The PublicKey value will be printed base64 encoded using golangs StdEncoding
+
+Examples
+
+Retreive the address of a lotus wallet
+lotus-shed keyinfo info --format '{{ .Address }}' wallet.keyinfo
+`,
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:  "format",
-			Value: "{{.Address}}",
-			Usage: "Format to output",
+			Value: "{{ .Type }} {{ .Address }}",
+			Usage: "specify which output columns to print",
		},
	},
	Action: func(cctx *cli.Context) error {
		format := cctx.String("format")

		var input io.Reader

		if cctx.Args().Len() == 0 {
			input = os.Stdin
		} else {
-			input = strings.NewReader(cctx.Args().First())
+			var err error
+			input, err = os.Open(cctx.Args().First())
+			if err != nil {
+				return err
+			}
		}

-		bytes, err := ioutil.ReadAll(input)
-
-		data, err := hex.DecodeString(strings.TrimSpace(string(bytes)))
+		encoded, err := ioutil.ReadAll(input)
		if err != nil {
			return err
		}

-		var ki types.KeyInfo
-		if err := json.Unmarshal(data, &ki); err != nil {
-			return err
-		}
-
-		key, err := wallet.NewKey(ki)
+		decoded, err := hex.DecodeString(strings.TrimSpace(string(encoded)))
		if err != nil {
			return err
		}

-		bs, err := json.Marshal(key)
+		var keyInfo types.KeyInfo
+		if err := json.Unmarshal(decoded, &keyInfo); err != nil {
+			return err
+		}
+
+		var kio keyInfoOutput
+
+		switch keyInfo.Type {
+		case lp2p.KTLibp2pHost:
+			kio.Type = keyInfo.Type
+
+			sk, err := crypto.UnmarshalPrivateKey(keyInfo.PrivateKey)
+			if err != nil {
+				return err
+			}
+
+			pk := sk.GetPublic()
+
+			peerid, err := peer.IDFromPrivateKey(sk)
+			if err != nil {
+				return err
+			}
+
+			pkBytes, err := pk.Raw()
+			if err != nil {
+				return err
+			}
+
+			kio.Address = peerid.String()
+			kio.PublicKey = base64.StdEncoding.EncodeToString(pkBytes)
+
+			break
+		case wallet.KTSecp256k1:
+		case wallet.KTBLS:
+			kio.Type = keyInfo.Type
+
+			key, err := wallet.NewKey(keyInfo)
+			if err != nil {
+				return err
+			}
+
+			kio.Address = key.Address.String()
+			kio.PublicKey = base64.StdEncoding.EncodeToString(key.PublicKey)
+		}
+
+		tmpl, err := template.New("output").Parse(format)
		if err != nil {
			return err
		}

-		var wi walletInfo
-		if err := json.Unmarshal(bs, &wi); err != nil {
-			return err
-		}
-
-		tmpl, err := template.New("").Parse(format)
-		if err != nil {
-			return err
-		}
-
-		return tmpl.Execute(os.Stdout, wi)
+		return tmpl.Execute(os.Stdout, kio)
	},
}
+
+var keyinfoNewCmd = &cli.Command{
+	Name:      "new",
+	Usage:     "create a new keyinfo file of the provided type",
+	ArgsUsage: "[bls|secp256k1|libp2p-host]",
+	Description: `Keyinfo files are base16 encoded json structures containing a type
+string value, and a base64 encoded private key.
+
+Both the bls and secp256k1 keyfiles can be imported into a running lotus daemon using
+the 'lotus wallet import' command. Or imported to a non-running / unitialized repo using
+the 'lotus-shed keyinfo import' command. Libp2p host keys can only be imported using lotus-shed
+as lotus itself does not provide this functionality at the moment.`,
+	Flags: []cli.Flag{
+		&cli.StringFlag{
+			Name:  "output",
+			Value: "<type>-<addr>.keyinfo",
+			Usage: "output file formt",
+		},
+		&cli.BoolFlag{
+			Name:  "silent",
+			Value: false,
+			Usage: "do not print the address to stdout",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		if !cctx.Args().Present() {
+			return fmt.Errorf("please specify a type to generate")
+		}
+
+		keyType := cctx.Args().First()
+		flagOutput := cctx.String("output")
+
+		if i := SliceIndex(len(validTypes), func(i int) bool {
+			if keyType == validTypes[i] {
+				return true
+			}
+			return false
+		}); i == -1 {
+			return fmt.Errorf("invalid key type argument provided '%s'", keyType)
+		}
+
+		keystore := wallet.NewMemKeyStore()
+
+		var keyAddr string
+		var keyInfo types.KeyInfo
+
+		switch keyType {
+		case lp2p.KTLibp2pHost:
+			sk, err := lp2p.PrivKey(keystore)
+			if err != nil {
+				return err
+			}
+
+			ki, err := keystore.Get(lp2p.KLibp2pHost)
+			if err != nil {
+				return err
+			}
+
+			peerid, err := peer.IDFromPrivateKey(sk)
+			if err != nil {
+				return err
+			}
+
+			keyAddr = peerid.String()
+			keyInfo = ki
+
+			break
+		case wallet.KTSecp256k1:
+		case wallet.KTBLS:
+			key, err := wallet.GenerateKey(wallet.ActSigType(keyType))
+			if err != nil {
+				return err
+			}
+
+			keyAddr = key.Address.String()
+			keyInfo = key.KeyInfo
+
+			break
+		}
+
+		filename := flagOutput
+		filename = strings.ReplaceAll(filename, "<addr>", keyAddr)
+		filename = strings.ReplaceAll(filename, "<type>", keyType)
+
+		file, err := os.Create(filename)
+		if err != nil {
+			return err
+		}
+
+		defer func() {
+			if err := file.Close(); err != nil {
+				log.Warnf("failed to close output file: %w", err)
+			}
+		}()
+
+		bytes, err := json.Marshal(keyInfo)
+		if err != nil {
+			return err
+		}
+
+		encoded := hex.EncodeToString(bytes)
+		if _, err := file.Write([]byte(encoded)); err != nil {
+			return err
+		}
+
+		if !cctx.Bool("silent") {
+			fmt.Println(keyAddr)
+		}
+
+		return nil
+	},
+}
+
+func SliceIndex(length int, fn func(i int) bool) int {
+	for i := 0; i < length; i++ {
+		if fn(i) {
+			return i
+		}
+	}
+
+	return -1
+}
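The keyinfo commands above all share the same on-disk format: a hex (base16) encoded JSON document that unmarshals into `types.KeyInfo`. A standalone, illustrative sketch of reading such a file outside the CLI; `readKeyInfo` and the `wallet.keyinfo` filename are hypothetical:

```go
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/filecoin-project/lotus/chain/types"
)

// readKeyInfo decodes a keyinfo file: hex-encoded JSON carrying a type string
// and the raw private key bytes.
func readKeyInfo(path string) (*types.KeyInfo, error) {
	encoded, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	decoded, err := hex.DecodeString(strings.TrimSpace(string(encoded)))
	if err != nil {
		return nil, err
	}
	var ki types.KeyInfo
	if err := json.Unmarshal(decoded, &ki); err != nil {
		return nil, err
	}
	return &ki, nil
}

func main() {
	ki, err := readKeyInfo("wallet.keyinfo")
	if err != nil {
		panic(err)
	}
	fmt.Println(ki.Type)
}
```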
@ -19,7 +19,6 @@ func main() {
		base16Cmd,
		bitFieldCmd,
		keyinfoCmd,
-		peerkeyCmd,
		noncefix,
		bigIntParseCmd,
		staterootStatsCmd,
@ -1,101 +0,0 @@
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-	"strings"
-
-	"github.com/urfave/cli/v2"
-
-	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/node/modules/lp2p"
-	"github.com/libp2p/go-libp2p-core/peer"
-)
-
-type keystore struct {
-	set  bool
-	info types.KeyInfo
-}
-
-func (ks *keystore) Put(name string, info types.KeyInfo) error {
-	ks.info = info
-	ks.set = true
-
-	return nil
-}
-
-func (ks *keystore) Get(name string) (types.KeyInfo, error) {
-	if !ks.set {
-		return types.KeyInfo{}, types.ErrKeyInfoNotFound
-	}
-
-	return ks.info, nil
-}
-
-func (ks *keystore) Delete(name string) error {
-	panic("Implement me")
-}
-
-func (ks *keystore) List() ([]string, error) {
-	panic("Implement me")
-}
-
-var peerkeyCmd = &cli.Command{
-	Name:        "peerkey",
-	Description: "create libp2p host key",
-	Flags: []cli.Flag{
-		&cli.StringFlag{
-			Name:  "output",
-			Value: "<peerid>.peerkey",
-			Usage: "Output file format",
-		},
-		&cli.BoolFlag{
-			Name:  "silent",
-			Value: false,
-			Usage: "Do not print peerid at end",
-		},
-	},
-	Action: func(cctx *cli.Context) error {
-		output := cctx.String("output")
-		ks := keystore{}
-
-		sk, err := lp2p.PrivKey(&ks)
-		if err != nil {
-			return err
-		}
-
-		bs, err := json.Marshal(ks.info)
-		if err != nil {
-			return err
-		}
-
-		peerid, err := peer.IDFromPrivateKey(sk)
-		if err != nil {
-			return err
-		}
-
-		output = strings.ReplaceAll(output, "<peerid>", peerid.String())
-
-		f, err := os.Create(output)
-		if err != nil {
-			return err
-		}
-
-		defer func() {
-			if err := f.Close(); err != nil {
-				log.Warnf("failed to close output file: %w", err)
-			}
-		}()
-
-		if _, err := f.Write(bs); err != nil {
-			return err
-		}
-
-		if !cctx.Bool("silent") {
-			fmt.Println(peerid.String())
-		}
-
-		return nil
-	},
-}
@ -128,7 +128,7 @@ var infoCmd = &cli.Command{
		if expWinChance > 1 {
			expWinChance = 1
		}
-		winRate := time.Duration(float64(time.Second*build.BlockDelay) / expWinChance)
+		winRate := time.Duration(float64(time.Second*time.Duration(build.BlockDelaySecs)) / expWinChance)
		winPerDay := float64(time.Hour*24) / float64(winRate)

		fmt.Print("Expected block win rate: ")
@ -50,33 +50,100 @@ func GetCidEncoder(cctx *cli.Context) (cidenc.Encoder, error) {
	return e, nil
}

-var enableCmd = &cli.Command{
-	Name:   "enable",
-	Usage:  "Configure the miner to consider storage deal proposals",
-	Flags:  []cli.Flag{},
-	Action: func(cctx *cli.Context) error {
-		api, closer, err := lcli.GetStorageMinerAPI(cctx)
-		if err != nil {
-			return err
-		}
-		defer closer()
-
-		return api.DealsSetAcceptingStorageDeals(lcli.DaemonContext(cctx), true)
-	},
+var storageDealSelectionCmd = &cli.Command{
+	Name:  "selection",
+	Usage: "Configure acceptance criteria for storage deal proposals",
+	Subcommands: []*cli.Command{
+		storageDealSelectionShowCmd,
+		storageDealSelectionResetCmd,
+		storageDealSelectionRejectCmd,
+	},
}

-var disableCmd = &cli.Command{
-	Name:  "disable",
-	Usage: "Configure the miner to reject all storage deal proposals",
-	Flags: []cli.Flag{},
+var storageDealSelectionShowCmd = &cli.Command{
+	Name:  "list",
+	Usage: "List storage deal proposal selection criteria",
	Action: func(cctx *cli.Context) error {
-		api, closer, err := lcli.GetStorageMinerAPI(cctx)
+		smapi, closer, err := lcli.GetStorageMinerAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

-		return api.DealsSetAcceptingStorageDeals(lcli.DaemonContext(cctx), false)
+		onlineOk, err := smapi.DealsConsiderOnlineStorageDeals(lcli.DaemonContext(cctx))
+		if err != nil {
+			return err
+		}
+
+		offlineOk, err := smapi.DealsConsiderOfflineStorageDeals(lcli.DaemonContext(cctx))
+		if err != nil {
+			return err
+		}
+
+		fmt.Printf("considering online storage deals: %t\n", onlineOk)
+		fmt.Printf("considering offline storage deals: %t\n", offlineOk)
+
+		return nil
+	},
+}
+
+var storageDealSelectionResetCmd = &cli.Command{
+	Name:  "reset",
+	Usage: "Reset storage deal proposal selection criteria to default values",
+	Action: func(cctx *cli.Context) error {
+		smapi, closer, err := lcli.GetStorageMinerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		err = smapi.DealsSetConsiderOnlineStorageDeals(lcli.DaemonContext(cctx), true)
+		if err != nil {
+			return err
+		}
+
+		err = smapi.DealsSetConsiderOfflineStorageDeals(lcli.DaemonContext(cctx), true)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	},
+}
+
+var storageDealSelectionRejectCmd = &cli.Command{
+	Name:  "reject",
+	Usage: "Configure criteria which necessitate automatic rejection",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name: "online",
+		},
+		&cli.BoolFlag{
+			Name: "offline",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		smapi, closer, err := lcli.GetStorageMinerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		if cctx.Bool("online") {
+			err = smapi.DealsSetConsiderOnlineStorageDeals(lcli.DaemonContext(cctx), false)
+			if err != nil {
+				return err
+			}
+		}
+
+		if cctx.Bool("offline") {
+			err = smapi.DealsSetConsiderOfflineStorageDeals(lcli.DaemonContext(cctx), false)
+			if err != nil {
+				return err
+			}
+		}
+
+		return nil
	},
}

@ -123,7 +190,7 @@ var setAskCmd = &cli.Command{
			return xerrors.Errorf("cannot parse duration: %w", err)
		}

-		qty := dur.Seconds() / build.BlockDelay
+		qty := dur.Seconds() / float64(build.BlockDelaySecs)

		min, err := units.RAMInBytes(cctx.String("min-piece-size"))
		if err != nil {

@ -208,7 +275,7 @@ var getAskCmd = &cli.Command{
		dlt := ask.Expiry - head.Height()
		rem := "<expired>"
		if dlt > 0 {
-			rem = (time.Second * time.Duration(dlt*build.BlockDelay)).String()
+			rem = (time.Second * time.Duration(int64(dlt)*int64(build.BlockDelaySecs))).String()
		}

		fmt.Fprintf(w, "%s\t%s\t%s\t%d\t%s\t%d\n", ask.Price, types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))), types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))), ask.Expiry, rem, ask.SeqNo)

@ -223,8 +290,7 @@ var storageDealsCmd = &cli.Command{
	Subcommands: []*cli.Command{
		dealsImportDataCmd,
		dealsListCmd,
-		enableCmd,
-		disableCmd,
+		storageDealSelectionCmd,
		setAskCmd,
		getAskCmd,
		setBlocklistCmd,
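The selection switches the CLI drives above are ordinary StorageMiner API methods, so they can also be toggled programmatically. A hedged sketch, assuming a caller that already holds an `api.StorageMiner` client (obtained the same way the CLI does, via `lcli.GetStorageMinerAPI`); `rejectOfflineStorageDeals` and the package name are hypothetical:

```go
package dealpolicy

import (
	"context"

	"github.com/filecoin-project/lotus/api"
)

// rejectOfflineStorageDeals flips the miner to rejecting offline storage deal
// proposals, but only if it is currently considering them.
func rejectOfflineStorageDeals(ctx context.Context, m api.StorageMiner) error {
	considering, err := m.DealsConsiderOfflineStorageDeals(ctx)
	if err != nil {
		return err
	}
	if !considering {
		// already rejecting offline storage deal proposals; nothing to do
		return nil
	}
	return m.DealsSetConsiderOfflineStorageDeals(ctx, false)
}
```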
@ -211,11 +211,11 @@ var provingInfoCmd = &cli.Command{
func epochTime(curr, e abi.ChainEpoch) string {
	switch {
	case curr > e:
-		return fmt.Sprintf("%d (%s ago)", e, time.Second*time.Duration(build.BlockDelay*(curr-e)))
+		return fmt.Sprintf("%d (%s ago)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e)))
	case curr == e:
		return fmt.Sprintf("%d (now)", e)
	case curr < e:
-		return fmt.Sprintf("%d (in %s)", e, time.Second*time.Duration(build.BlockDelay*(e-curr)))
+		return fmt.Sprintf("%d (in %s)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr)))
	}

	panic("math broke")
@ -1,6 +1,8 @@
package main

import (
+	"fmt"
+
	lcli "github.com/filecoin-project/lotus/cli"
	"github.com/urfave/cli/v2"
)

@ -9,37 +11,103 @@ var retrievalDealsCmd = &cli.Command{
	Name:  "retrieval-deals",
	Usage: "Manage retrieval deals and related configuration",
	Subcommands: []*cli.Command{
-		enableRetrievalCmd,
-		disableRetrievalCmd,
+		retrievalDealSelectionCmd,
	},
}

-var enableRetrievalCmd = &cli.Command{
-	Name:  "enable",
-	Usage: "Configure the miner to consider retrieval deal proposals",
-	Flags: []cli.Flag{},
+var retrievalDealSelectionCmd = &cli.Command{
+	Name:  "selection",
+	Usage: "Configure acceptance criteria for retrieval deal proposals",
+	Subcommands: []*cli.Command{
+		retrievalDealSelectionShowCmd,
+		retrievalDealSelectionResetCmd,
+		retrievalDealSelectionRejectCmd,
+	},
+}
+
+var retrievalDealSelectionShowCmd = &cli.Command{
+	Name:  "list",
+	Usage: "List retrieval deal proposal selection criteria",
	Action: func(cctx *cli.Context) error {
-		api, closer, err := lcli.GetStorageMinerAPI(cctx)
+		smapi, closer, err := lcli.GetStorageMinerAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

-		return api.DealsSetAcceptingRetrievalDeals(lcli.DaemonContext(cctx), true)
+		onlineOk, err := smapi.DealsConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx))
+		if err != nil {
+			return err
+		}
+
+		offlineOk, err := smapi.DealsConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx))
+		if err != nil {
+			return err
+		}
+
+		fmt.Printf("considering online retrieval deals: %t\n", onlineOk)
+		fmt.Printf("considering offline retrieval deals: %t\n", offlineOk)
+
+		return nil
	},
}

-var disableRetrievalCmd = &cli.Command{
-	Name:  "disable",
-	Usage: "Configure the miner to reject all retrieval deal proposals",
-	Flags: []cli.Flag{},
+var retrievalDealSelectionResetCmd = &cli.Command{
+	Name:  "reset",
+	Usage: "Reset retrieval deal proposal selection criteria to default values",
	Action: func(cctx *cli.Context) error {
-		api, closer, err := lcli.GetStorageMinerAPI(cctx)
+		smapi, closer, err := lcli.GetStorageMinerAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

-		return api.DealsSetAcceptingRetrievalDeals(lcli.DaemonContext(cctx), false)
+		err = smapi.DealsSetConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx), true)
+		if err != nil {
+			return err
+		}
+
+		err = smapi.DealsSetConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx), true)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	},
+}
+
+var retrievalDealSelectionRejectCmd = &cli.Command{
+	Name:  "reject",
+	Usage: "Configure criteria which necessitate automatic rejection",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name: "online",
+		},
+		&cli.BoolFlag{
+			Name: "offline",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		smapi, closer, err := lcli.GetStorageMinerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		if cctx.Bool("online") {
+			err = smapi.DealsSetConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx), false)
+			if err != nil {
+				return err
+			}
+		}
+
+		if cctx.Bool("offline") {
+			err = smapi.DealsSetConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx), false)
+			if err != nil {
+				return err
+			}
+		}
+
+		return nil
	},
}
@ -82,7 +82,7 @@ func init() {
		ep.WinCount = ep.ComputeWinCount(types.NewInt(1), types.NewInt(1))
	}

-	uts := head.MinTimestamp() + uint64(build.BlockDelay)
+	uts := head.MinTimestamp() + uint64(build.BlockDelaySecs)
	nheight := head.Height() + 1
	blk, err := api.MinerCreateBlock(ctx, &lapi.BlockTemplate{
		addr, head.Key(), ticket, ep, nil, msgs, nheight, uts, gen.ValidWpostForTesting,
@ -43,7 +43,7 @@ Assemble a `FullTipSet` populated with the single block received earlier.
This function contains most of the validation logic grouped in separate closures that run asynchronously, this list does not reflect validation order then.

`V:` Block `Timestamp`:
-  * Is not bigger than current time plus `AllowableClockDrift`.
+  * Is not bigger than current time plus `AllowableClockDriftSecs`.
  * Is not smaller than previous block's `Timestamp` plus `BlockDelay` (including null blocks).

### Messages
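The two timestamp bullets in the documentation hunk above translate fairly directly into code. A rough, non-authoritative sketch (not the actual validator), assuming the renamed constants `build.AllowableClockDriftSecs` and `build.BlockDelaySecs` and that exactly `h.Height - parent.Height()` epochs separate a block from its parent tipset:

```go
package validation

import (
	"fmt"
	"time"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
)

// checkBlockTimestamp mirrors the two `V:` timestamp checks described above.
func checkBlockTimestamp(h *types.BlockHeader, parent *types.TipSet, now time.Time) error {
	// Not bigger than current time plus the allowable clock drift.
	if h.Timestamp > uint64(now.Unix())+build.AllowableClockDriftSecs {
		return fmt.Errorf("block timestamp is too far in the future")
	}

	// Not smaller than the parent tipset's timestamp plus one block delay per
	// elapsed epoch, which also covers any null rounds in between.
	epochs := uint64(h.Height - parent.Height())
	if h.Timestamp < parent.MinTimestamp()+build.BlockDelaySecs*epochs {
		return fmt.Errorf("block was generated too soon after its parent")
	}
	return nil
}
```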
@ -38,7 +38,7 @@ If you see this, that means your computer is too slow and your blocks are not in
## Error: No space left on device

```sh
-lotus-storage-miner pledge-sector
+lotus-storage-miner sectors pledge
# No space left on device (os error 28)
```

@ -51,7 +51,8 @@ If you suspect that your GPU is not being used, first make sure it is properly c
First, to watch GPU utilization run `nvtop` in one terminal, then in a separate terminal, run:

```sh
-lotus-bench --sector-size=2KiB
+make bench
+./bench sealing --sector-size=2KiB
```

This process uses a fair amount of GPU, and generally takes ~4 minutes to complete. If you do not see any activity in nvtop from lotus during the entire process, it is likely something is misconfigured with your GPU.

2 extern/filecoin-ffi vendored
@ -1 +1 @@
-Subproject commit ca281af0b6c00314382a75ae869e5cb22c83655b
+Subproject commit 5342c7c97d1a1df4650629d14f2823d52889edd9
6 go.mod
@ -23,12 +23,12 @@ require (
	github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
	github.com/filecoin-project/go-data-transfer v0.3.0
	github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5
-	github.com/filecoin-project/go-fil-markets v0.3.0
+	github.com/filecoin-project/go-fil-markets v0.3.1
	github.com/filecoin-project/go-jsonrpc v0.1.1-0.20200602181149-522144ab4e24
	github.com/filecoin-project/go-paramfetch v0.0.2-0.20200605171344-fcac609550ca
	github.com/filecoin-project/go-statestore v0.1.0
	github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
-	github.com/filecoin-project/sector-storage v0.0.0-20200626110003-76ce3b9d9496
+	github.com/filecoin-project/sector-storage v0.0.0-20200701092105-a2de752a3324
	github.com/filecoin-project/specs-actors v0.7.1-0.20200629045128-8b4965e097bb
	github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea
	github.com/filecoin-project/storage-fsm v0.0.0-20200626155829-408c9a7b3336

@ -65,7 +65,7 @@ require (
	github.com/ipfs/go-ipld-cbor v0.0.5-0.20200428170625-a0bd04d3cbdf
	github.com/ipfs/go-ipld-format v0.2.0
	github.com/ipfs/go-log v1.0.4
-	github.com/ipfs/go-log/v2 v2.1.2-0.20200609205458-f8d20c392cb7
+	github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4
	github.com/ipfs/go-merkledag v0.3.1
	github.com/ipfs/go-path v0.0.7
	github.com/ipfs/go-unixfs v0.2.4
13 go.sum
@ -235,8 +235,8 @@ github.com/filecoin-project/go-data-transfer v0.3.0 h1:BwBrrXu9Unh9JjjX4GAc5FfzU
github.com/filecoin-project/go-data-transfer v0.3.0/go.mod h1:cONglGP4s/d+IUQw5mWZrQK+FQATQxr3AXzi4dRh0l4=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA=
-github.com/filecoin-project/go-fil-markets v0.3.0 h1:7iCGiuTSia4f4DmOn3s96NWUwMNSOI0ZHel/XgeApAQ=
-github.com/filecoin-project/go-fil-markets v0.3.0/go.mod h1:UXsXi43AyUQ5ieb4yIaLgk4PVt7TAbl1UCccuNw+7ds=
+github.com/filecoin-project/go-fil-markets v0.3.1 h1:YLH4ck4hQrKBpQ3fo0VcA2SXqiAosizxBJ/QHYgR9aE=
+github.com/filecoin-project/go-fil-markets v0.3.1/go.mod h1:UY+/zwNXHN73HcrN6HxNDpv6KKM6ehqfCuE9vK9khF8=
github.com/filecoin-project/go-jsonrpc v0.1.1-0.20200602181149-522144ab4e24 h1:Jc7vkplmZYVuaEcSXGHDwefvZIdoyyaoGDLqSr8Svms=
github.com/filecoin-project/go-jsonrpc v0.1.1-0.20200602181149-522144ab4e24/go.mod h1:j6zV//WXIIY5kky873Q3iIKt/ViOE8rcijovmpxrXzM=
github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6 h1:92PET+sx1Hb4W/8CgFwGuxaKbttwY+UNspYZTvXY0vs=

@ -256,11 +256,12 @@ github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/
github.com/filecoin-project/sector-storage v0.0.0-20200615154852-728a47ab99d6/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM=
github.com/filecoin-project/sector-storage v0.0.0-20200625154333-98ef8e4ef246 h1:NfYQRmVRe0LzlNbK5Ket3vbBOwFD5TvtcNtfo/Sd8mg=
github.com/filecoin-project/sector-storage v0.0.0-20200625154333-98ef8e4ef246/go.mod h1:8f0hWDzzIi1hKs4IVKH9RnDsO4LEHVz8BNat0okDOuY=
-github.com/filecoin-project/sector-storage v0.0.0-20200626110003-76ce3b9d9496 h1:Z/7aDwuIJmarXsR7gcTOU5+ypu0ch1c8KVaSLlWMmDw=
-github.com/filecoin-project/sector-storage v0.0.0-20200626110003-76ce3b9d9496/go.mod h1:8f0hWDzzIi1hKs4IVKH9RnDsO4LEHVz8BNat0okDOuY=
+github.com/filecoin-project/sector-storage v0.0.0-20200701092105-a2de752a3324 h1:MmxTkkhQMGWH3fr4BPpGoFQocG1dTvAAbkL3VEaZcsY=
+github.com/filecoin-project/sector-storage v0.0.0-20200701092105-a2de752a3324/go.mod h1:r12d7tsmJKz8QDGoCvl65Ay2al6mOgDqxAGUxbyrgMs=
github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA=
github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y=
github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
+github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
github.com/filecoin-project/specs-actors v0.7.0 h1:tldjW8pFiJcMtyGPsXmPoFdbN/18mKW3BpEMlO4NJAc=
github.com/filecoin-project/specs-actors v0.7.0/go.mod h1:+z0htZu/wLBDbOLcQTKKUEC2rkUTFzL2KJ/bRAVWkws=
github.com/filecoin-project/specs-actors v0.7.1-0.20200629045128-8b4965e097bb h1:09FJswK8kHQSJtVD49ZjwePjoS4wGrqR/Y+tl7TN10w=

@ -595,8 +596,8 @@ github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBW
github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw=
github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw=
-github.com/ipfs/go-log/v2 v2.1.2-0.20200609205458-f8d20c392cb7 h1:LtL/rvdfbKSthZGmAAD9o4KKg6HA6Qn8gXCCdgnj7lw=
-github.com/ipfs/go-log/v2 v2.1.2-0.20200609205458-f8d20c392cb7/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
+github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4 h1:3bijxqzQ1O9yg7gd7Aqk80oaEvsJ+uXw0zSvi2qR3Jw=
+github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA=
github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto=
github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
38
lib/sigs/bls/bls_bench_test.go
Normal file
@ -0,0 +1,38 @@
+package bls
+
+import (
+	"crypto/rand"
+	"github.com/filecoin-project/go-address"
+	"testing"
+)
+
+func BenchmarkBLSSign(b *testing.B) {
+	signer := blsSigner{}
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		pk, _ := signer.GenPrivate()
+		randMsg := make([]byte, 32)
+		rand.Read(randMsg)
+		b.StartTimer()
+
+		_, _ = signer.Sign(pk, randMsg)
+	}
+}
+
+func BenchmarkBLSVerify(b *testing.B) {
+	signer := blsSigner{}
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		randMsg := make([]byte, 32)
+		rand.Read(randMsg)
+
+		priv, _ := signer.GenPrivate()
+		pk, _ := signer.ToPublic(priv)
+		addr, _ := address.NewBLSAddress(pk)
+		sig, _ := signer.Sign(priv, randMsg)
+
+		b.StartTimer()
+
+		_ = signer.Verify(sig, addr, randMsg)
+	}
+}
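The two benchmarks above follow the standard testing.B pattern: the timer is stopped while keys and random messages are generated, and restarted around the Sign/Verify call so only the signature work is measured. They should be runnable with the usual Go tooling, for example `go test -bench=BLS -benchmem ./lib/sigs/bls/`; the exact invocation is an assumption and not part of this diff.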
@ -19,7 +19,7 @@ import (
 	"github.com/filecoin-project/specs-actors/actors/crypto"
 	"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
 	"github.com/ipfs/go-cid"
-	"github.com/libp2p/go-libp2p-core/peer"
+	"github.com/multiformats/go-multiaddr"

 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/events"
@ -80,7 +80,15 @@ func (n *ClientNodeAdapter) ListStorageProviders(ctx context.Context, encodedTs
 			return nil, err
 		}

-		storageProviderInfo := utils.NewStorageProviderInfo(addr, mi.Worker, mi.SectorSize, peer.ID(mi.PeerId))
+		multiaddrs := make([]multiaddr.Multiaddr, 0, len(mi.Multiaddrs))
+		for _, a := range mi.Multiaddrs {
+			maddr, err := multiaddr.NewMultiaddrBytes(a)
+			if err != nil {
+				return nil, err
+			}
+			multiaddrs = append(multiaddrs, maddr)
+		}
+		storageProviderInfo := utils.NewStorageProviderInfo(addr, mi.Worker, mi.SectorSize, mi.PeerId, multiaddrs)
 		out = append(out, &storageProviderInfo)
 	}

@ -322,7 +330,7 @@ func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider
 		}
 	}

-	if err := c.ev.Called(checkFunc, called, revert, build.MessageConfidence+1, build.SealRandomnessLookbackLimit, matchEvent); err != nil {
+	if err := c.ev.Called(checkFunc, called, revert, int(build.MessageConfidence+1), build.SealRandomnessLookbackLimit, matchEvent); err != nil {
 		return xerrors.Errorf("failed to set up called handler: %w", err)
 	}

@ -405,4 +413,25 @@ func (n *ClientNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb
 	return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, nil)
 }

+func (n *ClientNodeAdapter) GetMinerInfo(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*storagemarket.StorageProviderInfo, error) {
+	tsk, err := types.TipSetKeyFromBytes(encodedTs)
+	if err != nil {
+		return nil, err
+	}
+	mi, err := n.StateMinerInfo(ctx, addr, tsk)
+	if err != nil {
+		return nil, err
+	}
+	multiaddrs := make([]multiaddr.Multiaddr, 0, len(mi.Multiaddrs))
+	for _, a := range mi.Multiaddrs {
+		maddr, err := multiaddr.NewMultiaddrBytes(a)
+		if err != nil {
+			return nil, err
+		}
+		multiaddrs = append(multiaddrs, maddr)
+	}
+	out := utils.NewStorageProviderInfo(addr, mi.Worker, mi.SectorSize, mi.PeerId, multiaddrs)
+	return &out, nil
+}
+
 var _ storagemarket.StorageClientNode = &ClientNodeAdapter{}
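The same byte-slice-to-multiaddr conversion now appears in ListStorageProviders, GetMinerInfo and (below) ClientStartDeal. A minimal sketch of how it could be factored out; the helper name, its location, and the assumption that the raw addresses are plain byte slices are all illustrative and not part of this diff:

package utils // hypothetical location; any of the three call sites would do

import "github.com/multiformats/go-multiaddr"

// multiaddrsFromBytes mirrors the conversion loop in the hunks above and
// rejects any address that fails to decode.
func multiaddrsFromBytes(raw [][]byte) ([]multiaddr.Multiaddr, error) {
	out := make([]multiaddr.Multiaddr, 0, len(raw))
	for _, a := range raw {
		maddr, err := multiaddr.NewMultiaddrBytes(a)
		if err != nil {
			return nil, err
		}
		out = append(out, maddr)
	}
	return out, nil
}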
@ -321,7 +321,7 @@ func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provide

 	}

-	if err := n.ev.Called(checkFunc, called, revert, build.MessageConfidence+1, build.SealRandomnessLookbackLimit, matchEvent); err != nil {
+	if err := n.ev.Called(checkFunc, called, revert, int(build.MessageConfidence+1), build.SealRandomnessLookbackLimit, matchEvent); err != nil {
 		return xerrors.Errorf("failed to set up called handler: %w", err)
 	}

@ -6,17 +6,20 @@ import (
 	"github.com/filecoin-project/specs-actors/actors/abi/big"
 	"github.com/filecoin-project/specs-actors/actors/builtin/market"
 	peer "github.com/libp2p/go-libp2p-core/peer"
+	"github.com/multiformats/go-multiaddr"

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-fil-markets/storagemarket"
 )

-func NewStorageProviderInfo(address address.Address, miner address.Address, sectorSize abi.SectorSize, peer peer.ID) storagemarket.StorageProviderInfo {
+func NewStorageProviderInfo(address address.Address, miner address.Address, sectorSize abi.SectorSize, peer peer.ID, addrs []multiaddr.Multiaddr) storagemarket.StorageProviderInfo {

 	return storagemarket.StorageProviderInfo{
 		Address:    address,
 		Worker:     miner,
 		SectorSize: uint64(sectorSize),
 		PeerID:     peer,
+		Addrs:      addrs,
 	}
 }

@ -42,7 +42,7 @@ func NewMiner(api api.FullNode, epp gen.WinningPoStProver, addr address.Address)
 		address: addr,
 		waitFunc: func(ctx context.Context, baseTime uint64) (func(bool), error) {
 			// Wait around for half the block time in case other parents come in
-			deadline := baseTime + build.PropagationDelay
+			deadline := baseTime + build.PropagationDelaySecs
 			time.Sleep(time.Until(time.Unix(int64(deadline), 0)))

 			return func(bool) {}, nil
@ -150,7 +150,7 @@ func (m *Miner) mine(ctx context.Context) {
 		}
 		if base.TipSet.Equals(lastBase.TipSet) && lastBase.NullRounds == base.NullRounds {
 			log.Warnf("BestMiningCandidate from the previous round: %s (nulls:%d)", lastBase.TipSet.Cids(), lastBase.NullRounds)
-			m.niceSleep(build.BlockDelay * time.Second)
+			m.niceSleep(time.Duration(build.BlockDelaySecs) * time.Second)
 			continue
 		}

@ -194,7 +194,7 @@ func (m *Miner) mine(ctx context.Context) {
 			// has enough time to form.
 			//
 			// See: https://github.com/filecoin-project/lotus/issues/1845
-			nextRound := time.Unix(int64(base.TipSet.MinTimestamp()+uint64(build.BlockDelay*base.NullRounds))+int64(build.PropagationDelay), 0)
+			nextRound := time.Unix(int64(base.TipSet.MinTimestamp()+build.BlockDelaySecs*uint64(base.NullRounds))+int64(build.PropagationDelaySecs), 0)

 			select {
 			case <-time.After(time.Until(nextRound)):
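The recurring change in these mining hunks is that build.BlockDelay and build.PropagationDelay, previously usable directly in time arithmetic, are replaced by build.BlockDelaySecs and build.PropagationDelaySecs: plain uint64 second counts that must be converted explicitly. A minimal sketch of the two conversion patterns used above, with illustrative function and parameter names that are not part of this diff:

package main // illustrative only

import "time"

// sleepOneRound shows the Duration conversion used by m.niceSleep above:
// convert the second count to a time.Duration before multiplying by time.Second.
func sleepOneRound(blockDelaySecs uint64) {
	time.Sleep(time.Duration(blockDelaySecs) * time.Second)
}

// nextRoundTime shows the integer-seconds arithmetic used for nextRound above:
// stay in uint64 seconds and only convert to a time.Time at the end.
func nextRoundTime(minTimestamp, blockDelaySecs, propagationDelaySecs, nullRounds uint64) time.Time {
	return time.Unix(int64(minTimestamp+blockDelaySecs*nullRounds+propagationDelaySecs), 0)
}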
@ -255,12 +255,16 @@ func (m *Miner) hasPower(ctx context.Context, addr address.Address, ts *types.Ti
 	return mpower.MinerPower.QualityAdjPower.GreaterThanEqual(power.ConsensusMinerMinPower), nil
 }

-// mineOne mines a single block, and does so synchronously, if and only if we
-// have won the current round.
+// mineOne attempts to mine a single block, and does so synchronously, if and
+// only if we are eligible to mine.
 //
 // {hint/landmark}: This method coordinates all the steps involved in mining a
 // block, including the condition of whether mine or not at all depending on
 // whether we win the round or not.
+//
+// This method does the following:
+//
+// 1.
 func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg, error) {
 	log.Debugw("attempting to mine a block", "tipset", types.LogCids(base.TipSet.Cids()))
 	start := time.Now()
@ -352,7 +356,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg,
 	tCreateBlock := time.Now()
 	dur := tCreateBlock.Sub(start)
 	log.Infow("mined new block", "cid", b.Cid(), "height", b.Header.Height, "took", dur)
-	if dur > time.Second*build.BlockDelay {
+	if dur > time.Second*time.Duration(build.BlockDelaySecs) {
 		log.Warn("CAUTION: block production took longer than the block delay. Your computer may not be fast enough to keep up")

 		log.Warnw("tMinerBaseInfo ", "duration", tMBI.Sub(start))
@ -413,7 +417,7 @@ func (m *Miner) createBlock(base *MiningBase, addr address.Address, ticket *type
 		msgs = msgs[:build.BlockMessageLimit]
 	}

-	uts := base.TipSet.MinTimestamp() + uint64(build.BlockDelay*(base.NullRounds+1))
+	uts := base.TipSet.MinTimestamp() + build.BlockDelaySecs*(uint64(base.NullRounds)+1)

 	nheight := base.TipSet.Height() + base.NullRounds + 1

@ -313,12 +313,16 @@ func Online() Option {
 			Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver),
 			Override(new(*miner.Miner), modules.SetupBlockProducer),

-			Override(new(dtypes.AcceptingRetrievalDealsConfigFunc), modules.NewAcceptingRetrievalDealsConfigFunc),
-			Override(new(dtypes.SetAcceptingRetrievalDealsConfigFunc), modules.NewSetAcceptingRetrievalDealsConfigFunc),
-			Override(new(dtypes.AcceptingStorageDealsConfigFunc), modules.NewAcceptingStorageDealsConfigFunc),
-			Override(new(dtypes.SetAcceptingStorageDealsConfigFunc), modules.NewSetAcceptingStorageDealsConfigFunc),
+			Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), modules.NewConsiderOnlineStorageDealsConfigFunc),
+			Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc),
+			Override(new(dtypes.ConsiderOnlineRetrievalDealsConfigFunc), modules.NewConsiderOnlineRetrievalDealsConfigFunc),
+			Override(new(dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), modules.NewSetConsiderOnlineRetrievalDealsConfigFunc),
 			Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc),
 			Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc),
+			Override(new(dtypes.ConsiderOfflineStorageDealsConfigFunc), modules.NewConsiderOfflineStorageDealsConfigFunc),
+			Override(new(dtypes.SetConsiderOfflineStorageDealsConfigFunc), modules.NewSetConsideringOfflineStorageDealsFunc),
+			Override(new(dtypes.ConsiderOfflineRetrievalDealsConfigFunc), modules.NewConsiderOfflineRetrievalDealsConfigFunc),
+			Override(new(dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), modules.NewSetConsiderOfflineRetrievalDealsConfigFunc),
 		),
 	)
 }

@ -34,9 +34,11 @@ type StorageMiner struct {
 }

 type DealmakingConfig struct {
-	AcceptingStorageDeals   bool
-	AcceptingRetrievalDeals bool
-	PieceCidBlocklist       []cid.Cid
+	ConsiderOnlineStorageDeals    bool
+	ConsiderOfflineStorageDeals   bool
+	ConsiderOnlineRetrievalDeals  bool
+	ConsiderOfflineRetrievalDeals bool
+	PieceCidBlocklist             []cid.Cid
 }

 // API contains configs for API endpoint
@ -124,9 +126,11 @@ func DefaultStorageMiner() *StorageMiner {
 		},

 		Dealmaking: DealmakingConfig{
-			AcceptingStorageDeals:   true,
-			AcceptingRetrievalDeals: true,
-			PieceCidBlocklist:       []cid.Cid{},
+			ConsiderOnlineStorageDeals:    true,
+			ConsiderOfflineStorageDeals:   true,
+			ConsiderOnlineRetrievalDeals:  true,
+			ConsiderOfflineRetrievalDeals: true,
+			PieceCidBlocklist:             []cid.Cid{},
 		},
 	}
 	cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"

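For operators, the net effect of these two config hunks is that AcceptingStorageDeals splits into ConsiderOnlineStorageDeals and ConsiderOfflineStorageDeals, and AcceptingRetrievalDeals splits into ConsiderOnlineRetrievalDeals and ConsiderOfflineRetrievalDeals, all defaulting to true. A sketch of the resulting defaults as a Go literal (the surrounding variable is illustrative, not part of this diff):

// Defaults as produced by DefaultStorageMiner above: every category of deal
// is considered until the operator turns it off.
dealmaking := config.DealmakingConfig{
	ConsiderOnlineStorageDeals:    true,
	ConsiderOfflineStorageDeals:   true,
	ConsiderOnlineRetrievalDeals:  true,
	ConsiderOfflineRetrievalDeals: true,
	PieceCidBlocklist:             []cid.Cid{},
}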
@ -9,6 +9,7 @@ import (
 	basicnode "github.com/ipld/go-ipld-prime/node/basic"
 	"github.com/ipld/go-ipld-prime/traversal/selector"
 	"github.com/ipld/go-ipld-prime/traversal/selector/builder"
+	"github.com/multiformats/go-multiaddr"

 	"io"
 	"os"
@ -90,6 +91,15 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
 		return nil, xerrors.Errorf("failed getting peer ID: %w", err)
 	}

+	multiaddrs := make([]multiaddr.Multiaddr, 0, len(mi.Multiaddrs))
+	for _, a := range mi.Multiaddrs {
+		maddr, err := multiaddr.NewMultiaddrBytes(a)
+		if err != nil {
+			return nil, err
+		}
+		multiaddrs = append(multiaddrs, maddr)
+	}
+
 	md, err := a.StateMinerProvingDeadline(ctx, params.Miner, types.EmptyTSK)
 	if err != nil {
 		return nil, xerrors.Errorf("failed getting peer ID: %w", err)
@ -104,7 +114,7 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
 		return nil, xerrors.New("data doesn't fit in a sector")
 	}

-	providerInfo := utils.NewStorageProviderInfo(params.Miner, mi.Worker, mi.SectorSize, peer.ID(mi.PeerId))
+	providerInfo := utils.NewStorageProviderInfo(params.Miner, mi.Worker, mi.SectorSize, mi.PeerId, multiaddrs)

 	dealStart := params.DealStartEpoch
 	if dealStart <= 0 { // unset, or explicitly 'epoch undefined'
@ -431,7 +441,7 @@ func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref
 }

 func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) {
-	info := utils.NewStorageProviderInfo(miner, address.Undef, 0, p)
+	info := utils.NewStorageProviderInfo(miner, address.Undef, 0, p, nil)
 	signedAsk, err := a.SMDealClient.GetAsk(ctx, info)
 	if err != nil {
 		return nil, err
@ -122,7 +122,7 @@ func (a *CommonAPI) Version(context.Context) (api.Version, error) {
 		Version:    build.UserVersion(),
 		APIVersion: build.APIVersion,

-		BlockDelay: build.BlockDelay,
+		BlockDelay: build.BlockDelaySecs,
 	}, nil
 }

@ -43,10 +43,16 @@ type StorageMinerAPI struct {
 	StorageMgr *sectorstorage.Manager `optional:"true"`
 	*stores.Index

-	SetAcceptingStorageDealsConfigFunc        dtypes.SetAcceptingStorageDealsConfigFunc
-	SetAcceptingRetrievalDealsConfigFunc      dtypes.SetAcceptingRetrievalDealsConfigFunc
-	StorageDealPieceCidBlocklistConfigFunc    dtypes.StorageDealPieceCidBlocklistConfigFunc
-	SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc
+	ConsiderOnlineStorageDealsConfigFunc       dtypes.ConsiderOnlineStorageDealsConfigFunc
+	SetConsiderOnlineStorageDealsConfigFunc    dtypes.SetConsiderOnlineStorageDealsConfigFunc
+	ConsiderOnlineRetrievalDealsConfigFunc     dtypes.ConsiderOnlineRetrievalDealsConfigFunc
+	SetConsiderOnlineRetrievalDealsConfigFunc  dtypes.SetConsiderOnlineRetrievalDealsConfigFunc
+	StorageDealPieceCidBlocklistConfigFunc     dtypes.StorageDealPieceCidBlocklistConfigFunc
+	SetStorageDealPieceCidBlocklistConfigFunc  dtypes.SetStorageDealPieceCidBlocklistConfigFunc
+	ConsiderOfflineStorageDealsConfigFunc      dtypes.ConsiderOfflineStorageDealsConfigFunc
+	SetConsiderOfflineStorageDealsConfigFunc   dtypes.SetConsiderOfflineStorageDealsConfigFunc
+	ConsiderOfflineRetrievalDealsConfigFunc    dtypes.ConsiderOfflineRetrievalDealsConfigFunc
+	SetConsiderOfflineRetrievalDealsConfigFunc dtypes.SetConsiderOfflineRetrievalDealsConfigFunc
 }

 func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) {
@ -229,12 +235,36 @@ func (sm *StorageMinerAPI) DealsList(ctx context.Context) ([]storagemarket.Stora
 	return sm.StorageProvider.ListDeals(ctx)
 }

-func (sm *StorageMinerAPI) DealsSetAcceptingStorageDeals(ctx context.Context, b bool) error {
-	return sm.SetAcceptingStorageDealsConfigFunc(b)
+func (sm *StorageMinerAPI) DealsConsiderOnlineStorageDeals(ctx context.Context) (bool, error) {
+	return sm.ConsiderOnlineStorageDealsConfigFunc()
 }

-func (sm *StorageMinerAPI) DealsSetAcceptingRetrievalDeals(ctx context.Context, b bool) error {
-	return sm.SetAcceptingRetrievalDealsConfigFunc(b)
+func (sm *StorageMinerAPI) DealsSetConsiderOnlineStorageDeals(ctx context.Context, b bool) error {
+	return sm.SetConsiderOnlineStorageDealsConfigFunc(b)
+}
+
+func (sm *StorageMinerAPI) DealsConsiderOnlineRetrievalDeals(ctx context.Context) (bool, error) {
+	return sm.ConsiderOnlineRetrievalDealsConfigFunc()
+}
+
+func (sm *StorageMinerAPI) DealsSetConsiderOnlineRetrievalDeals(ctx context.Context, b bool) error {
+	return sm.SetConsiderOnlineRetrievalDealsConfigFunc(b)
+}
+
+func (sm *StorageMinerAPI) DealsConsiderOfflineStorageDeals(ctx context.Context) (bool, error) {
+	return sm.ConsiderOfflineStorageDealsConfigFunc()
+}
+
+func (sm *StorageMinerAPI) DealsSetConsiderOfflineStorageDeals(ctx context.Context, b bool) error {
+	return sm.SetConsiderOfflineStorageDealsConfigFunc(b)
+}
+
+func (sm *StorageMinerAPI) DealsConsiderOfflineRetrievalDeals(ctx context.Context) (bool, error) {
+	return sm.ConsiderOfflineRetrievalDealsConfigFunc()
+}
+
+func (sm *StorageMinerAPI) DealsSetConsiderOfflineRetrievalDeals(ctx context.Context, b bool) error {
+	return sm.SetConsiderOfflineRetrievalDealsConfigFunc(b)
 }

 func (sm *StorageMinerAPI) DealsImportData(ctx context.Context, deal cid.Cid, fname string) error {
@ -10,27 +10,43 @@ import (
 type MinerAddress address.Address
 type MinerID abi.ActorID

-// AcceptingStorageDealsConfigFunc is a function which reads from miner config
-// to determine if the user has disabled storage deals (or not).
-type AcceptingStorageDealsConfigFunc func() (bool, error)
+// ConsiderOnlineStorageDealsConfigFunc is a function which reads from miner
+// config to determine if the user has disabled storage deals (or not).
+type ConsiderOnlineStorageDealsConfigFunc func() (bool, error)

-// SetAcceptingStorageDealsConfigFunc is a function which is used to disable or
-// enable storage deal acceptance.
-type SetAcceptingStorageDealsConfigFunc func(bool) error
+// SetConsiderOnlineStorageDealsConfigFunc is a function which is used to
+// disable or enable storage deal acceptance.
+type SetConsiderOnlineStorageDealsConfigFunc func(bool) error

-// AcceptingRetrievalDealsConfigFunc is a function which reads from miner config
-// to determine if the user has disabled retrieval acceptance (or not).
-type AcceptingRetrievalDealsConfigFunc func() (bool, error)
+// ConsiderOnlineRetrievalDealsConfigFunc is a function which reads from miner
+// config to determine if the user has disabled retrieval acceptance (or not).
+type ConsiderOnlineRetrievalDealsConfigFunc func() (bool, error)

-// SetAcceptingRetrievalDealsConfigFunc is a function which is used to disable
-// or enable retrieval deal acceptance.
-type SetAcceptingRetrievalDealsConfigFunc func(bool) error
+// SetConsiderOnlineRetrievalDealsConfigFunc is a function which is used to
+// disable or enable retrieval deal acceptance.
+type SetConsiderOnlineRetrievalDealsConfigFunc func(bool) error

-// StorageDealPieceCidBlocklistConfigFunc is a function which reads from miner config
-// to obtain a list of CIDs for which the storage miner will not accept storage
-// proposals.
+// StorageDealPieceCidBlocklistConfigFunc is a function which reads from miner
+// config to obtain a list of CIDs for which the storage miner will not accept
+// storage proposals.
 type StorageDealPieceCidBlocklistConfigFunc func() ([]cid.Cid, error)

 // SetStorageDealPieceCidBlocklistConfigFunc is a function which is used to set a
 // list of CIDs for which the storage miner will reject deal proposals.
 type SetStorageDealPieceCidBlocklistConfigFunc func([]cid.Cid) error
+
+// ConsiderOfflineStorageDealsConfigFunc is a function which reads from miner
+// config to determine if the user has disabled storage deals (or not).
+type ConsiderOfflineStorageDealsConfigFunc func() (bool, error)
+
+// SetConsiderOfflineStorageDealsConfigFunc is a function which is used to
+// disable or enable storage deal acceptance.
+type SetConsiderOfflineStorageDealsConfigFunc func(bool) error
+
+// ConsiderOfflineRetrievalDealsConfigFunc is a function which reads from miner
+// config to determine if the user has disabled retrieval acceptance (or not).
+type ConsiderOfflineRetrievalDealsConfigFunc func() (bool, error)
+
+// SetConsiderOfflineRetrievalDealsConfigFunc is a function which is used to
+// disable or enable retrieval deal acceptance.
+type SetConsiderOfflineRetrievalDealsConfigFunc func(bool) error
@ -19,7 +19,10 @@ import (

 var log = logging.Logger("p2pnode")

-const kstorePrivkey = "libp2p-host"
+const (
+	KLibp2pHost  = "libp2p-host"
+	KTLibp2pHost = KLibp2pHost
+)

 type Libp2pOpts struct {
 	fx.Out
@ -28,7 +31,7 @@ type Libp2pOpts struct {
 }

 func PrivKey(ks types.KeyStore) (crypto.PrivKey, error) {
-	k, err := ks.Get(kstorePrivkey)
+	k, err := ks.Get(KLibp2pHost)
 	if err == nil {
 		return crypto.UnmarshalPrivateKey(k.PrivateKey)
 	}
@ -44,8 +47,8 @@ func PrivKey(ks types.KeyStore) (crypto.PrivKey, error) {
 		return nil, err
 	}

-	if err := ks.Put(kstorePrivkey, types.KeyInfo{
-		Type:       kstorePrivkey,
+	if err := ks.Put(KLibp2pHost, types.KeyInfo{
+		Type:       KTLibp2pHost,
 		PrivateKey: kbytes,
 	}); err != nil {
 		return nil, err
@ -123,6 +123,6 @@ func RandomBeacon(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.Random
 		return nil, err
 	}

-	//return beacon.NewMockBeacon(build.BlockDelay * time.Second)
-	return drand.NewDrandBeacon(gen.Timestamp, build.BlockDelay, p.PubSub, p.DrandConfig)
+	//return beacon.NewMockBeacon(build.BlockDelaySecs * time.Second)
+	return drand.NewDrandBeacon(gen.Timestamp, build.BlockDelaySecs, p.PubSub, p.DrandConfig)
 }

@ -315,7 +315,7 @@ func NewStorageAsk(ctx helpers.MetricsCtx, fapi lapi.FullNode, ds dtypes.Metadat
 	return storedAsk, nil
 }

-func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Config, storedAsk *storedask.StoredAsk, h host.Host, ds dtypes.MetadataDS, ibs dtypes.StagingBlockstore, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, dataTransfer dtypes.ProviderDataTransfer, spn storagemarket.StorageProviderNode, isAcceptingFunc dtypes.AcceptingStorageDealsConfigFunc, blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc) (storagemarket.StorageProvider, error) {
+func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Config, storedAsk *storedask.StoredAsk, h host.Host, ds dtypes.MetadataDS, ibs dtypes.StagingBlockstore, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, dataTransfer dtypes.ProviderDataTransfer, spn storagemarket.StorageProviderNode, onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc, offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc, blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc) (storagemarket.StorageProvider, error) {
 	net := smnet.NewFromLibp2pHost(h)
 	store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(r.Path()))
 	if err != nil {
@ -323,14 +323,24 @@ func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Con
 	}

 	opt := storageimpl.CustomDealDecisionLogic(func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) {
-		b, err := isAcceptingFunc()
+		b, err := onlineOk()
 		if err != nil {
 			return false, "miner error", err
 		}

-		if !b {
-			log.Warnf("storage deal acceptance disabled; rejecting storage deal proposal from client: %s", deal.Client.String())
-			return false, "miner is not accepting storage deals", nil
+		if deal.Ref != nil && deal.Ref.TransferType != storagemarket.TTManual && !b {
+			log.Warnf("online storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String())
+			return false, "miner is not considering online storage deals", nil
+		}
+
+		b, err = offlineOk()
+		if err != nil {
+			return false, "miner error", err
+		}
+
+		if deal.Ref != nil && deal.Ref.TransferType == storagemarket.TTManual && !b {
+			log.Warnf("offline storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String())
+			return false, "miner is not accepting offline storage deals", nil
 		}

 		blocklist, err := blocklistFunc()
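In other words, the single accept/reject switch becomes two independent gates keyed off the proposal's transfer type: deals whose data transfer is not TTManual are treated as online and gated by the online flag, while TTManual (import-based) deals are gated by the offline flag. A condensed sketch of that decision with hypothetical names; the real logic lives in the closure above:

package main // illustrative only

import "github.com/filecoin-project/go-fil-markets/storagemarket"

// acceptStorageDeal mirrors the two checks in the CustomDealDecisionLogic hunk.
func acceptStorageDeal(deal storagemarket.MinerDeal, onlineOk, offlineOk bool) (bool, string) {
	isOnline := deal.Ref != nil && deal.Ref.TransferType != storagemarket.TTManual
	isOffline := deal.Ref != nil && deal.Ref.TransferType == storagemarket.TTManual
	if isOnline && !onlineOk {
		return false, "miner is not considering online storage deals"
	}
	if isOffline && !offlineOk {
		return false, "miner is not accepting offline storage deals"
	}
	return true, ""
}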
@ -357,7 +367,7 @@ func StorageProvider(minerAddress dtypes.MinerAddress, ffiConfig *ffiwrapper.Con
 }

 // RetrievalProvider creates a new retrieval provider attached to the provider blockstore
-func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sectorstorage.SectorManager, full lapi.FullNode, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, ibs dtypes.StagingBlockstore, isAcceptingFunc dtypes.AcceptingRetrievalDealsConfigFunc) (retrievalmarket.RetrievalProvider, error) {
+func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sectorstorage.SectorManager, full lapi.FullNode, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, ibs dtypes.StagingBlockstore, onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) (retrievalmarket.RetrievalProvider, error) {
 	adapter := retrievaladapter.NewRetrievalProviderNode(miner, sealer, full)

 	maddr, err := minerAddrFromDS(ds)
@ -368,14 +378,23 @@ func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sectorstorage.S
 	netwk := rmnet.NewFromLibp2pHost(h)

 	opt := retrievalimpl.DealDeciderOpt(func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) {
-		b, err := isAcceptingFunc()
+		b, err := onlineOk()
 		if err != nil {
 			return false, "miner error", err
 		}

 		if !b {
-			log.Warn("retrieval deal acceptance disabled; rejecting retrieval deal proposal from client")
-			return false, "miner is not accepting retrieval deals", nil
+			log.Warn("online retrieval deal consideration disabled; rejecting retrieval deal proposal from client")
+			return false, "miner is not accepting online retrieval deals", nil
+		}
+
+		b, err = offlineOk()
+		if err != nil {
+			return false, "miner error", err
+		}
+
+		if !b {
+			log.Info("offline retrieval has not been implemented yet")
 		}

 		return true, "", nil
@ -416,37 +435,37 @@ func StorageAuth(ctx helpers.MetricsCtx, ca lapi.Common) (sectorstorage.StorageA
 	return sectorstorage.StorageAuth(headers), nil
 }

-func NewAcceptingRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.AcceptingRetrievalDealsConfigFunc, error) {
+func NewConsiderOnlineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineStorageDealsConfigFunc, error) {
 	return func() (out bool, err error) {
 		err = readCfg(r, func(cfg *config.StorageMiner) {
-			out = cfg.Dealmaking.AcceptingRetrievalDeals
+			out = cfg.Dealmaking.ConsiderOnlineStorageDeals
 		})
 		return
 	}, nil
 }

-func NewSetAcceptingRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.SetAcceptingRetrievalDealsConfigFunc, error) {
+func NewSetConsideringOnlineStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderOnlineStorageDealsConfigFunc, error) {
 	return func(b bool) (err error) {
 		err = mutateCfg(r, func(cfg *config.StorageMiner) {
-			cfg.Dealmaking.AcceptingRetrievalDeals = b
+			cfg.Dealmaking.ConsiderOnlineStorageDeals = b
 		})
 		return
 	}, nil
 }

-func NewAcceptingStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.AcceptingStorageDealsConfigFunc, error) {
+func NewConsiderOnlineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineRetrievalDealsConfigFunc, error) {
 	return func() (out bool, err error) {
 		err = readCfg(r, func(cfg *config.StorageMiner) {
-			out = cfg.Dealmaking.AcceptingStorageDeals
+			out = cfg.Dealmaking.ConsiderOnlineRetrievalDeals
 		})
 		return
 	}, nil
 }

-func NewSetAcceptingStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.SetAcceptingStorageDealsConfigFunc, error) {
+func NewSetConsiderOnlineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.SetConsiderOnlineRetrievalDealsConfigFunc, error) {
 	return func(b bool) (err error) {
 		err = mutateCfg(r, func(cfg *config.StorageMiner) {
-			cfg.Dealmaking.AcceptingStorageDeals = b
+			cfg.Dealmaking.ConsiderOnlineRetrievalDeals = b
 		})
 		return
 	}, nil
@ -470,6 +489,42 @@ func NewSetStorageDealPieceCidBlocklistConfigFunc(r repo.LockedRepo) (dtypes.Set
 	}, nil
 }

+func NewConsiderOfflineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOfflineStorageDealsConfigFunc, error) {
+	return func() (out bool, err error) {
+		err = readCfg(r, func(cfg *config.StorageMiner) {
+			out = cfg.Dealmaking.ConsiderOfflineStorageDeals
+		})
+		return
+	}, nil
+}
+
+func NewSetConsideringOfflineStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderOfflineStorageDealsConfigFunc, error) {
+	return func(b bool) (err error) {
+		err = mutateCfg(r, func(cfg *config.StorageMiner) {
+			cfg.Dealmaking.ConsiderOfflineStorageDeals = b
+		})
+		return
+	}, nil
+}
+
+func NewConsiderOfflineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOfflineRetrievalDealsConfigFunc, error) {
+	return func() (out bool, err error) {
+		err = readCfg(r, func(cfg *config.StorageMiner) {
+			out = cfg.Dealmaking.ConsiderOfflineRetrievalDeals
+		})
+		return
+	}, nil
+}
+
+func NewSetConsiderOfflineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.SetConsiderOfflineRetrievalDealsConfigFunc, error) {
+	return func(b bool) (err error) {
+		err = mutateCfg(r, func(cfg *config.StorageMiner) {
+			cfg.Dealmaking.ConsiderOfflineRetrievalDeals = b
+		})
+		return
+	}, nil
+}
+
 func readCfg(r repo.LockedRepo, accessor func(*config.StorageMiner)) error {
 	raw, err := r.Config()
 	if err != nil {
@ -8,5 +8,5 @@ import (
 )

 func RandomBeacon() (beacon.RandomBeacon, error) {
-	return beacon.NewMockBeacon(build.BlockDelay * time.Second), nil
+	return beacon.NewMockBeacon(time.Duration(build.BlockDelaySecs) * time.Second), nil
 }

@ -349,7 +349,7 @@ func mockSbBuilder(t *testing.T, nFull int, storage []test.StorageMiner) ([]test
 	templ := &genesis.Template{
 		Accounts:  genaccs,
 		Miners:    genms,
-		Timestamp: uint64(time.Now().Unix() - (build.BlockDelay * 20000)),
+		Timestamp: uint64(time.Now().Unix()) - (build.BlockDelaySecs * 20000),
 	}

 	// END PRESEAL SECTION

@ -114,7 +114,7 @@ sync_complete:
 		// If we get within 20 blocks of the current exected block height we
 		// consider sync complete. Block propagation is not always great but we still
 		// want to be recording stats as soon as we can
-		if timestampDelta < build.BlockDelay*20 {
+		if timestampDelta < int64(build.BlockDelaySecs)*20 {
 			return nil
 		}
 	}
