Merge branch 'master' into nonsense/integrate-testplans-to-lotus

Anton Evangelatov 2020-11-23 20:20:44 +01:00
commit fd640864cb
50 changed files with 784 additions and 596 deletions

View File

@ -5,9 +5,9 @@ all: build
unexport GOFLAGS
GOVERSION:=$(shell go version | cut -d' ' -f 3 | awk -F. '{printf "%d%03d", $$2, $$3}')
ifeq ($(shell expr $(GOVERSION) \< 15005), 1)
$(warning Your Golang version is go 1.$(shell expr $(GOVERSION) / 1000).$(shell expr $(GOVERSION) % 1000))
GOVERSION:=$(shell go version | cut -d' ' -f 3 | sed 's/^go//' | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}')
ifeq ($(shell expr $(GOVERSION) \< 1015005), 1)
$(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell expr $(GOVERSION) % 1000000 / 1000).$(shell expr $(GOVERSION) % 1000))
$(error Update Golang to at least version 1.15.5)
endif

View File

@ -10,7 +10,7 @@
<a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a>
<a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a>
<a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.14.7-blue.svg" /></a>
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.15.5-blue.svg" /></a>
<br>
</p>

View File

@ -46,6 +46,11 @@ type Common interface {
// usage and current rate per protocol
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error)
// ConnectionGater API
NetBlockAdd(ctx context.Context, acl NetBlockList) error
NetBlockRemove(ctx context.Context, acl NetBlockList) error
NetBlockList(ctx context.Context) (NetBlockList, error)
// MethodGroup: Common
// ID returns peerID of libp2p node backing this API

View File

@ -60,6 +60,9 @@ type CommonStruct struct {
NetBandwidthStatsByPeer func(ctx context.Context) (map[string]metrics.Stats, error) `perm:"read"`
NetBandwidthStatsByProtocol func(ctx context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"`
NetAgentVersion func(ctx context.Context, p peer.ID) (string, error) `perm:"read"`
NetBlockAdd func(ctx context.Context, acl api.NetBlockList) error `perm:"admin"`
NetBlockRemove func(ctx context.Context, acl api.NetBlockList) error `perm:"admin"`
NetBlockList func(ctx context.Context) (api.NetBlockList, error) `perm:"read"`
ID func(context.Context) (peer.ID, error) `perm:"read"`
Version func(context.Context) (api.Version, error) `perm:"read"`
@ -495,6 +498,18 @@ func (c *CommonStruct) NetBandwidthStatsByProtocol(ctx context.Context) (map[pro
return c.Internal.NetBandwidthStatsByProtocol(ctx)
}
func (c *CommonStruct) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
return c.Internal.NetBlockAdd(ctx, acl)
}
func (c *CommonStruct) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
return c.Internal.NetBlockRemove(ctx, acl)
}
func (c *CommonStruct) NetBlockList(ctx context.Context) (api.NetBlockList, error) {
return c.Internal.NetBlockList(ctx)
}
func (c *CommonStruct) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
return c.Internal.NetAgentVersion(ctx, p)
}

View File

@ -107,3 +107,9 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta
}
return channel
}
type NetBlockList struct {
Peers []peer.ID
IPAddrs []string
IPSubnets []string
}
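
The NetBlockList payload above backs the three new connection-gating endpoints. A minimal sketch of driving them from client code, assuming some api.Common handle `capi` is already available (the handle, function name and IP address are illustrative, not part of this change):

```go
// Illustrative only: exercise the new connection-gating API.
// `capi` stands for any api.Common implementation (e.g. an RPC client
// carrying an admin token); 203.0.113.7 is a documentation address.
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
	"github.com/libp2p/go-libp2p-core/peer"
)

func blockAndList(ctx context.Context, capi api.Common, pid peer.ID) error {
	// NetBlockAdd/NetBlockRemove are admin-only; NetBlockList needs read perms.
	if err := capi.NetBlockAdd(ctx, api.NetBlockList{
		Peers:   []peer.ID{pid},
		IPAddrs: []string{"203.0.113.7"},
	}); err != nil {
		return err
	}

	acl, err := capi.NetBlockList(ctx)
	if err != nil {
		return err
	}
	fmt.Println("blocked peers:", acl.Peers)
	fmt.Println("blocked IPs:", acl.IPAddrs)
	return nil
}
```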

View File

@ -28,7 +28,7 @@ const (
func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
miner0.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner2.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
AddSupportedProofTypes(types...)

View File

@ -38,20 +38,26 @@ func newPeerTracker(lc fx.Lifecycle, h host.Host, pmgr *peermgr.PeerMgr) *bsPeer
pmgr: pmgr,
}
sub, err := h.EventBus().Subscribe(new(peermgr.NewFilPeer))
evtSub, err := h.EventBus().Subscribe(new(peermgr.FilPeerEvt))
if err != nil {
panic(err)
}
go func() {
for newPeer := range sub.Out() {
bsPt.addPeer(newPeer.(peermgr.NewFilPeer).Id)
for evt := range evtSub.Out() {
pEvt := evt.(peermgr.FilPeerEvt)
switch pEvt.Type {
case peermgr.AddFilPeerEvt:
bsPt.addPeer(pEvt.ID)
case peermgr.RemoveFilPeerEvt:
bsPt.removePeer(pEvt.ID)
}
}
}()
lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
return sub.Close()
return evtSub.Close()
},
})
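
The tracker now consumes a single FilPeerEvt stream instead of only NewFilPeer additions. For orientation, a hedged sketch of the producing side is below; the event type and its fields come from the hunk above, while the emitter wiring and the peermgr import path are assumptions:

```go
// Sketch only: emit the peer add/remove events the tracker goroutine consumes.
package example

import (
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/peer"

	"github.com/filecoin-project/lotus/lib/peermgr" // assumed path, not shown in this diff
)

func announcePeer(h host.Host, pid peer.ID) error {
	em, err := h.EventBus().Emitter(new(peermgr.FilPeerEvt))
	if err != nil {
		return err
	}
	defer em.Close() //nolint:errcheck

	// The tracker above adds the peer on AddFilPeerEvt...
	if err := em.Emit(peermgr.FilPeerEvt{Type: peermgr.AddFilPeerEvt, ID: pid}); err != nil {
		return err
	}
	// ...and drops it again on RemoveFilPeerEvt.
	return em.Emit(peermgr.FilPeerEvt{Type: peermgr.RemoveFilPeerEvt, ID: pid})
}
```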

View File

@ -240,10 +240,13 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted
// check if RBF passes
minPrice := ComputeMinRBF(exms.Message.GasPremium)
if types.BigCmp(m.Message.GasPremium, minPrice) >= 0 {
log.Infow("add with RBF", "oldpremium", exms.Message.GasPremium,
log.Debugw("add with RBF", "oldpremium", exms.Message.GasPremium,
"newpremium", m.Message.GasPremium, "addr", m.Message.From, "nonce", m.Message.Nonce)
} else {
log.Info("add with duplicate nonce")
log.Debugf("add with duplicate nonce. message from %s with nonce %d already in mpool,"+
" increase GasPremium to %s from %s to trigger replace by fee: %s",
m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium,
ErrRBFTooLowPremium)
return false, xerrors.Errorf("message from %s with nonce %d already in mpool,"+
" increase GasPremium to %s from %s to trigger replace by fee: %w",
m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium,
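
The new error (and matching debug log) spells out the replace-by-fee rule: a message with a duplicate nonce is only accepted if its GasPremium clears a minimum derived from the existing message's premium. A toy, self-contained sketch of that threshold follows; the 25% bump and the +1 attoFIL floor are assumptions for illustration, the authoritative logic being ComputeMinRBF in the message pool:

```go
package main

import (
	"fmt"
	"math/big"
)

// minRBFPremium mirrors the idea behind ComputeMinRBF: a replacement must
// offer at least oldPremium * (1 + bump) + 1. The 25% bump is assumed here.
func minRBFPremium(oldPremium *big.Int) *big.Int {
	bump := new(big.Int).Div(new(big.Int).Mul(oldPremium, big.NewInt(25)), big.NewInt(100))
	min := new(big.Int).Add(oldPremium, bump)
	return min.Add(min, big.NewInt(1))
}

func main() {
	old := big.NewInt(100000) // attoFIL, made-up value
	// A same-nonce replacement below this premium is rejected with
	// ErrRBFTooLowPremium, as in the error path above.
	fmt.Println(minRBFPremium(old)) // 125001
}
```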

View File

@ -6,30 +6,13 @@ import (
"encoding/binary"
"math"
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"golang.org/x/xerrors"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/specs-actors/actors/migration/nv3"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/state"
@ -38,6 +21,17 @@ import (
"github.com/filecoin-project/lotus/chain/vm"
bstore "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/bufbstore"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/migration/nv3"
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"golang.org/x/xerrors"
)
// UpgradeFunc is a migration function run at every upgrade.
@ -83,8 +77,9 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
Expensive: true,
Migration: UpgradeActorsV2,
}, {
Height: build.UpgradeTapeHeight,
Network: network.Version5,
Height: build.UpgradeTapeHeight,
Network: network.Version5,
Migration: nil,
}, {
Height: build.UpgradeLiftoffHeight,
Network: network.Version5,

View File

@ -726,6 +726,12 @@ func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error {
log.Debug("tried to add block to tipset tracker that was already there")
return nil
}
h, err := cs.GetBlock(oc)
if err == nil && h != nil {
if h.Miner == b.Miner {
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", b.Miner, b.Height, b.Cid(), h.Cid())
}
}
}
cs.tipsets[b.Height] = append(tss, b.Cid())
@ -797,7 +803,7 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error)
return types.NewTipSet(all)
}
inclMiners := map[address.Address]bool{b.Miner: true}
inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()}
for _, bhc := range tsets {
if bhc == b.Cid() {
continue
@ -808,14 +814,14 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error)
return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
}
if inclMiners[h.Miner] {
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache", h.Miner, h.Height)
if cid, found := inclMiners[h.Miner]; found {
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid)
continue
}
if types.CidArrsEqual(h.Parents, b.Parents) {
all = append(all, h)
inclMiners[h.Miner] = true
inclMiners[h.Miner] = bhc
}
}

View File

@ -6,9 +6,18 @@ import (
"fmt"
"time"
"golang.org/x/xerrors"
address "github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node/impl/client"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
lru "github.com/hashicorp/golang-lru"
blocks "github.com/ipfs/go-block-format"
bserv "github.com/ipfs/go-blockservice"
@ -21,19 +30,7 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node/impl/client"
"golang.org/x/xerrors"
)
var log = logging.Logger("sub")
@ -342,16 +339,14 @@ func (bv *BlockValidator) validateLocalBlock(ctx context.Context, msg *pubsub.Me
if size := msg.Size(); size > 1<<20-1<<15 {
log.Errorf("ignoring oversize block (%dB)", size)
ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, "oversize_block"))
stats.Record(ctx, metrics.BlockValidationFailure.M(1))
recordFailure(ctx, metrics.BlockValidationFailure, "oversize_block")
return pubsub.ValidationIgnore
}
blk, what, err := bv.decodeAndCheckBlock(msg)
if err != nil {
log.Errorf("got invalid local block: %s", err)
ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, what))
stats.Record(ctx, metrics.BlockValidationFailure.M(1))
recordFailure(ctx, metrics.BlockValidationFailure, what)
return pubsub.ValidationIgnore
}
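
Both failure paths now go through a recordFailure helper whose body is outside this hunk. Inferred from the two call sites, it presumably looks roughly like this (sketch only; the file already imports context, stats, tag and metrics):

```go
// Plausible shape of the helper, inferred from the call sites above;
// the actual implementation may differ slightly.
func recordFailure(ctx context.Context, metric *stats.Int64Measure, failureType string) {
	ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, failureType))
	stats.Record(ctx, metric.M(1))
}
```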

View File

@ -36,9 +36,9 @@ type ActorPredicate func(vmr.Runtime, rtt.VMActor) error
func ActorsVersionPredicate(ver actors.Version) ActorPredicate {
return func(rt vmr.Runtime, v rtt.VMActor) error {
nver := actors.VersionForNetwork(rt.NetworkVersion())
if nver != ver {
return xerrors.Errorf("actor %s is a version %d actor; chain only supports actor version %d at height %d", v.Code(), ver, nver, rt.CurrEpoch())
aver := actors.VersionForNetwork(rt.NetworkVersion())
if aver != ver {
return xerrors.Errorf("actor %s is a version %d actor; chain only supports actor version %d at height %d and nver %d", v.Code(), ver, aver, rt.CurrEpoch(), rt.NetworkVersion())
}
return nil
}

View File

@ -18,6 +18,7 @@ import (
"github.com/filecoin-project/go-address"
atypes "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/addrutil"
)
@ -34,6 +35,7 @@ var netCmd = &cli.Command{
netScores,
NetReachability,
NetBandwidthCmd,
NetBlockCmd,
},
}
@ -375,3 +377,202 @@ var NetBandwidthCmd = &cli.Command{
},
}
var NetBlockCmd = &cli.Command{
Name: "block",
Usage: "Manage network connection gating rules",
Subcommands: []*cli.Command{
NetBlockAddCmd,
NetBlockRemoveCmd,
NetBlockListCmd,
},
}
var NetBlockAddCmd = &cli.Command{
Name: "add",
Usage: "Add connection gating rules",
Subcommands: []*cli.Command{
NetBlockAddPeer,
NetBlockAddIP,
NetBlockAddSubnet,
},
}
var NetBlockAddPeer = &cli.Command{
Name: "peer",
Usage: "Block a peer",
ArgsUsage: "<Peer> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
var peers []peer.ID
for _, s := range cctx.Args().Slice() {
p, err := peer.Decode(s)
if err != nil {
return err
}
peers = append(peers, p)
}
return api.NetBlockAdd(ctx, atypes.NetBlockList{Peers: peers})
},
}
var NetBlockAddIP = &cli.Command{
Name: "ip",
Usage: "Block an IP address",
ArgsUsage: "<IP> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
return api.NetBlockAdd(ctx, atypes.NetBlockList{IPAddrs: cctx.Args().Slice()})
},
}
var NetBlockAddSubnet = &cli.Command{
Name: "subnet",
Usage: "Block an IP subnet",
ArgsUsage: "<CIDR> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
return api.NetBlockAdd(ctx, atypes.NetBlockList{IPSubnets: cctx.Args().Slice()})
},
}
var NetBlockRemoveCmd = &cli.Command{
Name: "remove",
Usage: "Remove connection gating rules",
Subcommands: []*cli.Command{
NetBlockRemovePeer,
NetBlockRemoveIP,
NetBlockRemoveSubnet,
},
}
var NetBlockRemovePeer = &cli.Command{
Name: "peer",
Usage: "Unblock a peer",
ArgsUsage: "<Peer> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
var peers []peer.ID
for _, s := range cctx.Args().Slice() {
p, err := peer.Decode(s)
if err != nil {
return err
}
peers = append(peers, p)
}
return api.NetBlockRemove(ctx, atypes.NetBlockList{Peers: peers})
},
}
var NetBlockRemoveIP = &cli.Command{
Name: "ip",
Usage: "Unblock an IP address",
ArgsUsage: "<IP> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
return api.NetBlockRemove(ctx, atypes.NetBlockList{IPAddrs: cctx.Args().Slice()})
},
}
var NetBlockRemoveSubnet = &cli.Command{
Name: "subnet",
Usage: "Unblock an IP subnet",
ArgsUsage: "<CIDR> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
return api.NetBlockRemove(ctx, atypes.NetBlockList{IPSubnets: cctx.Args().Slice()})
},
}
var NetBlockListCmd = &cli.Command{
Name: "list",
Usage: "list connection gating rules",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
acl, err := api.NetBlockList(ctx)
if err != nil {
return err
}
if len(acl.Peers) != 0 {
sort.Slice(acl.Peers, func(i, j int) bool {
return strings.Compare(string(acl.Peers[i]), string(acl.Peers[j])) > 0
})
fmt.Println("Blocked Peers:")
for _, p := range acl.Peers {
fmt.Printf("\t%s\n", p)
}
}
if len(acl.IPAddrs) != 0 {
sort.Slice(acl.IPAddrs, func(i, j int) bool {
return strings.Compare(acl.IPAddrs[i], acl.IPAddrs[j]) < 0
})
fmt.Println("Blocked IPs:")
for _, a := range acl.IPAddrs {
fmt.Printf("\t%s\n", a)
}
}
if len(acl.IPSubnets) != 0 {
sort.Slice(acl.IPSubnets, func(i, j int) bool {
return strings.Compare(acl.IPSubnets[i], acl.IPSubnets[j]) < 0
})
fmt.Println("Blocked Subnets:")
for _, n := range acl.IPSubnets {
fmt.Printf("\t%s\n", n)
}
}
return nil
},
}

View File

@ -72,6 +72,7 @@ var stateCmd = &cli.Command{
stateMinerInfo,
stateMarketCmd,
stateExecTraceCmd,
stateNtwkVersionCmd,
},
}
@ -1758,6 +1759,9 @@ var stateSectorCmd = &cli.Command{
if err != nil {
return err
}
if si == nil {
return xerrors.Errorf("sector %d for miner %s not found", sid, maddr)
}
fmt.Println("SectorNumber: ", si.SectorNumber)
fmt.Println("SealProof: ", si.SealProof)
@ -1831,3 +1835,35 @@ var stateMarketBalanceCmd = &cli.Command{
return nil
},
}
var stateNtwkVersionCmd = &cli.Command{
Name: "network-version",
Usage: "Returns the network version",
Action: func(cctx *cli.Context) error {
if cctx.Args().Present() {
return ShowHelp(cctx, fmt.Errorf("doesn't expect any arguments"))
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
ts, err := LoadTipSet(ctx, cctx, api)
if err != nil {
return err
}
nv, err := api.StateNetworkVersion(ctx, ts.Key())
if err != nil {
return err
}
fmt.Printf("Network Version: %d\n", nv)
return nil
},
}

View File

@ -60,7 +60,7 @@ type TipSetExec struct {
var importBenchCmd = &cli.Command{
Name: "import",
Usage: "benchmark chain import and validation",
Usage: "Benchmark chain import and validation",
Subcommands: []*cli.Command{
importAnalyzeCmd,
},

View File

@ -40,8 +40,10 @@ import (
var log = logging.Logger("lotus-bench")
type BenchResults struct {
SectorSize abi.SectorSize
SectorSize abi.SectorSize
SectorNumber int
SealingSum SealingResult
SealingResults []SealingResult
PostGenerateCandidates time.Duration
@ -56,6 +58,26 @@ type BenchResults struct {
VerifyWindowPostHot time.Duration
}
func (bo *BenchResults) SumSealingTime() error {
if len(bo.SealingResults) <= 0 {
return xerrors.Errorf("BenchResults SealingResults len <= 0")
}
if len(bo.SealingResults) != bo.SectorNumber {
return xerrors.Errorf("BenchResults SealingResults len(%d) != bo.SectorNumber(%d)", len(bo.SealingResults), bo.SectorNumber)
}
for _, sealing := range bo.SealingResults {
bo.SealingSum.AddPiece += sealing.AddPiece
bo.SealingSum.PreCommit1 += sealing.PreCommit1
bo.SealingSum.PreCommit2 += sealing.PreCommit2
bo.SealingSum.Commit1 += sealing.Commit1
bo.SealingSum.Commit2 += sealing.Commit2
bo.SealingSum.Verify += sealing.Verify
bo.SealingSum.Unseal += sealing.Unseal
}
return nil
}
type SealingResult struct {
AddPiece time.Duration
PreCommit1 time.Duration
@ -95,12 +117,13 @@ func main() {
}
var sealBenchCmd = &cli.Command{
Name: "sealing",
Name: "sealing",
Usage: "Benchmark seal and winning post and window post",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "storage-dir",
Value: "~/.lotus-bench",
Usage: "Path to the storage directory that will store sectors long term",
Usage: "path to the storage directory that will store sectors long term",
},
&cli.StringFlag{
Name: "sector-size",
@ -132,16 +155,22 @@ var sealBenchCmd = &cli.Command{
Name: "skip-unseal",
Usage: "skip the unseal portion of the benchmark",
},
&cli.StringFlag{
Name: "ticket-preimage",
Usage: "ticket random",
},
&cli.StringFlag{
Name: "save-commit2-input",
Usage: "Save commit2 input to a file",
Usage: "save commit2 input to a file",
},
&cli.IntFlag{
Name: "num-sectors",
Usage: "select number of sectors to seal",
Value: 1,
},
&cli.IntFlag{
Name: "parallel",
Usage: "num run in parallel",
Value: 1,
},
},
@ -213,7 +242,8 @@ var sealBenchCmd = &cli.Command{
sectorSize := abi.SectorSize(sectorSizeInt)
// Only fetch parameters if actually needed
if !c.Bool("skip-commit2") {
skipc2 := c.Bool("skip-commit2")
if !skipc2 {
if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), uint64(sectorSize)); err != nil {
return xerrors.Errorf("getting params: %w", err)
}
@ -228,6 +258,8 @@ var sealBenchCmd = &cli.Command{
return err
}
sectorNumber := c.Int("num-sectors")
var sealTimings []SealingResult
var sealedSectors []saproof2.SectorInfo
@ -238,18 +270,11 @@ var sealBenchCmd = &cli.Command{
PreCommit2: 1,
Commit: 1,
}
sealTimings, sealedSectors, err = runSeals(sb, sbfs, c.Int("num-sectors"), parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), c.Bool("skip-commit2"), c.Bool("skip-unseal"))
sealTimings, sealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal"))
if err != nil {
return xerrors.Errorf("failed to run seals: %w", err)
}
}
beforePost := time.Now()
var challenge [32]byte
rand.Read(challenge[:])
if robench != "" {
} else {
// TODO: implement sbfs.List() and use that for all cases (preexisting sectorbuilder or not)
// TODO: this assumes we only ever benchmark a preseal
@ -282,10 +307,19 @@ var sealBenchCmd = &cli.Command{
bo := BenchResults{
SectorSize: sectorSize,
SectorNumber: sectorNumber,
SealingResults: sealTimings,
}
if err := bo.SumSealingTime(); err != nil {
return err
}
if !c.Bool("skip-commit2") {
var challenge [32]byte
rand.Read(challenge[:])
beforePost := time.Now()
if !skipc2 {
log.Info("generating winning post candidates")
wipt, err := spt(sectorSize).RegisteredWinningPoStProof()
if err != nil {
@ -420,21 +454,21 @@ var sealBenchCmd = &cli.Command{
fmt.Println(string(data))
} else {
fmt.Printf("----\nresults (v28) (%d)\n", sectorSize)
fmt.Printf("----\nresults (v28) SectorSize:(%d), SectorNumber:(%d)\n", sectorSize, sectorNumber)
if robench == "" {
fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingResults[0].AddPiece, bps(bo.SectorSize, bo.SealingResults[0].AddPiece)) // TODO: average across multiple sealings
fmt.Printf("seal: preCommit phase 1: %s (%s)\n", bo.SealingResults[0].PreCommit1, bps(bo.SectorSize, bo.SealingResults[0].PreCommit1))
fmt.Printf("seal: preCommit phase 2: %s (%s)\n", bo.SealingResults[0].PreCommit2, bps(bo.SectorSize, bo.SealingResults[0].PreCommit2))
fmt.Printf("seal: commit phase 1: %s (%s)\n", bo.SealingResults[0].Commit1, bps(bo.SectorSize, bo.SealingResults[0].Commit1))
fmt.Printf("seal: commit phase 2: %s (%s)\n", bo.SealingResults[0].Commit2, bps(bo.SectorSize, bo.SealingResults[0].Commit2))
fmt.Printf("seal: verify: %s\n", bo.SealingResults[0].Verify)
fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingSum.AddPiece, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.AddPiece))
fmt.Printf("seal: preCommit phase 1: %s (%s)\n", bo.SealingSum.PreCommit1, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.PreCommit1))
fmt.Printf("seal: preCommit phase 2: %s (%s)\n", bo.SealingSum.PreCommit2, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.PreCommit2))
fmt.Printf("seal: commit phase 1: %s (%s)\n", bo.SealingSum.Commit1, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Commit1))
fmt.Printf("seal: commit phase 2: %s (%s)\n", bo.SealingSum.Commit2, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Commit2))
fmt.Printf("seal: verify: %s\n", bo.SealingSum.Verify)
if !c.Bool("skip-unseal") {
fmt.Printf("unseal: %s (%s)\n", bo.SealingResults[0].Unseal, bps(bo.SectorSize, bo.SealingResults[0].Unseal))
fmt.Printf("unseal: %s (%s)\n", bo.SealingSum.Unseal, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Unseal))
}
fmt.Println("")
}
if !c.Bool("skip-commit2") {
fmt.Printf("generate candidates: %s (%s)\n", bo.PostGenerateCandidates, bps(bo.SectorSize*abi.SectorSize(len(bo.SealingResults)), bo.PostGenerateCandidates))
if !skipc2 {
fmt.Printf("generate candidates: %s (%s)\n", bo.PostGenerateCandidates, bps(bo.SectorSize, len(bo.SealingResults), bo.PostGenerateCandidates))
fmt.Printf("compute winning post proof (cold): %s\n", bo.PostWinningProofCold)
fmt.Printf("compute winning post proof (hot): %s\n", bo.PostWinningProofHot)
fmt.Printf("verify winning post proof (cold): %s\n", bo.VerifyWinningPostCold)
@ -467,8 +501,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
if numSectors%par.PreCommit1 != 0 {
return nil, nil, fmt.Errorf("parallelism factor must cleanly divide numSectors")
}
for i := abi.SectorNumber(1); i <= abi.SectorNumber(numSectors); i++ {
for i := abi.SectorNumber(0); i < abi.SectorNumber(numSectors); i++ {
sid := storage.SectorRef{
ID: abi.SectorID{
Miner: mid,
@ -489,7 +522,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
pieces = append(pieces, pi)
sealTimings[i-1].AddPiece = time.Since(start)
sealTimings[i].AddPiece = time.Since(start)
}
sectorsPerWorker := numSectors / par.PreCommit1
@ -498,10 +531,9 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
for wid := 0; wid < par.PreCommit1; wid++ {
go func(worker int) {
sealerr := func() error {
start := 1 + (worker * sectorsPerWorker)
start := worker * sectorsPerWorker
end := start + sectorsPerWorker
for i := abi.SectorNumber(start); i < abi.SectorNumber(end); i++ {
ix := int(i - 1)
sid := storage.SectorRef{
ID: abi.SectorID{
Miner: mid,
@ -516,8 +548,8 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
ticket := abi.SealRandomness(trand[:])
log.Infof("[%d] Running replication(1)...", i)
pieces := []abi.PieceInfo{pieces[ix]}
pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, pieces)
piece := []abi.PieceInfo{pieces[i]}
pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, piece)
if err != nil {
return xerrors.Errorf("commit: %w", err)
}
@ -535,7 +567,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
precommit2 := time.Now()
<-preCommit2Sema
sealedSectors[ix] = saproof2.SectorInfo{
sealedSectors[i] = saproof2.SectorInfo{
SealProof: sid.ProofType,
SectorNumber: i,
SealedCID: cids.Sealed,
@ -549,7 +581,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
commitSema <- struct{}{}
commitStart := time.Now()
log.Infof("[%d] Generating PoRep for sector (1)", i)
c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, pieces, cids)
c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, piece, cids)
if err != nil {
return err
}
@ -630,12 +662,12 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
}
unseal := time.Now()
sealTimings[ix].PreCommit1 = precommit1.Sub(start)
sealTimings[ix].PreCommit2 = precommit2.Sub(pc2Start)
sealTimings[ix].Commit1 = sealcommit1.Sub(commitStart)
sealTimings[ix].Commit2 = sealcommit2.Sub(sealcommit1)
sealTimings[ix].Verify = verifySeal.Sub(sealcommit2)
sealTimings[ix].Unseal = unseal.Sub(verifySeal)
sealTimings[i].PreCommit1 = precommit1.Sub(start)
sealTimings[i].PreCommit2 = precommit2.Sub(pc2Start)
sealTimings[i].Commit1 = sealcommit1.Sub(commitStart)
sealTimings[i].Commit2 = sealcommit2.Sub(sealcommit1)
sealTimings[i].Verify = verifySeal.Sub(sealcommit2)
sealTimings[i].Unseal = unseal.Sub(verifySeal)
}
return nil
}()
@ -658,8 +690,9 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
}
var proveCmd = &cli.Command{
Name: "prove",
Usage: "Benchmark a proof computation",
Name: "prove",
Usage: "Benchmark a proof computation",
ArgsUsage: "[input.json]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "no-gpu",
@ -711,8 +744,6 @@ var proveCmd = &cli.Command{
return err
}
start := time.Now()
ref := storage.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(mid),
@ -721,6 +752,9 @@ var proveCmd = &cli.Command{
ProofType: spt(abi.SectorSize(c2in.SectorSize)),
}
fmt.Printf("----\nstart proof computation\n")
start := time.Now()
proof, err := sb.SealCommit2(context.TODO(), ref, c2in.Phase1Out)
if err != nil {
return err
@ -733,13 +767,14 @@ var proveCmd = &cli.Command{
fmt.Printf("----\nresults (v28) (%d)\n", c2in.SectorSize)
dur := sealCommit2.Sub(start)
fmt.Printf("seal: commit phase 2: %s (%s)\n", dur, bps(abi.SectorSize(c2in.SectorSize), dur))
fmt.Printf("seal: commit phase 2: %s (%s)\n", dur, bps(abi.SectorSize(c2in.SectorSize), 1, dur))
return nil
},
}
func bps(data abi.SectorSize, d time.Duration) string {
bdata := new(big.Int).SetUint64(uint64(data))
func bps(sectorSize abi.SectorSize, sectorNum int, d time.Duration) string {
bdata := new(big.Int).SetUint64(uint64(sectorSize))
bdata = bdata.Mul(bdata, big.NewInt(int64(sectorNum)))
bdata = bdata.Mul(bdata, big.NewInt(time.Second.Nanoseconds()))
bps := bdata.Div(bdata, big.NewInt(d.Nanoseconds()))
return types.SizeStr(types.BigInt{Int: bps}) + "/s"
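
With the extra sectorNum parameter, bps now spreads the elapsed time over the combined size of all sectors. A made-up sanity check of the new signature:

```go
// Two 2 KiB sectors in 10 seconds: 4096 B / 10 s, printed as roughly "409 B/s".
// Values are invented purely to illustrate the revised signature.
fmt.Println(bps(abi.SectorSize(2048), 2, 10*time.Second))
```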

View File

@ -7,6 +7,8 @@ import (
"io/ioutil"
"os"
"github.com/filecoin-project/go-state-types/network"
"github.com/docker/go-units"
logging "github.com/ipfs/go-log/v2"
"github.com/mitchellh/go-homedir"
@ -127,7 +129,7 @@ var preSealCmd = &cli.Command{
}
sectorSize := abi.SectorSize(sectorSizeInt)
spt, err := miner.SealProofTypeFromSectorSize(sectorSize, build.NewestNetworkVersion)
spt, err := miner.SealProofTypeFromSectorSize(sectorSize, network.Version0)
if err != nil {
return err
}

View File

@ -19,9 +19,9 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
"github.com/filecoin-project/specs-storage/storage"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"

View File

@ -53,6 +53,9 @@
* [NetBandwidthStats](#NetBandwidthStats)
* [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer)
* [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol)
* [NetBlockAdd](#NetBlockAdd)
* [NetBlockList](#NetBlockList)
* [NetBlockRemove](#NetBlockRemove)
* [NetConnect](#NetConnect)
* [NetConnectedness](#NetConnectedness)
* [NetDisconnect](#NetDisconnect)
@ -798,6 +801,58 @@ Response:
}
```
### NetBlockAdd
Perms: admin
Inputs:
```json
[
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
]
```
Response: `{}`
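
A hypothetical raw JSON-RPC request for this method; Lotus exposes API methods under the `Filecoin.` namespace, and NetBlockAdd additionally requires an admin-permissioned token (the IP below is a documentation address):

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "Filecoin.NetBlockAdd",
  "params": [
    {
      "Peers": null,
      "IPAddrs": ["203.0.113.7"],
      "IPSubnets": null
    }
  ]
}
```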
### NetBlockList
Perms: read
Inputs: `null`
Response:
```json
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
```
### NetBlockRemove
Perms: admin
Inputs:
```json
[
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
]
```
Response: `{}`
### NetConnect

View File

@ -110,6 +110,9 @@
* [NetBandwidthStats](#NetBandwidthStats)
* [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer)
* [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol)
* [NetBlockAdd](#NetBlockAdd)
* [NetBlockList](#NetBlockList)
* [NetBlockRemove](#NetBlockRemove)
* [NetConnect](#NetConnect)
* [NetConnectedness](#NetConnectedness)
* [NetDisconnect](#NetDisconnect)
@ -2639,6 +2642,58 @@ Response:
}
```
### NetBlockAdd
Perms: admin
Inputs:
```json
[
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
]
```
Response: `{}`
### NetBlockList
Perms: read
Inputs: `null`
Response:
```json
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
```
### NetBlockRemove
Perms: admin
Inputs:
```json
[
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
]
```
Response: `{}`
### NetConnect

View File

@ -40,7 +40,7 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
}
if !locked {
log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector, "sealed")
log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector)
bad = append(bad, sector.ID)
return nil
}

View File

@ -1,53 +0,0 @@
package ffiwrapper
import (
"io"
"os"
"sync"
"golang.org/x/xerrors"
)
func ToReadableFile(r io.Reader, n int64) (*os.File, func() error, error) {
f, ok := r.(*os.File)
if ok {
return f, func() error { return nil }, nil
}
var w *os.File
f, w, err := os.Pipe()
if err != nil {
return nil, nil, err
}
var wait sync.Mutex
var werr error
wait.Lock()
go func() {
defer wait.Unlock()
var copied int64
copied, werr = io.CopyN(w, r, n)
if werr != nil {
log.Warnf("toReadableFile: copy error: %+v", werr)
}
err := w.Close()
if werr == nil && err != nil {
werr = err
log.Warnf("toReadableFile: close error: %+v", err)
return
}
if copied != n {
log.Warnf("copied different amount than expected: %d != %d", copied, n)
werr = xerrors.Errorf("copied different amount than expected: %d != %d", copied, n)
}
}()
return f, func() error {
wait.Lock()
return werr
}, nil
}

View File

@ -20,9 +20,10 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
"github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/lotus/extern/sector-storage/fr32"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)
var _ Storage = &Sealer{}
@ -175,7 +176,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi
}
func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, error) {
prf, werr, err := ToReadableFile(bytes.NewReader(in), int64(len(in)))
prf, werr, err := commpffi.ToReadableFile(bytes.NewReader(in), int64(len(in)))
if err != nil {
return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err)
}
@ -610,20 +611,6 @@ func (sb *Sealer) Remove(ctx context.Context, sector storage.SectorRef) error {
return xerrors.Errorf("not supported at this layer") // happens in localworker
}
func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, piece io.Reader, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) {
f, werr, err := ToReadableFile(piece, int64(pieceSize))
if err != nil {
return cid.Undef, err
}
pieceCID, err := ffi.GeneratePieceCIDFromFile(proofType, f, pieceSize)
if err != nil {
return cid.Undef, err
}
return pieceCID, werr()
}
func GetRequiredPadding(oldLength abi.PaddedPieceSize, newPieceLength abi.PaddedPieceSize) ([]abi.PaddedPieceSize, abi.PaddedPieceSize) {
padPieces := make([]abi.PaddedPieceSize, 0)

View File

@ -15,6 +15,8 @@ import (
"testing"
"time"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/ipfs/go-cid"
@ -465,7 +467,7 @@ func BenchmarkWriteWithAlignment(b *testing.B) {
for i := 0; i < b.N; i++ {
b.StopTimer()
rf, w, _ := ToReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, int(bt/2))), int64(bt))
rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, int(bt/2))), int64(bt))
tf, _ := ioutil.TempFile("/tmp/", "scrb-")
b.StartTimer()
@ -524,7 +526,7 @@ func TestGenerateUnsealedCID(t *testing.T) {
ups := int(abi.PaddedPieceSize(2048).Unpadded())
commP := func(b []byte) cid.Cid {
pf, werr, err := ToReadableFile(bytes.NewReader(b), int64(len(b)))
pf, werr, err := commpffi.ToReadableFile(bytes.NewReader(b), int64(len(b)))
require.NoError(t, err)
c, err := ffi.GeneratePieceCIDFromFile(pt, pf, abi.UnpaddedPieceSize(len(b)))

View File

@ -7,11 +7,12 @@ import (
"os"
"testing"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/fr32"
ffi "github.com/filecoin-project/filecoin-ffi"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
"github.com/filecoin-project/go-state-types/abi"
"github.com/stretchr/testify/require"
@ -29,7 +30,7 @@ func TestWriteTwoPcs(t *testing.T) {
buf := bytes.Repeat([]byte{0xab * byte(i)}, int(paddedSize.Unpadded()))
rawBytes = append(rawBytes, buf...)
rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf)))
rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(buf), int64(len(buf)))
_, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil)
if err != nil {

View File

@ -9,15 +9,15 @@ import (
"testing"
ffi "github.com/filecoin-project/filecoin-ffi"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
"github.com/filecoin-project/go-state-types/abi"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/fr32"
)
func padFFI(buf []byte) []byte {
rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf)))
rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(buf), int64(len(buf)))
tf, _ := ioutil.TempFile("/tmp/", "scrb-")
_, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil)

View File

@ -58,6 +58,10 @@ type SectorManager interface {
type WorkerID uuid.UUID // worker session UUID
var ClosedWorkerID = uuid.UUID{}
func (w WorkerID) String() string {
return uuid.UUID(w).String()
}
type Manager struct {
ls stores.LocalStorage
storage *stores.Remote

View File

@ -11,6 +11,7 @@ import (
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
@ -76,7 +77,7 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef,
var b bytes.Buffer
tr := io.TeeReader(r, &b)
c, err := ffiwrapper.GeneratePieceCIDFromFile(sectorID.ProofType, tr, size)
c, err := ffiwrapper2.GeneratePieceCIDFromFile(sectorID.ProofType, tr, size)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err)
}

View File

@ -49,25 +49,25 @@ func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, call
// TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running)
minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory
if minNeedMem > res.MemPhysical {
log.Debugf("sched: not scheduling on worker %d for %s; not enough physical memory - need: %dM, have %dM", wid, caller, minNeedMem/mib, res.MemPhysical/mib)
log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM", wid, caller, minNeedMem/mib, res.MemPhysical/mib)
return false
}
maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory
if maxNeedMem > res.MemSwap+res.MemPhysical {
log.Debugf("sched: not scheduling on worker %d for %s; not enough virtual memory - need: %dM, have %dM", wid, caller, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM", wid, caller, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
return false
}
if a.cpuUse+needRes.Threads(res.CPUs) > res.CPUs {
log.Debugf("sched: not scheduling on worker %d for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs)
log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs)
return false
}
if len(res.GPUs) > 0 && needRes.CanGPU {
if a.gpuUsed {
log.Debugf("sched: not scheduling on worker %d for %s; GPU in use", wid, caller)
log.Debugf("sched: not scheduling on worker %s for %s; GPU in use", wid, caller)
return false
}
}

View File

@ -486,6 +486,6 @@ func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) {
}
sh.openWindows = newWindows
log.Debugf("worker %d dropped", wid)
log.Debugf("worker %s dropped", wid)
}
}

View File

@ -1,56 +0,0 @@
package zerocomm
import (
"math/bits"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
)
const Levels = 37
const Skip = 2 // can't generate for 32, 64b
var PieceComms = [Levels - Skip][32]byte{
{0x37, 0x31, 0xbb, 0x99, 0xac, 0x68, 0x9f, 0x66, 0xee, 0xf5, 0x97, 0x3e, 0x4a, 0x94, 0xda, 0x18, 0x8f, 0x4d, 0xdc, 0xae, 0x58, 0x7, 0x24, 0xfc, 0x6f, 0x3f, 0xd6, 0xd, 0xfd, 0x48, 0x83, 0x33},
{0x64, 0x2a, 0x60, 0x7e, 0xf8, 0x86, 0xb0, 0x4, 0xbf, 0x2c, 0x19, 0x78, 0x46, 0x3a, 0xe1, 0xd4, 0x69, 0x3a, 0xc0, 0xf4, 0x10, 0xeb, 0x2d, 0x1b, 0x7a, 0x47, 0xfe, 0x20, 0x5e, 0x5e, 0x75, 0xf},
{0x57, 0xa2, 0x38, 0x1a, 0x28, 0x65, 0x2b, 0xf4, 0x7f, 0x6b, 0xef, 0x7a, 0xca, 0x67, 0x9b, 0xe4, 0xae, 0xde, 0x58, 0x71, 0xab, 0x5c, 0xf3, 0xeb, 0x2c, 0x8, 0x11, 0x44, 0x88, 0xcb, 0x85, 0x26},
{0x1f, 0x7a, 0xc9, 0x59, 0x55, 0x10, 0xe0, 0x9e, 0xa4, 0x1c, 0x46, 0xb, 0x17, 0x64, 0x30, 0xbb, 0x32, 0x2c, 0xd6, 0xfb, 0x41, 0x2e, 0xc5, 0x7c, 0xb1, 0x7d, 0x98, 0x9a, 0x43, 0x10, 0x37, 0x2f},
{0xfc, 0x7e, 0x92, 0x82, 0x96, 0xe5, 0x16, 0xfa, 0xad, 0xe9, 0x86, 0xb2, 0x8f, 0x92, 0xd4, 0x4a, 0x4f, 0x24, 0xb9, 0x35, 0x48, 0x52, 0x23, 0x37, 0x6a, 0x79, 0x90, 0x27, 0xbc, 0x18, 0xf8, 0x33},
{0x8, 0xc4, 0x7b, 0x38, 0xee, 0x13, 0xbc, 0x43, 0xf4, 0x1b, 0x91, 0x5c, 0xe, 0xed, 0x99, 0x11, 0xa2, 0x60, 0x86, 0xb3, 0xed, 0x62, 0x40, 0x1b, 0xf9, 0xd5, 0x8b, 0x8d, 0x19, 0xdf, 0xf6, 0x24},
{0xb2, 0xe4, 0x7b, 0xfb, 0x11, 0xfa, 0xcd, 0x94, 0x1f, 0x62, 0xaf, 0x5c, 0x75, 0xf, 0x3e, 0xa5, 0xcc, 0x4d, 0xf5, 0x17, 0xd5, 0xc4, 0xf1, 0x6d, 0xb2, 0xb4, 0xd7, 0x7b, 0xae, 0xc1, 0xa3, 0x2f},
{0xf9, 0x22, 0x61, 0x60, 0xc8, 0xf9, 0x27, 0xbf, 0xdc, 0xc4, 0x18, 0xcd, 0xf2, 0x3, 0x49, 0x31, 0x46, 0x0, 0x8e, 0xae, 0xfb, 0x7d, 0x2, 0x19, 0x4d, 0x5e, 0x54, 0x81, 0x89, 0x0, 0x51, 0x8},
{0x2c, 0x1a, 0x96, 0x4b, 0xb9, 0xb, 0x59, 0xeb, 0xfe, 0xf, 0x6d, 0xa2, 0x9a, 0xd6, 0x5a, 0xe3, 0xe4, 0x17, 0x72, 0x4a, 0x8f, 0x7c, 0x11, 0x74, 0x5a, 0x40, 0xca, 0xc1, 0xe5, 0xe7, 0x40, 0x11},
{0xfe, 0xe3, 0x78, 0xce, 0xf1, 0x64, 0x4, 0xb1, 0x99, 0xed, 0xe0, 0xb1, 0x3e, 0x11, 0xb6, 0x24, 0xff, 0x9d, 0x78, 0x4f, 0xbb, 0xed, 0x87, 0x8d, 0x83, 0x29, 0x7e, 0x79, 0x5e, 0x2, 0x4f, 0x2},
{0x8e, 0x9e, 0x24, 0x3, 0xfa, 0x88, 0x4c, 0xf6, 0x23, 0x7f, 0x60, 0xdf, 0x25, 0xf8, 0x3e, 0xe4, 0xd, 0xca, 0x9e, 0xd8, 0x79, 0xeb, 0x6f, 0x63, 0x52, 0xd1, 0x50, 0x84, 0xf5, 0xad, 0xd, 0x3f},
{0x75, 0x2d, 0x96, 0x93, 0xfa, 0x16, 0x75, 0x24, 0x39, 0x54, 0x76, 0xe3, 0x17, 0xa9, 0x85, 0x80, 0xf0, 0x9, 0x47, 0xaf, 0xb7, 0xa3, 0x5, 0x40, 0xd6, 0x25, 0xa9, 0x29, 0x1c, 0xc1, 0x2a, 0x7},
{0x70, 0x22, 0xf6, 0xf, 0x7e, 0xf6, 0xad, 0xfa, 0x17, 0x11, 0x7a, 0x52, 0x61, 0x9e, 0x30, 0xce, 0xa8, 0x2c, 0x68, 0x7, 0x5a, 0xdf, 0x1c, 0x66, 0x77, 0x86, 0xec, 0x50, 0x6e, 0xef, 0x2d, 0x19},
{0xd9, 0x98, 0x87, 0xb9, 0x73, 0x57, 0x3a, 0x96, 0xe1, 0x13, 0x93, 0x64, 0x52, 0x36, 0xc1, 0x7b, 0x1f, 0x4c, 0x70, 0x34, 0xd7, 0x23, 0xc7, 0xa9, 0x9f, 0x70, 0x9b, 0xb4, 0xda, 0x61, 0x16, 0x2b},
{0xd0, 0xb5, 0x30, 0xdb, 0xb0, 0xb4, 0xf2, 0x5c, 0x5d, 0x2f, 0x2a, 0x28, 0xdf, 0xee, 0x80, 0x8b, 0x53, 0x41, 0x2a, 0x2, 0x93, 0x1f, 0x18, 0xc4, 0x99, 0xf5, 0xa2, 0x54, 0x8, 0x6b, 0x13, 0x26},
{0x84, 0xc0, 0x42, 0x1b, 0xa0, 0x68, 0x5a, 0x1, 0xbf, 0x79, 0x5a, 0x23, 0x44, 0x6, 0x4f, 0xe4, 0x24, 0xbd, 0x52, 0xa9, 0xd2, 0x43, 0x77, 0xb3, 0x94, 0xff, 0x4c, 0x4b, 0x45, 0x68, 0xe8, 0x11},
{0x65, 0xf2, 0x9e, 0x5d, 0x98, 0xd2, 0x46, 0xc3, 0x8b, 0x38, 0x8c, 0xfc, 0x6, 0xdb, 0x1f, 0x6b, 0x2, 0x13, 0x3, 0xc5, 0xa2, 0x89, 0x0, 0xb, 0xdc, 0xe8, 0x32, 0xa9, 0xc3, 0xec, 0x42, 0x1c},
{0xa2, 0x24, 0x75, 0x8, 0x28, 0x58, 0x50, 0x96, 0x5b, 0x7e, 0x33, 0x4b, 0x31, 0x27, 0xb0, 0xc0, 0x42, 0xb1, 0xd0, 0x46, 0xdc, 0x54, 0x40, 0x21, 0x37, 0x62, 0x7c, 0xd8, 0x79, 0x9c, 0xe1, 0x3a},
{0xda, 0xfd, 0xab, 0x6d, 0xa9, 0x36, 0x44, 0x53, 0xc2, 0x6d, 0x33, 0x72, 0x6b, 0x9f, 0xef, 0xe3, 0x43, 0xbe, 0x8f, 0x81, 0x64, 0x9e, 0xc0, 0x9, 0xaa, 0xd3, 0xfa, 0xff, 0x50, 0x61, 0x75, 0x8},
{0xd9, 0x41, 0xd5, 0xe0, 0xd6, 0x31, 0x4a, 0x99, 0x5c, 0x33, 0xff, 0xbd, 0x4f, 0xbe, 0x69, 0x11, 0x8d, 0x73, 0xd4, 0xe5, 0xfd, 0x2c, 0xd3, 0x1f, 0xf, 0x7c, 0x86, 0xeb, 0xdd, 0x14, 0xe7, 0x6},
{0x51, 0x4c, 0x43, 0x5c, 0x3d, 0x4, 0xd3, 0x49, 0xa5, 0x36, 0x5f, 0xbd, 0x59, 0xff, 0xc7, 0x13, 0x62, 0x91, 0x11, 0x78, 0x59, 0x91, 0xc1, 0xa3, 0xc5, 0x3a, 0xf2, 0x20, 0x79, 0x74, 0x1a, 0x2f},
{0xad, 0x6, 0x85, 0x39, 0x69, 0xd3, 0x7d, 0x34, 0xff, 0x8, 0xe0, 0x9f, 0x56, 0x93, 0xa, 0x4a, 0xd1, 0x9a, 0x89, 0xde, 0xf6, 0xc, 0xbf, 0xee, 0x7e, 0x1d, 0x33, 0x81, 0xc1, 0xe7, 0x1c, 0x37},
{0x39, 0x56, 0xe, 0x7b, 0x13, 0xa9, 0x3b, 0x7, 0xa2, 0x43, 0xfd, 0x27, 0x20, 0xff, 0xa7, 0xcb, 0x3e, 0x1d, 0x2e, 0x50, 0x5a, 0xb3, 0x62, 0x9e, 0x79, 0xf4, 0x63, 0x13, 0x51, 0x2c, 0xda, 0x6},
{0xcc, 0xc3, 0xc0, 0x12, 0xf5, 0xb0, 0x5e, 0x81, 0x1a, 0x2b, 0xbf, 0xdd, 0xf, 0x68, 0x33, 0xb8, 0x42, 0x75, 0xb4, 0x7b, 0xf2, 0x29, 0xc0, 0x5, 0x2a, 0x82, 0x48, 0x4f, 0x3c, 0x1a, 0x5b, 0x3d},
{0x7d, 0xf2, 0x9b, 0x69, 0x77, 0x31, 0x99, 0xe8, 0xf2, 0xb4, 0xb, 0x77, 0x91, 0x9d, 0x4, 0x85, 0x9, 0xee, 0xd7, 0x68, 0xe2, 0xc7, 0x29, 0x7b, 0x1f, 0x14, 0x37, 0x3, 0x4f, 0xc3, 0xc6, 0x2c},
{0x66, 0xce, 0x5, 0xa3, 0x66, 0x75, 0x52, 0xcf, 0x45, 0xc0, 0x2b, 0xcc, 0x4e, 0x83, 0x92, 0x91, 0x9b, 0xde, 0xac, 0x35, 0xde, 0x2f, 0xf5, 0x62, 0x71, 0x84, 0x8e, 0x9f, 0x7b, 0x67, 0x51, 0x7},
{0xd8, 0x61, 0x2, 0x18, 0x42, 0x5a, 0xb5, 0xe9, 0x5b, 0x1c, 0xa6, 0x23, 0x9d, 0x29, 0xa2, 0xe4, 0x20, 0xd7, 0x6, 0xa9, 0x6f, 0x37, 0x3e, 0x2f, 0x9c, 0x9a, 0x91, 0xd7, 0x59, 0xd1, 0x9b, 0x1},
{0x6d, 0x36, 0x4b, 0x1e, 0xf8, 0x46, 0x44, 0x1a, 0x5a, 0x4a, 0x68, 0x86, 0x23, 0x14, 0xac, 0xc0, 0xa4, 0x6f, 0x1, 0x67, 0x17, 0xe5, 0x34, 0x43, 0xe8, 0x39, 0xee, 0xdf, 0x83, 0xc2, 0x85, 0x3c},
{0x7, 0x7e, 0x5f, 0xde, 0x35, 0xc5, 0xa, 0x93, 0x3, 0xa5, 0x50, 0x9, 0xe3, 0x49, 0x8a, 0x4e, 0xbe, 0xdf, 0xf3, 0x9c, 0x42, 0xb7, 0x10, 0xb7, 0x30, 0xd8, 0xec, 0x7a, 0xc7, 0xaf, 0xa6, 0x3e},
{0xe6, 0x40, 0x5, 0xa6, 0xbf, 0xe3, 0x77, 0x79, 0x53, 0xb8, 0xad, 0x6e, 0xf9, 0x3f, 0xf, 0xca, 0x10, 0x49, 0xb2, 0x4, 0x16, 0x54, 0xf2, 0xa4, 0x11, 0xf7, 0x70, 0x27, 0x99, 0xce, 0xce, 0x2},
{0x25, 0x9d, 0x3d, 0x6b, 0x1f, 0x4d, 0x87, 0x6d, 0x11, 0x85, 0xe1, 0x12, 0x3a, 0xf6, 0xf5, 0x50, 0x1a, 0xf0, 0xf6, 0x7c, 0xf1, 0x5b, 0x52, 0x16, 0x25, 0x5b, 0x7b, 0x17, 0x8d, 0x12, 0x5, 0x1d},
{0x3f, 0x9a, 0x4d, 0x41, 0x1d, 0xa4, 0xef, 0x1b, 0x36, 0xf3, 0x5f, 0xf0, 0xa1, 0x95, 0xae, 0x39, 0x2a, 0xb2, 0x3f, 0xee, 0x79, 0x67, 0xb7, 0xc4, 0x1b, 0x3, 0xd1, 0x61, 0x3f, 0xc2, 0x92, 0x39},
{0xfe, 0x4e, 0xf3, 0x28, 0xc6, 0x1a, 0xa3, 0x9c, 0xfd, 0xb2, 0x48, 0x4e, 0xaa, 0x32, 0xa1, 0x51, 0xb1, 0xfe, 0x3d, 0xfd, 0x1f, 0x96, 0xdd, 0x8c, 0x97, 0x11, 0xfd, 0x86, 0xd6, 0xc5, 0x81, 0x13},
{0xf5, 0x5d, 0x68, 0x90, 0xe, 0x2d, 0x83, 0x81, 0xec, 0xcb, 0x81, 0x64, 0xcb, 0x99, 0x76, 0xf2, 0x4b, 0x2d, 0xe0, 0xdd, 0x61, 0xa3, 0x1b, 0x97, 0xce, 0x6e, 0xb2, 0x38, 0x50, 0xd5, 0xe8, 0x19},
{0xaa, 0xaa, 0x8c, 0x4c, 0xb4, 0xa, 0xac, 0xee, 0x1e, 0x2, 0xdc, 0x65, 0x42, 0x4b, 0x2a, 0x6c, 0x8e, 0x99, 0xf8, 0x3, 0xb7, 0x2f, 0x79, 0x29, 0xc4, 0x10, 0x1d, 0x7f, 0xae, 0x6b, 0xff, 0x32},
}
func ZeroPieceCommitment(sz abi.UnpaddedPieceSize) cid.Cid {
level := bits.TrailingZeros64(uint64(sz.Padded())) - Skip - 5 // 2^5 = 32
commP, _ := commcid.PieceCommitmentV1ToCID(PieceComms[level][:])
return commP
}

View File

@ -1,115 +0,0 @@
package zerocomm_test
import (
"bytes"
"fmt"
"io"
"testing"
commcid "github.com/filecoin-project/go-fil-commcid"
abi "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)
func TestComms(t *testing.T) {
t.Skip("don't have enough ram") // no, but seriously, currently this needs like 3tb of /tmp
var expPieceComms [zerocomm.Levels - zerocomm.Skip]cid.Cid
{
l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 127)), 127)
if err != nil {
t.Fatal(err)
}
expPieceComms[0] = l2
}
for i := 1; i < zerocomm.Levels-2; i++ {
var err error
sz := abi.UnpaddedPieceSize(127 << uint(i))
fmt.Println(i, sz)
r := io.LimitReader(&NullReader{}, int64(sz))
expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, r, sz)
if err != nil {
t.Fatal(err)
}
}
for i, comm := range expPieceComms {
c, err := commcid.CIDToPieceCommitmentV1(comm)
if err != nil {
t.Fatal(err)
}
if string(c) != string(zerocomm.PieceComms[i][:]) {
t.Errorf("zero commitment %d didn't match", i)
}
}
for _, comm := range expPieceComms { // Could do codegen, but this is good enough
fmt.Printf("%#v,\n", comm)
}
}
func TestCommsSmall(t *testing.T) {
var expPieceComms [8]cid.Cid
lvls := len(expPieceComms) + zerocomm.Skip
{
l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 127)), 127)
if err != nil {
t.Fatal(err)
}
expPieceComms[0] = l2
}
for i := 1; i < lvls-2; i++ {
var err error
sz := abi.UnpaddedPieceSize(127 << uint(i))
fmt.Println(i, sz)
r := io.LimitReader(&NullReader{}, int64(sz))
expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, r, sz)
if err != nil {
t.Fatal(err)
}
}
for i, comm := range expPieceComms {
c, err := commcid.CIDToPieceCommitmentV1(comm)
if err != nil {
t.Fatal(err)
}
if string(c) != string(zerocomm.PieceComms[i][:]) {
t.Errorf("zero commitment %d didn't match", i)
}
}
for _, comm := range expPieceComms { // Could do codegen, but this is good enough
fmt.Printf("%#v,\n", comm)
}
}
func TestForSise(t *testing.T) {
exp, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 1016)), 1016)
if err != nil {
return
}
actual := zerocomm.ZeroPieceCommitment(1016)
if !exp.Equals(actual) {
t.Errorf("zero commitment didn't match")
}
}
type NullReader struct{}
func (NullReader) Read(out []byte) (int, error) {
for i := range out {
out[i] = 0
}
return len(out), nil
}

View File

@ -12,9 +12,9 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)
// TODO: For now we handle this by halting state execution, when we get jsonrpc reconnecting

View File

@ -13,7 +13,7 @@ import (
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-statemachine"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
"github.com/filecoin-project/go-commp-utils/zerocomm"
)
const minRetryTime = 1 * time.Minute

go.mod
View File

@ -1,6 +1,6 @@
module github.com/filecoin-project/lotus
go 1.14
go 1.15
require (
contrib.go.opencensus.io/exporter/jaeger v0.1.0
@ -27,10 +27,11 @@ require (
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect
github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
github.com/filecoin-project/go-data-transfer v1.2.0
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a
github.com/filecoin-project/go-fil-markets v1.0.6
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49
github.com/filecoin-project/go-multistore v0.0.3
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
@ -87,7 +88,7 @@ require (
github.com/ipfs/go-path v0.0.7
github.com/ipfs/go-unixfs v0.2.4
github.com/ipfs/interface-go-ipfs-core v0.2.3
github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d
github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018
github.com/kelseyhightower/envconfig v1.4.0
github.com/lib/pq v1.7.0

go.sum
View File

@ -245,6 +245,8 @@ github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816 h1:
github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8=
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg=
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434 h1:0kHszkYP3hgApcjl5x4rpwONhN9+j7XDobf6at5XfHs=
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U=
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus=
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
github.com/filecoin-project/go-data-transfer v1.0.1 h1:5sYKDbstyDsdJpVP4UGUW6+BgCNfgnH8hQgf0E3ZAno=
@ -255,8 +257,12 @@ github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl
github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg=
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335 h1:DF8eu0WdEBnSVdu71+jfT4YMk6fO7AIJk2ZiWd3l15c=
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
github.com/filecoin-project/go-fil-markets v1.0.6 h1:JTZBMKJ19YpK/vQfE0rHvjELy83NeGor5d4dBnoIlK0=
github.com/filecoin-project/go-fil-markets v1.0.6/go.mod h1:iVYc+VrHIP15F5COkHNM6ndTwKSJ7qPrHSKCfFUnve4=
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
@ -671,6 +677,8 @@ github.com/ipfs/iptb-plugins v0.2.1 h1:au4HWn9/pRPbkxA08pDx2oRAs4cnbgQWgV0teYXuu
github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs=
github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4 h1:6phjU3kXvCEWOZpu+Ob0w6DzgPFZmDLgLPxJhD8RxEY=
github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw=
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d h1:iphSzTuPqyDgH7WUVZsdqUnQNzYgIblsVr1zhVNA33U=
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ=
github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8=
github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f h1:XpOuNQ5GbXxUcSukbQcW9jkE7REpaFGJU2/T00fo9kA=
github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM=

View File

@ -1,113 +0,0 @@
package commp
import (
"bytes"
"math/bits"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-padreader"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)
const commPBufPad = abi.PaddedPieceSize(8 << 20)
const CommPBuf = abi.UnpaddedPieceSize(commPBufPad - (commPBufPad / 128)) // can't use .Unpadded() for const
type Writer struct {
len int64
buf [CommPBuf]byte
leaves []cid.Cid
}
func (w *Writer) Write(p []byte) (int, error) {
n := len(p)
for len(p) > 0 {
buffered := int(w.len % int64(len(w.buf)))
toBuffer := len(w.buf) - buffered
if toBuffer > len(p) {
toBuffer = len(p)
}
copied := copy(w.buf[buffered:], p[:toBuffer])
p = p[copied:]
w.len += int64(copied)
if copied > 0 && w.len%int64(len(w.buf)) == 0 {
leaf, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, bytes.NewReader(w.buf[:]), CommPBuf)
if err != nil {
return 0, err
}
w.leaves = append(w.leaves, leaf)
}
}
return n, nil
}
func (w *Writer) Sum() (api.DataCIDSize, error) {
// process last non-zero leaf if exists
lastLen := w.len % int64(len(w.buf))
rawLen := w.len
// process remaining bit of data
if lastLen != 0 {
if len(w.leaves) != 0 {
copy(w.buf[lastLen:], make([]byte, int(int64(CommPBuf)-lastLen)))
lastLen = int64(CommPBuf)
}
r, sz := padreader.New(bytes.NewReader(w.buf[:lastLen]), uint64(lastLen))
p, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, r, sz)
if err != nil {
return api.DataCIDSize{}, err
}
if sz < CommPBuf { // special case for pieces smaller than 16MiB
return api.DataCIDSize{
PayloadSize: w.len,
PieceSize: sz.Padded(),
PieceCID: p,
}, nil
}
w.leaves = append(w.leaves, p)
}
// pad with zero pieces to power-of-two size
fillerLeaves := (1 << (bits.Len(uint(len(w.leaves) - 1)))) - len(w.leaves)
for i := 0; i < fillerLeaves; i++ {
w.leaves = append(w.leaves, zerocomm.ZeroPieceCommitment(CommPBuf))
}
if len(w.leaves) == 1 {
return api.DataCIDSize{
PayloadSize: rawLen,
PieceSize: abi.PaddedPieceSize(len(w.leaves)) * commPBufPad,
PieceCID: w.leaves[0],
}, nil
}
pieces := make([]abi.PieceInfo, len(w.leaves))
for i, leaf := range w.leaves {
pieces[i] = abi.PieceInfo{
Size: commPBufPad,
PieceCID: leaf,
}
}
p, err := ffi.GenerateUnsealedCID(abi.RegisteredSealProof_StackedDrg32GiBV1, pieces)
if err != nil {
return api.DataCIDSize{}, xerrors.Errorf("generating unsealed CID: %w", err)
}
return api.DataCIDSize{
PayloadSize: rawLen,
PieceSize: abi.PaddedPieceSize(len(w.leaves)) * commPBufPad,
PieceCID: p,
}, nil
}

View File

@ -1,88 +0,0 @@
package commp
import (
"bytes"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"testing"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-padreader"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)
func TestWriterZero(t *testing.T) {
for i, s := range []struct {
writes []int
expect abi.PaddedPieceSize
}{
{writes: []int{200}, expect: 256},
{writes: []int{200, 200}, expect: 512},
{writes: []int{int(CommPBuf)}, expect: commPBufPad},
{writes: []int{int(CommPBuf) * 2}, expect: 2 * commPBufPad},
{writes: []int{int(CommPBuf), int(CommPBuf), int(CommPBuf)}, expect: 4 * commPBufPad},
{writes: []int{int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf)}, expect: 16 * commPBufPad},
{writes: []int{200, int(CommPBuf)}, expect: 2 * commPBufPad},
} {
s := s
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
w := &Writer{}
var rawSum int64
for _, write := range s.writes {
rawSum += int64(write)
_, err := w.Write(make([]byte, write))
require.NoError(t, err)
}
p, err := w.Sum()
require.NoError(t, err)
require.Equal(t, rawSum, p.PayloadSize)
require.Equal(t, s.expect, p.PieceSize)
require.Equal(t, zerocomm.ZeroPieceCommitment(s.expect.Unpadded()).String(), p.PieceCID.String())
})
}
}
func TestWriterData(t *testing.T) {
dataLen := float64(CommPBuf) * 6.78
data, _ := ioutil.ReadAll(io.LimitReader(rand.Reader, int64(dataLen)))
pr, sz := padreader.New(bytes.NewReader(data), uint64(dataLen))
exp, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, pr, sz)
require.NoError(t, err)
w := &Writer{}
_, err = io.Copy(w, bytes.NewReader(data))
require.NoError(t, err)
res, err := w.Sum()
require.NoError(t, err)
require.Equal(t, exp.String(), res.PieceCID.String())
}
func BenchmarkWriterZero(b *testing.B) {
buf := make([]byte, int(CommPBuf)*b.N)
b.SetBytes(int64(CommPBuf))
b.ResetTimer()
w := &Writer{}
_, err := w.Write(buf)
require.NoError(b, err)
o, err := w.Sum()
b.StopTimer()
require.NoError(b, err)
require.Equal(b, zerocomm.ZeroPieceCommitment(o.PieceSize.Unpadded()).String(), o.PieceCID.String())
require.Equal(b, int64(CommPBuf)*int64(b.N), o.PayloadSize)
}

View File

@ -53,16 +53,24 @@ type PeerMgr struct {
h host.Host
dht *dht.IpfsDHT
notifee *net.NotifyBundle
filPeerEmitter event.Emitter
notifee *net.NotifyBundle
emitter event.Emitter
done chan struct{}
}
type NewFilPeer struct {
Id peer.ID
type FilPeerEvt struct {
Type FilPeerEvtType
ID peer.ID
}
type FilPeerEvtType int
const (
AddFilPeerEvt FilPeerEvtType = iota
RemoveFilPeerEvt
)
func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes.BootstrapPeers) (*PeerMgr, error) {
pm := &PeerMgr{
h: h,
@ -77,16 +85,16 @@ func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes
done: make(chan struct{}),
}
emitter, err := h.EventBus().Emitter(new(NewFilPeer))
emitter, err := h.EventBus().Emitter(new(FilPeerEvt))
if err != nil {
return nil, xerrors.Errorf("creating NewFilPeer emitter: %w", err)
return nil, xerrors.Errorf("creating FilPeerEvt emitter: %w", err)
}
pm.filPeerEmitter = emitter
pm.emitter = emitter
lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
return multierr.Combine(
pm.filPeerEmitter.Close(),
pm.emitter.Close(),
pm.Stop(ctx),
)
},
@ -104,7 +112,7 @@ func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes
}
func (pmgr *PeerMgr) AddFilecoinPeer(p peer.ID) {
_ = pmgr.filPeerEmitter.Emit(NewFilPeer{Id: p}) //nolint:errcheck
_ = pmgr.emitter.Emit(FilPeerEvt{Type: AddFilPeerEvt, ID: p}) //nolint:errcheck
pmgr.peersLk.Lock()
defer pmgr.peersLk.Unlock()
pmgr.peers[p] = time.Duration(0)
@ -127,10 +135,19 @@ func (pmgr *PeerMgr) SetPeerLatency(p peer.ID, latency time.Duration) {
}
func (pmgr *PeerMgr) Disconnect(p peer.ID) {
disconnected := false
if pmgr.h.Network().Connectedness(p) == net.NotConnected {
pmgr.peersLk.Lock()
defer pmgr.peersLk.Unlock()
delete(pmgr.peers, p)
_, disconnected = pmgr.peers[p]
if disconnected {
delete(pmgr.peers, p)
}
pmgr.peersLk.Unlock()
}
if disconnected {
_ = pmgr.emitter.Emit(FilPeerEvt{Type: RemoveFilPeerEvt, ID: p}) //nolint:errcheck
}
}
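For context, other modules can watch these peer events on the libp2p event bus rather than polling the PeerMgr. A minimal sketch, assuming the package is imported from lotus's peermgr path and using only the FilPeerEvt type and constants introduced above:
import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p-core/host"

	"github.com/filecoin-project/lotus/lib/peermgr" // assumed import path for the PeerMgr package
)
// watchFilPeers subscribes to FilPeerEvt on the host's event bus and prints
// peers as they are added to or removed from the Filecoin peer set.
func watchFilPeers(ctx context.Context, h host.Host) error {
	sub, err := h.EventBus().Subscribe(new(peermgr.FilPeerEvt))
	if err != nil {
		return err
	}
	go func() {
		defer sub.Close()
		for {
			select {
			case e, ok := <-sub.Out():
				if !ok {
					return
				}
				evt := e.(peermgr.FilPeerEvt)
				switch evt.Type {
				case peermgr.AddFilPeerEvt:
					fmt.Println("filecoin peer added:", evt.ID)
				case peermgr.RemoveFilPeerEvt:
					fmt.Println("filecoin peer removed:", evt.ID)
				}
			case <-ctx.Done():
				return
			}
		}
	}()
	return nil
}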

View File

@ -1,6 +1,7 @@
package marketevents
import (
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
@ -29,6 +30,23 @@ func RetrievalProviderLogger(event retrievalmarket.ProviderEvent, deal retrieval
log.Infow("retrieval event", "name", retrievalmarket.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message)
}
// DataTransferLogger logs events from the data transfer module
func DataTransferLogger(event datatransfer.Event, state datatransfer.ChannelState) {
log.Debugw("data transfer event",
"name", datatransfer.Events[event.Code],
"status", datatransfer.Statuses[state.Status()],
"transfer ID", state.TransferID(),
"channel ID", state.ChannelID(),
"sent", state.Sent(),
"received", state.Received(),
"queued", state.Queued(),
"received count", len(state.ReceivedCids()),
"total size", state.TotalSize(),
"remote peer", state.OtherPeer(),
"event message", event.Message,
"channel message", state.Message())
}
// ReadyLogger returns a function to log the results of module initialization
func ReadyLogger(module string) func(error) {
return func(err error) {

View File

@ -26,6 +26,7 @@ import (
"github.com/libp2p/go-libp2p-peerstore/pstoremem"
pubsub "github.com/libp2p/go-libp2p-pubsub"
record "github.com/libp2p/go-libp2p-record"
"github.com/libp2p/go-libp2p/p2p/net/conngater"
"github.com/multiformats/go-multiaddr"
"go.uber.org/fx"
"golang.org/x/xerrors"
@ -98,6 +99,7 @@ var (
ConnectionManagerKey = special{9} // Libp2p option
AutoNATSvcKey = special{10} // Libp2p option
BandwidthReporterKey = special{11} // Libp2p option
ConnGaterKey = special{12} // libp2p option
)
type invoke int
@ -220,6 +222,9 @@ func libp2p() Option {
Override(PstoreAddSelfKeysKey, lp2p.PstoreAddSelfKeys),
Override(StartListeningKey, lp2p.StartListening(config.DefaultFullNode().Libp2p.ListenAddresses)),
Override(new(*conngater.BasicConnectionGater), lp2p.ConnGater),
Override(ConnGaterKey, lp2p.ConnGaterOption),
)
}

View File

@ -33,9 +33,10 @@ import (
"go.uber.org/fx"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-commp-utils/ffiwrapper"
"github.com/filecoin-project/go-commp-utils/writer"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/discovery"
"github.com/filecoin-project/go-fil-markets/pieceio"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
rm "github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/shared"
@ -49,7 +50,6 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/commp"
"github.com/filecoin-project/lotus/markets/utils"
"github.com/filecoin-project/lotus/node/impl/full"
"github.com/filecoin-project/lotus/node/impl/paych"
@ -642,7 +642,7 @@ func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Addre
func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) {
// Hard-code the sector type to 32GiBV1_1, because:
// - pieceio.GeneratePieceCommitment requires a RegisteredSealProof
// - ffiwrapper.GeneratePieceCIDFromFile requires a RegisteredSealProof
// - commP itself is sector-size independent, with rather low probability of that changing
// ( note how the final rust call is identical for every RegSP type )
// https://github.com/filecoin-project/rust-filecoin-proofs-api/blob/v5.0.0/src/seal.rs#L1040-L1050
@ -662,7 +662,8 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet
return nil, err
}
commP, pieceSize, err := pieceio.GeneratePieceCommitment(arbitraryProofType, rdr, uint64(stat.Size()))
pieceReader, pieceSize := padreader.New(rdr, uint64(stat.Size()))
commP, err := ffiwrapper.GeneratePieceCIDFromFile(arbitraryProofType, pieceReader, pieceSize)
if err != nil {
return nil, xerrors.Errorf("computing commP failed: %w", err)
@ -702,8 +703,8 @@ func (a *API) ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, e
func (a *API) ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) {
dag := merkledag.NewDAGService(blockservice.New(a.CombinedBstore, offline.Exchange(a.CombinedBstore)))
w := &commp.Writer{}
bw := bufio.NewWriterSize(w, int(commp.CommPBuf))
w := &writer.Writer{}
bw := bufio.NewWriterSize(w, int(writer.CommPBuf))
err := car.WriteCar(ctx, dag, []cid.Cid{root}, w)
if err != nil {
@ -714,7 +715,8 @@ func (a *API) ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCID
return api.DataCIDSize{}, err
}
return w.Sum()
dataCIDSize, err := w.Sum()
return api.DataCIDSize(dataCIDSize), err
}
func (a *API) ClientGenCar(ctx context.Context, ref api.FileRef, outputPath string) error {
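The removed lib/commp writer is superseded by the external go-commp-utils/writer package used above. A minimal sketch of computing a piece CID for an arbitrary stream with that writer, assuming its Sum result exposes the same PieceCID/PieceSize fields as api.DataCIDSize (which the direct conversion above implies):
import (
	"bufio"
	"io"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-commp-utils/writer"
	"github.com/filecoin-project/go-state-types/abi"
)
// pieceCIDOf streams r through the commP writer, buffering in CommPBuf-sized
// chunks as ClientDealPieceCID does, and returns the piece CID and padded size.
func pieceCIDOf(r io.Reader) (cid.Cid, abi.PaddedPieceSize, error) {
	w := &writer.Writer{}
	bw := bufio.NewWriterSize(w, int(writer.CommPBuf))
	if _, err := io.Copy(bw, r); err != nil {
		return cid.Undef, 0, err
	}
	if err := bw.Flush(); err != nil {
		return cid.Undef, 0, err
	}
	sum, err := w.Sum()
	if err != nil {
		return cid.Undef, 0, err
	}
	return sum.PieceCID, sum.PieceSize, nil
}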

View File

@ -15,6 +15,7 @@ import (
protocol "github.com/libp2p/go-libp2p-core/protocol"
swarm "github.com/libp2p/go-libp2p-swarm"
basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
"github.com/libp2p/go-libp2p/p2p/net/conngater"
ma "github.com/multiformats/go-multiaddr"
"go.uber.org/fx"
"golang.org/x/xerrors"
@ -36,6 +37,7 @@ type CommonAPI struct {
RawHost lp2p.RawHost
Host host.Host
Router lp2p.BaseIpfsRouting
ConnGater *conngater.BasicConnectionGater
Reporter metrics.Reporter
Sk *dtypes.ScoreKeeper
ShutdownChan dtypes.ShutdownChan

View File

@ -0,0 +1,136 @@
package common
import (
"context"
"net"
"golang.org/x/xerrors"
logging "github.com/ipfs/go-log/v2"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/filecoin-project/lotus/api"
)
var cLog = logging.Logger("conngater")
func (a *CommonAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
for _, p := range acl.Peers {
err := a.ConnGater.BlockPeer(p)
if err != nil {
return xerrors.Errorf("error blocking peer %s: %w", p, err)
}
for _, c := range a.Host.Network().ConnsToPeer(p) {
err = c.Close()
if err != nil {
// just log this, don't fail
cLog.Warnf("error closing connection to %s: %s", p, err)
}
}
}
for _, addr := range acl.IPAddrs {
ip := net.ParseIP(addr)
if ip == nil {
return xerrors.Errorf("error parsing IP address %s", addr)
}
err := a.ConnGater.BlockAddr(ip)
if err != nil {
return xerrors.Errorf("error blocking IP address %s: %w", addr, err)
}
for _, c := range a.Host.Network().Conns() {
remote := c.RemoteMultiaddr()
remoteIP, err := manet.ToIP(remote)
if err != nil {
continue
}
if ip.Equal(remoteIP) {
err = c.Close()
if err != nil {
// just log this, don't fail
cLog.Warnf("error closing connection to %s: %s", remoteIP, err)
}
}
}
}
for _, subnet := range acl.IPSubnets {
_, cidr, err := net.ParseCIDR(subnet)
if err != nil {
return xerrors.Errorf("error parsing subnet %s: %w", subnet, err)
}
err = a.ConnGater.BlockSubnet(cidr)
if err != nil {
return xerrors.Errorf("error blocking subunet %s: %w", subnet, err)
}
for _, c := range a.Host.Network().Conns() {
remote := c.RemoteMultiaddr()
remoteIP, err := manet.ToIP(remote)
if err != nil {
continue
}
if cidr.Contains(remoteIP) {
err = c.Close()
if err != nil {
// just log this, don't fail
cLog.Warnf("error closing connection to %s: %s", remoteIP, err)
}
}
}
}
return nil
}
func (a *CommonAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
for _, p := range acl.Peers {
err := a.ConnGater.UnblockPeer(p)
if err != nil {
return xerrors.Errorf("error unblocking peer %s: %w", p, err)
}
}
for _, addr := range acl.IPAddrs {
ip := net.ParseIP(addr)
if ip == nil {
return xerrors.Errorf("error parsing IP address %s", addr)
}
err := a.ConnGater.UnblockAddr(ip)
if err != nil {
return xerrors.Errorf("error unblocking IP address %s: %w", addr, err)
}
}
for _, subnet := range acl.IPSubnets {
_, cidr, err := net.ParseCIDR(subnet)
if err != nil {
return xerrors.Errorf("error parsing subnet %s: %w", subnet, err)
}
err = a.ConnGater.UnblockSubnet(cidr)
if err != nil {
return xerrors.Errorf("error unblocking subunet %s: %w", subnet, err)
}
}
return nil
}
func (a *CommonAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) {
result.Peers = a.ConnGater.ListBlockedPeers()
for _, ip := range a.ConnGater.ListBlockedAddrs() {
result.IPAddrs = append(result.IPAddrs, ip.String())
}
for _, subnet := range a.ConnGater.ListBlockedSubnets() {
result.IPSubnets = append(result.IPSubnets, subnet.String())
}
return
}
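A usage sketch of the new blocklist API from the client side, assuming an api.Common handle has already been obtained (e.g. over JSON-RPC); the address and subnet literals are hypothetical placeholders:
import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p-core/peer"

	"github.com/filecoin-project/lotus/api"
)
// blockAbusivePeer blocks a peer plus a hypothetical address and subnet,
// then prints the resulting blocklist.
func blockAbusivePeer(ctx context.Context, napi api.Common, p peer.ID) error {
	if err := napi.NetBlockAdd(ctx, api.NetBlockList{
		Peers:     []peer.ID{p},
		IPAddrs:   []string{"203.0.113.7"},     // hypothetical address
		IPSubnets: []string{"198.51.100.0/24"}, // hypothetical subnet
	}); err != nil {
		return err
	}
	bl, err := napi.NetBlockList(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("blocked: %d peers, addrs=%v, subnets=%v\n", len(bl.Peers), bl.IPAddrs, bl.IPSubnets)
	return nil
}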

View File

@ -792,6 +792,8 @@ func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.A
pci, err := stmgr.PreCommitInfo(ctx, a.StateManager, maddr, n, ts)
if err != nil {
return miner.SectorPreCommitOnChainInfo{}, err
} else if pci == nil {
return miner.SectorPreCommitOnChainInfo{}, xerrors.Errorf("precommit info is not exists")
}
return *pci, err

View File

@ -137,6 +137,7 @@ func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Grap
dt.OnReady(marketevents.ReadyLogger("client data transfer"))
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
dt.SubscribeToEvents(marketevents.DataTransferLogger)
return dt.Start(ctx)
},
OnStop: func(ctx context.Context) error {

View File

@ -0,0 +1,17 @@
package lp2p
import (
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/p2p/net/conngater"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
func ConnGater(ds dtypes.MetadataDS) (*conngater.BasicConnectionGater, error) {
return conngater.NewBasicConnectionGater(ds)
}
func ConnGaterOption(cg *conngater.BasicConnectionGater) (opts Libp2pOpts, err error) {
opts.Opts = append(opts.Opts, libp2p.ConnectionGater(cg))
return
}
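Outside the fx wiring, the gater can also be driven directly. A sketch assuming the node's metadata datastore (any datastore.Batching) is passed in, with a hypothetical address:
import (
	"fmt"
	"net"

	"github.com/ipfs/go-datastore"
	"github.com/libp2p/go-libp2p/p2p/net/conngater"
)
// blockAddrDirect constructs a gater backed by ds, blocks one IP, and lists
// the currently blocked addresses.
func blockAddrDirect(ds datastore.Batching) error {
	cg, err := conngater.NewBasicConnectionGater(ds)
	if err != nil {
		return err
	}
	if err := cg.BlockAddr(net.ParseIP("203.0.113.7")); err != nil { // hypothetical address
		return err
	}
	fmt.Println("blocked addrs:", cg.ListBlockedAddrs())
	return nil
}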

View File

@ -423,15 +423,8 @@ func NewStorageAsk(ctx helpers.MetricsCtx, fapi lapi.FullNode, ds dtypes.Metadat
if err != nil {
return nil, err
}
storedAsk, err := storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, address.Address(minerAddress),
return storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, address.Address(minerAddress),
storagemarket.MaxPieceSize(abi.PaddedPieceSize(mi.SectorSize)))
if err != nil {
return nil, err
}
if err != nil {
return storedAsk, err
}
return storedAsk, nil
}
func BasicDealFilter(user dtypes.StorageDealFilter) func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc,

View File

@ -64,7 +64,7 @@ cat > "${BASEDIR}/scripts/create_miner.bash" <<EOF
#!/usr/bin/env bash
set -x
lotus wallet import ~/.genesis-sectors/pre-seal-t01000.key
lotus wallet import --as-default ~/.genesis-sectors/pre-seal-t01000.key
lotus-miner init --genesis-miner --actor=t01000 --sector-size=2KiB --pre-sealed-sectors=~/.genesis-sectors --pre-sealed-metadata=~/.genesis-sectors/pre-seal-t01000.json --nosync
EOF
@ -178,6 +178,7 @@ tmux send-keys -t $session:$wcli "source ${BASEDIR}/scripts/env.$shell" C-m
tmux send-keys -t $session:$wpledging "source ${BASEDIR}/scripts/env.$shell" C-m
tmux send-keys -t $session:$wshell "source ${BASEDIR}/scripts/env.$shell" C-m
tmux send-keys -t $session:$wdaemon "lotus-seed pre-seal --sector-size 2KiB --num-sectors 2" C-m
tmux send-keys -t $session:$wdaemon "lotus-seed genesis new devnet.json" C-m
tmux send-keys -t $session:$wdaemon "lotus-seed genesis add-miner devnet.json ~/.genesis-sectors/pre-seal-t01000.json" C-m
tmux send-keys -t $session:$wdaemon "lotus daemon --api 48010 --lotus-make-genesis=dev.gen --genesis-template=devnet.json --bootstrap=false 2>&1 | tee -a ${BASEDIR}/daemon.log" C-m
@ -186,7 +187,7 @@ export LOTUS_PATH="${BASEDIR}/.lotus"
${BASEDIR}/bin/lotus wait-api
tmux send-keys -t $session:$wminer "${BASEDIR}/scripts/create_miner.bash" C-m
tmux send-keys -t $session:$wminer "lotus-miner run --api 48020 --nosync 2>&1 | tee -a ${BASEDIR}/miner.log" C-m
tmux send-keys -t $session:$wminer "lotus-miner run --miner-api 48020 --nosync 2>&1 | tee -a ${BASEDIR}/miner.log" C-m
tmux send-keys -t $session:$wcli "${BASEDIR}/scripts/monitor.bash" C-m
tmux send-keys -t $session:$wpledging "${BASEDIR}/scripts/pledge_sectors.bash" C-m

View File

@ -4,6 +4,7 @@ import (
"fmt"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-commp-utils/zerocomm"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
@ -13,7 +14,6 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
"github.com/filecoin-project/lotus/genesis"
)