Merge branch 'master' into raulk/badger-index-cache

commit 655aea1018
Raúl Kripalani · 2020-12-02 11:46:57 +00:00
213 changed files with 20432 additions and 1586 deletions


@@ -264,22 +264,44 @@ jobs:
 path: /tmp/test-artifacts/conformance-coverage.html
 build-lotus-soup:
 description: |
-Compile `lotus-soup` Testground test plan using the current version of Lotus.
+Compile `lotus-soup` Testground test plan
 parameters:
 <<: *test-params
 executor: << parameters.executor >>
 steps:
 - install-deps
 - prepare
-- run: cd extern/oni && git submodule sync
-- run: cd extern/oni && git submodule update --init
 - run: cd extern/filecoin-ffi && make
-- run:
-name: "replace lotus, filecoin-ffi, blst and fil-blst deps"
-command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../blst
 - run:
 name: "build lotus-soup testplan"
-command: pushd extern/oni/lotus-soup && go build -tags=testground .
+command: pushd testplans/lotus-soup && go build -tags=testground .
+trigger-testplans:
+description: |
+Trigger `lotus-soup` test cases on TaaS
+parameters:
+<<: *test-params
+executor: << parameters.executor >>
+steps:
+- install-deps
+- prepare
+- run:
+name: "download testground"
+command: wget https://gist.github.com/nonsense/5fbf3167cac79945f658771aed32fc44/raw/2e17eb0debf7ec6bdf027c1bdafc2c92dd97273b/testground-d3e9603 -O ~/testground-cli && chmod +x ~/testground-cli
+- run:
+name: "prepare .env.toml"
+command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml
+- run:
+name: "prepare testground home dir"
+command: mkdir -p $HOME/testground/plans && mv testplans/lotus-soup testplans/graphsync $HOME/testground/plans/
+- run:
+name: "trigger deals baseline testplan on taas"
+command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
+- run:
+name: "trigger payment channel stress testplan on taas"
+command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
+- run:
+name: "trigger graphsync testplan on taas"
+command: ~/testground-cli run composition -f $HOME/testground/plans/graphsync/_compositions/stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
 build-macos:
@@ -473,6 +495,11 @@ workflows:
 packages: "./conformance"
 vectors-branch: master
 - build-lotus-soup
+- trigger-testplans:
+filters:
+branches:
+only:
+- master
 - build-debug
 - build-all:
 requires:

.gitmodules

@@ -7,9 +7,6 @@
 [submodule "extern/test-vectors"]
 path = extern/test-vectors
 url = https://github.com/filecoin-project/test-vectors.git
-[submodule "extern/oni"]
-path = extern/oni
-url = https://github.com/filecoin-project/oni.git
 [submodule "extern/blst"]
 path = extern/blst
 url = https://github.com/supranational/blst.git


@@ -1,5 +1,15 @@
 # Lotus changelog
+# 1.2.1 / 2020-11-20
+This is a very small release of Lotus that fixes an issue users are experiencing when importing snapshots. There is no need to upgrade unless you experience an issue with creating a new `datastore` directory in the Lotus repo.
+## Changes
+- fix blockstore directory not created automatically (https://github.com/filecoin-project/lotus/pull/4922)
+- WindowPoStScheduler.checkSectors() delete useless judgment (https://github.com/filecoin-project/lotus/pull/4918)
 # 1.2.0 / 2020-11-18
 This is a mandatory release of Lotus that introduces the second post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 265200, before which time all nodes must have update to this release (or later). This release also bumps the required version of Go to 1.15.


@@ -5,9 +5,9 @@ all: build
 unexport GOFLAGS
-GOVERSION:=$(shell go version | cut -d' ' -f 3 | awk -F. '{printf "%d%03d", $$2, $$3}')
+GOVERSION:=$(shell go version | cut -d' ' -f 3 | sed 's/^go//' | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}')
-ifeq ($(shell expr $(GOVERSION) \< 15005), 1)
+ifeq ($(shell expr $(GOVERSION) \< 1015005), 1)
-$(warning Your Golang version is go 1.$(shell expr $(GOVERSION) / 1000).$(shell expr $(GOVERSION) % 1000))
+$(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell expr $(GOVERSION) % 1000000 / 1000).$(shell expr $(GOVERSION) % 1000))
 $(error Update Golang to version to at least 1.15.5)
 endif
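For readers double-checking the new Makefile guard: the updated GOVERSION expression encodes the toolchain version as major*1,000,000 + minor*1,000 + patch, so go1.15.5 becomes 1015005, and the `expr $(GOVERSION) \< 1015005` test now accounts for the major version that the old 15005 form dropped. A small illustrative Go sketch of the same arithmetic (not part of the change; the helper name is made up):

```go
package main

import "fmt"

// encode mirrors the Makefile's awk expression: major*1e6 + minor*1e3 + patch.
func encode(major, minor, patch int) int {
	return major*1_000_000 + minor*1_000 + patch
}

func main() {
	v := encode(1, 15, 5)                    // go1.15.5
	fmt.Println(v, v >= 1015005)             // 1015005 true: meets the minimum
	fmt.Println(encode(1, 14, 7) >= 1015005) // false: go1.14.7 is rejected
}
```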


@@ -10,7 +10,7 @@
 <a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a>
 <a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a>
 <a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
-<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.14.7-blue.svg" /></a>
+<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.15.5-blue.svg" /></a>
 <br>
 </p>


@@ -46,6 +46,11 @@ type Common interface {
 // usage and current rate per protocol
 NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error)
+// ConnectionGater API
+NetBlockAdd(ctx context.Context, acl NetBlockList) error
+NetBlockRemove(ctx context.Context, acl NetBlockList) error
+NetBlockList(ctx context.Context) (NetBlockList, error)
 // MethodGroup: Common
 // ID returns peerID of libp2p node backing this API


@@ -2,6 +2,7 @@ package api
 import (
 "context"
+"encoding/json"
 "fmt"
 "time"
@@ -785,6 +786,22 @@ type StartDealParams struct {
 VerifiedDeal bool
 }
+func (s *StartDealParams) UnmarshalJSON(raw []byte) (err error) {
+type sdpAlias StartDealParams
+sdp := sdpAlias{
+FastRetrieval: true,
+}
+if err := json.Unmarshal(raw, &sdp); err != nil {
+return err
+}
+*s = StartDealParams(sdp)
+return nil
+}
 type IpldObject struct {
 Cid cid.Cid
 Obj interface{}
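The UnmarshalJSON added above uses the alias-type trick to default FastRetrieval to true when the field is absent from the JSON. A self-contained sketch of the same pattern, with a made-up Params type rather than the real StartDealParams:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Params stands in for StartDealParams to illustrate defaulting on decode.
type Params struct {
	FastRetrieval bool
	Price         int64
}

func (p *Params) UnmarshalJSON(raw []byte) error {
	type alias Params               // alias has no UnmarshalJSON, so no recursion
	a := alias{FastRetrieval: true} // defaults applied before decoding
	if err := json.Unmarshal(raw, &a); err != nil {
		return err
	}
	*p = Params(a)
	return nil
}

func main() {
	var p Params
	_ = json.Unmarshal([]byte(`{"Price": 7}`), &p)
	fmt.Println(p.FastRetrieval, p.Price) // true 7: absent field keeps the default
}
```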


@@ -39,6 +39,7 @@ type GatewayAPI interface {
 StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
 StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
 StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
+StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
 StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
 StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*MsgLookup, error)
 }


@@ -19,6 +19,7 @@ import (
 "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
 "github.com/filecoin-project/lotus/extern/sector-storage/stores"
 "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+"github.com/filecoin-project/specs-storage/storage"
 )
 // StorageMiner is a low-level interface to the Filecoin network storage miner node
@@ -116,6 +117,8 @@ type StorageMiner interface {
 // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
 // the path specified when calling CreateBackup is within the base path
 CreateBackup(ctx context.Context, fpath string) error
+CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error)
 }
 type SealRes struct {


@@ -23,6 +23,9 @@ type WorkerAPI interface {
 storiface.WorkerCalls
+TaskDisable(ctx context.Context, tt sealtasks.TaskType) error
+TaskEnable(ctx context.Context, tt sealtasks.TaskType) error
 // Storage / Other
 Remove(ctx context.Context, sector abi.SectorID) error


@@ -60,6 +60,9 @@ type CommonStruct struct {
 NetBandwidthStatsByPeer func(ctx context.Context) (map[string]metrics.Stats, error) `perm:"read"`
 NetBandwidthStatsByProtocol func(ctx context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"`
 NetAgentVersion func(ctx context.Context, p peer.ID) (string, error) `perm:"read"`
+NetBlockAdd func(ctx context.Context, acl api.NetBlockList) error `perm:"admin"`
+NetBlockRemove func(ctx context.Context, acl api.NetBlockList) error `perm:"admin"`
+NetBlockList func(ctx context.Context) (api.NetBlockList, error) `perm:"read"`
 ID func(context.Context) (peer.ID, error) `perm:"read"`
 Version func(context.Context) (api.Version, error) `perm:"read"`
@@ -360,6 +363,8 @@ type StorageMinerStruct struct {
 PiecesGetCIDInfo func(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) `perm:"read"`
 CreateBackup func(ctx context.Context, fpath string) error `perm:"admin"`
+CheckProvable func(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
 }
 }
@@ -385,6 +390,9 @@ type WorkerStruct struct {
 ReadPiece func(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"`
 Fetch func(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
+TaskDisable func(ctx context.Context, tt sealtasks.TaskType) error `perm:"admin"`
+TaskEnable func(ctx context.Context, tt sealtasks.TaskType) error `perm:"admin"`
 Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
 StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
@@ -424,6 +432,7 @@ type GatewayStruct struct {
 StateMarketBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
 StateMarketStorageDeal func(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
 StateNetworkVersion func(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error)
+StateSectorGetInfo func(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
 StateVerifiedClientStatus func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
 StateWaitMsg func(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error)
 }
@@ -495,6 +504,18 @@ func (c *CommonStruct) NetBandwidthStatsByProtocol(ctx context.Context) (map[pro
 return c.Internal.NetBandwidthStatsByProtocol(ctx)
 }
+func (c *CommonStruct) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
+return c.Internal.NetBlockAdd(ctx, acl)
+}
+func (c *CommonStruct) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
+return c.Internal.NetBlockRemove(ctx, acl)
+}
+func (c *CommonStruct) NetBlockList(ctx context.Context) (api.NetBlockList, error) {
+return c.Internal.NetBlockList(ctx)
+}
 func (c *CommonStruct) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
 return c.Internal.NetAgentVersion(ctx, p)
 }
@@ -1495,6 +1516,10 @@ func (c *StorageMinerStruct) CreateBackup(ctx context.Context, fpath string) err
 return c.Internal.CreateBackup(ctx, fpath)
 }
+func (c *StorageMinerStruct) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) {
+return c.Internal.CheckProvable(ctx, pp, sectors, expensive)
+}
 // WorkerStruct
 func (w *WorkerStruct) Version(ctx context.Context) (build.Version, error) {
@@ -1557,6 +1582,14 @@ func (w *WorkerStruct) Fetch(ctx context.Context, id storage.SectorRef, fileType
 return w.Internal.Fetch(ctx, id, fileType, ptype, am)
 }
+func (w *WorkerStruct) TaskDisable(ctx context.Context, tt sealtasks.TaskType) error {
+return w.Internal.TaskDisable(ctx, tt)
+}
+func (w *WorkerStruct) TaskEnable(ctx context.Context, tt sealtasks.TaskType) error {
+return w.Internal.TaskEnable(ctx, tt)
+}
 func (w *WorkerStruct) Remove(ctx context.Context, sector abi.SectorID) error {
 return w.Internal.Remove(ctx, sector)
 }
@@ -1681,6 +1714,10 @@ func (g GatewayStruct) StateNetworkVersion(ctx context.Context, tsk types.TipSet
 return g.Internal.StateNetworkVersion(ctx, tsk)
 }
+func (g GatewayStruct) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+return g.Internal.StateSectorGetInfo(ctx, maddr, n, tsk)
+}
 func (g GatewayStruct) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
 return g.Internal.StateVerifiedClientStatus(ctx, addr, tsk)
 }


@@ -234,6 +234,9 @@ func init() {
 },
 })
 addExample(storiface.ErrorCode(0))
+addExample(map[abi.SectorNumber]string{
+123: "can't acquire read lock",
+})
 // worker specific
 addExample(storiface.AcquireMove)
@@ -241,6 +244,7 @@
 addExample(map[sealtasks.TaskType]struct{}{
 sealtasks.TTPreCommit2: {},
 })
+addExample(sealtasks.TTCommit2)
 }
 func exampleValue(method string, t, parent reflect.Type) interface{} {
func exampleValue(method string, t, parent reflect.Type) interface{} { func exampleValue(method string, t, parent reflect.Type) interface{} {


@@ -89,7 +89,7 @@ func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeH
 t.Fatal(err)
 }
-MakeDeal(t, ctx, 6, client, miner, false, false)
+MakeDeal(t, ctx, 6, client, miner, false, false, 0)
 // Validate upgrade


@@ -12,6 +12,8 @@ import (
 "testing"
 "time"
+"github.com/filecoin-project/go-state-types/abi"
 "github.com/stretchr/testify/require"
 "github.com/ipfs/go-cid"
@@ -31,7 +33,7 @@
 ipld "github.com/ipfs/go-ipld-format"
 )
-func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool) {
+func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
 ctx := context.Background()
 n, sn := b(t, OneFull, OneMiner)
@@ -60,14 +62,14 @@ func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport
 }
 }()
-MakeDeal(t, ctx, 6, client, miner, carExport, fastRet)
+MakeDeal(t, ctx, 6, client, miner, carExport, fastRet, startEpoch)
 atomic.AddInt64(&mine, -1)
 fmt.Println("shutting down mining")
 <-done
 }
-func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
+func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
 ctx := context.Background()
 n, sn := b(t, OneFull, OneMiner)
@@ -97,15 +99,15 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
 }
 }()
-MakeDeal(t, ctx, 6, client, miner, false, false)
-MakeDeal(t, ctx, 7, client, miner, false, false)
+MakeDeal(t, ctx, 6, client, miner, false, false, startEpoch)
+MakeDeal(t, ctx, 7, client, miner, false, false, startEpoch)
 atomic.AddInt64(&mine, -1)
 fmt.Println("shutting down mining")
 <-done
 }
-func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool) {
+func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
 res, data, err := CreateClientFile(ctx, client, rseed)
 if err != nil {
 t.Fatal(err)
@@ -114,7 +116,7 @@ func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode,
 fcid := res.Root
 fmt.Println("FILE CID: ", fcid)
-deal := startDeal(t, ctx, miner, client, fcid, fastRet)
+deal := startDeal(t, ctx, miner, client, fcid, fastRet, startEpoch)
 // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
 time.Sleep(time.Second)
@@ -149,7 +151,7 @@ func CreateClientFile(ctx context.Context, client api.FullNode, rseed int) (*api
 return res, data, nil
 }
-func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
+func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
 ctx := context.Background()
 n, sn := b(t, OneFull, OneMiner)
@@ -189,7 +191,7 @@ func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Durati
 fmt.Println("FILE CID: ", fcid)
-deal := startDeal(t, ctx, miner, client, fcid, true)
+deal := startDeal(t, ctx, miner, client, fcid, true, startEpoch)
 waitDealPublished(t, ctx, miner, deal)
 fmt.Println("deal published, retrieving")
@@ -203,7 +205,7 @@ func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Durati
 <-done
 }
-func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) {
+func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) {
 ctx := context.Background()
 n, sn := b(t, OneFull, OneMiner)
@@ -252,13 +254,13 @@ func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration
 t.Fatal(err)
 }
-deal1 := startDeal(t, ctx, miner, client, fcid1, true)
+deal1 := startDeal(t, ctx, miner, client, fcid1, true, 0)
 // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
 time.Sleep(time.Second)
 waitDealSealed(t, ctx, miner, client, deal1, true)
-deal2 := startDeal(t, ctx, miner, client, fcid2, true)
+deal2 := startDeal(t, ctx, miner, client, fcid2, true, 0)
 time.Sleep(time.Second)
 waitDealSealed(t, ctx, miner, client, deal2, false)
@@ -278,7 +280,7 @@ func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration
 <-done
 }
-func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool) *cid.Cid {
+func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
 maddr, err := miner.ActorAddress(ctx)
 if err != nil {
 t.Fatal(err)
@@ -296,6 +298,7 @@ func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client
 Wallet: addr,
 Miner: maddr,
 EpochPrice: types.NewInt(1000000),
+DealStartEpoch: startEpoch,
 MinBlocksDuration: uint64(build.MinDealDuration),
 FastRetrieval: fastRet,
 })
@@ -313,7 +316,7 @@ loop:
 t.Fatal(err)
 }
 switch di.State {
-case storagemarket.StorageDealSealing:
+case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
 if noseal {
 return
 }
@@ -353,7 +356,7 @@ func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode,
 t.Fatal("deal failed")
 case storagemarket.StorageDealError:
 t.Fatal("deal errored", di.Message)
-case storagemarket.StorageDealFinalizing, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
+case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
 fmt.Println("COMPLETE", di)
 return
 }


@@ -186,7 +186,7 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
 }
 }()
-deal := startDeal(t, ctx, provider, client, fcid, false)
+deal := startDeal(t, ctx, provider, client, fcid, false, 0)
 // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
 time.Sleep(time.Second)


@@ -107,3 +107,9 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta
 }
 return channel
 }
+type NetBlockList struct {
+Peers []peer.ID
+IPAddrs []string
+IPSubnets []string
+}
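For context, the ConnectionGater additions (NetBlockAdd/NetBlockRemove/NetBlockList on the Common interface, plus this NetBlockList payload) can be driven by any API consumer. A rough, hedged sketch against a hypothetical stand-in interface, since wiring up a real client is out of scope here:

```go
package example

import (
	"context"
	"fmt"
)

// NetBlockList mirrors the payload added above (peer IDs shown as strings for brevity).
type NetBlockList struct {
	Peers     []string
	IPAddrs   []string
	IPSubnets []string
}

// NetBlocker is a hypothetical slice of the Common API, not a real Lotus type.
type NetBlocker interface {
	NetBlockAdd(ctx context.Context, acl NetBlockList) error
	NetBlockList(ctx context.Context) (NetBlockList, error)
}

// blockExample blocks one address and one subnet, then reads the ACL back.
func blockExample(ctx context.Context, n NetBlocker) error {
	if err := n.NetBlockAdd(ctx, NetBlockList{
		IPAddrs:   []string{"203.0.113.7"},
		IPSubnets: []string{"198.51.100.0/24"},
	}); err != nil {
		return err
	}
	acl, err := n.NetBlockList(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("blocked: %d addrs, %d subnets\n", len(acl.IPAddrs), len(acl.IPSubnets))
	return nil
}
```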


@@ -29,7 +29,7 @@ func buildType() string {
 }
 // BuildVersion is the local build version, set by build system
-const BuildVersion = "1.2.0"
+const BuildVersion = "1.2.1"
 func UserVersion() string {
 return BuildVersion + buildType() + CurrentCommit


@@ -28,7 +28,7 @@ const (
 func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
 miner0.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types))
 miner2.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
-miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
 miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
 AddSupportedProofTypes(types...)


@@ -129,7 +129,7 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error {
 }
 if err := e.tsc.add(cur[0].Val); err != nil {
-log.Warn("tsc.add: adding current tipset failed: %w", err)
+log.Warnf("tsc.add: adding current tipset failed: %v", err)
 }
 e.readyOnce.Do(func() {


@@ -38,20 +38,26 @@ func newPeerTracker(lc fx.Lifecycle, h host.Host, pmgr *peermgr.PeerMgr) *bsPeer
 pmgr: pmgr,
 }
-sub, err := h.EventBus().Subscribe(new(peermgr.NewFilPeer))
+evtSub, err := h.EventBus().Subscribe(new(peermgr.FilPeerEvt))
 if err != nil {
 panic(err)
 }
 go func() {
-for newPeer := range sub.Out() {
-bsPt.addPeer(newPeer.(peermgr.NewFilPeer).Id)
+for evt := range evtSub.Out() {
+pEvt := evt.(peermgr.FilPeerEvt)
+switch pEvt.Type {
+case peermgr.AddFilPeerEvt:
+bsPt.addPeer(pEvt.ID)
+case peermgr.RemoveFilPeerEvt:
+bsPt.removePeer(pEvt.ID)
+}
 }
 }()
 lc.Append(fx.Hook{
 OnStop: func(ctx context.Context) error {
-return sub.Close()
+return evtSub.Close()
 },
 })


@@ -21,7 +21,7 @@ import (
 bstore "github.com/filecoin-project/lotus/lib/blockstore"
 )
-func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor) (int64, *types.Actor, map[address.Address]address.Address, error) {
+func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor, remainder genesis.Actor) (int64, *types.Actor, map[address.Address]address.Address, error) {
 if len(initialActors) > MaxAccounts {
 return 0, nil, nil, xerrors.New("too many initial actors")
 }
@@ -90,6 +90,33 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
 }
 }
+setupMsig := func(meta json.RawMessage) error {
+var ainfo genesis.MultisigMeta
+if err := json.Unmarshal(meta, &ainfo); err != nil {
+return xerrors.Errorf("unmarshaling account meta: %w", err)
+}
+for _, e := range ainfo.Signers {
+if _, ok := keyToId[e]; ok {
+continue
+}
+fmt.Printf("init set %s t0%d\n", e, counter)
+value := cbg.CborInt(counter)
+if err := amap.Put(abi.AddrKey(e), &value); err != nil {
+return err
+}
+counter = counter + 1
+var err error
+keyToId[e], err = address.NewIDAddress(uint64(value))
+if err != nil {
+return err
+}
+}
+return nil
+}
 if rootVerifier.Type == genesis.TAccount {
 var ainfo genesis.AccountMeta
 if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil {
@@ -100,28 +127,15 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
 return 0, nil, nil, err
 }
 } else if rootVerifier.Type == genesis.TMultisig {
-var ainfo genesis.MultisigMeta
-if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil {
-return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
-}
-for _, e := range ainfo.Signers {
-if _, ok := keyToId[e]; ok {
-continue
-}
-fmt.Printf("init set %s t0%d\n", e, counter)
-value := cbg.CborInt(counter)
-if err := amap.Put(abi.AddrKey(e), &value); err != nil {
-return 0, nil, nil, err
-}
-counter = counter + 1
-var err error
-keyToId[e], err = address.NewIDAddress(uint64(value))
+err := setupMsig(rootVerifier.Meta)
 if err != nil {
-return 0, nil, nil, err
+return 0, nil, nil, xerrors.Errorf("setting up root verifier msig: %w", err)
-}
 }
-}
+err := setupMsig(remainder.Meta)
+if err != nil {
+return 0, nil, nil, xerrors.Errorf("setting up remainder msig: %w", err)
 }
 amapaddr, err := amap.Root()


@@ -134,7 +134,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
 // Create init actor
-idStart, initact, keyIDs, err := SetupInitActor(bs, template.NetworkName, template.Accounts, template.VerifregRootKey)
+idStart, initact, keyIDs, err := SetupInitActor(bs, template.NetworkName, template.Accounts, template.VerifregRootKey, template.RemainderAccount)
 if err != nil {
 return nil, nil, xerrors.Errorf("setup init actor: %w", err)
 }


@@ -240,10 +240,13 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted
 // check if RBF passes
 minPrice := ComputeMinRBF(exms.Message.GasPremium)
 if types.BigCmp(m.Message.GasPremium, minPrice) >= 0 {
-log.Infow("add with RBF", "oldpremium", exms.Message.GasPremium,
+log.Debugw("add with RBF", "oldpremium", exms.Message.GasPremium,
 "newpremium", m.Message.GasPremium, "addr", m.Message.From, "nonce", m.Message.Nonce)
 } else {
-log.Info("add with duplicate nonce")
+log.Debugf("add with duplicate nonce. message from %s with nonce %d already in mpool,"+
+" increase GasPremium to %s from %s to trigger replace by fee: %s",
+m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium,
+ErrRBFTooLowPremium)
 return false, xerrors.Errorf("message from %s with nonce %d already in mpool,"+
 " increase GasPremium to %s from %s to trigger replace by fee: %w",
 m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium,


@@ -6,30 +6,13 @@ import (
 "encoding/binary"
 "math"
-"github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
-"github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
-"github.com/filecoin-project/lotus/chain/actors/builtin"
 "github.com/filecoin-project/go-address"
 "github.com/filecoin-project/go-state-types/abi"
 "github.com/filecoin-project/go-state-types/big"
 "github.com/filecoin-project/go-state-types/network"
-"github.com/ipfs/go-cid"
-cbor "github.com/ipfs/go-ipld-cbor"
-"golang.org/x/xerrors"
-builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
-miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
-multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
-power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
-adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
-"github.com/filecoin-project/specs-actors/actors/migration/nv3"
 "github.com/filecoin-project/lotus/build"
 "github.com/filecoin-project/lotus/chain/actors/adt"
+"github.com/filecoin-project/lotus/chain/actors/builtin"
 init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
 "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
 "github.com/filecoin-project/lotus/chain/state"
@@ -38,6 +21,17 @@ import (
 "github.com/filecoin-project/lotus/chain/vm"
 bstore "github.com/filecoin-project/lotus/lib/blockstore"
 "github.com/filecoin-project/lotus/lib/bufbstore"
+builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
+"github.com/filecoin-project/specs-actors/actors/migration/nv3"
+adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+"github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
+"github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
+"github.com/ipfs/go-cid"
+cbor "github.com/ipfs/go-ipld-cbor"
+"golang.org/x/xerrors"
 )
 // UpgradeFunc is a migration function run at every upgrade.
@@ -85,6 +79,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
 }, {
 Height: build.UpgradeTapeHeight,
 Network: network.Version5,
+Migration: nil,
 }, {
 Height: build.UpgradeLiftoffHeight,
 Network: network.Version5,


@@ -300,7 +300,11 @@ func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts
 if err != nil {
 return nil, err
 } else if !found {
-return nil, xerrors.Errorf("deal %d not found", dealID)
+return nil, xerrors.Errorf(
+"deal %d not found "+
+"- deal may not have completed sealing before deal proposal "+
+"start epoch, or deal may have been slashed",
+dealID)
 }
 states, err := state.States()


@@ -726,12 +726,32 @@ func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error {
 log.Debug("tried to add block to tipset tracker that was already there")
 return nil
 }
+h, err := cs.GetBlock(oc)
+if err == nil && h != nil {
+if h.Miner == b.Miner {
+log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", b.Miner, b.Height, b.Cid(), h.Cid())
+}
+}
 }
+// This function is called 5 times per epoch on average
+// It is also called with tipsets that are done with initial validation
+// so they cannot be from the future.
+// We are guaranteed not to use tipsets older than 900 epochs (fork limit)
+// This means that we ideally want to keep only most recent 900 epochs in here
+// Golang's map iteration starts at a random point in a map.
+// With 5 tries per epoch, and 900 entries to keep, on average we will have
+// ~136 garbage entires in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
+// Seems good enough to me
+for height := range cs.tipsets {
+if height < b.Height-build.Finality {
+delete(cs.tipsets, height)
+}
+break
+}
 cs.tipsets[b.Height] = append(tss, b.Cid())
-// TODO: do we want to look for slashable submissions here? might as well...
 return nil
 }
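The comment in the hunk above states the expected amount of garbage without the intermediate arithmetic: with five randomly-started pruning attempts per epoch and 900 entries that must be retained, the break-even number of stale entries x solves 1-(1-x/(900+x))^5 = 0.5, which comes out to roughly 134 (the comment rounds this to ~136). A small illustrative check, not part of the change:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Probability that at least one of 5 random map-iteration starts hits a stale
	// entry when x stale entries sit alongside 900 live ones.
	hit := func(x float64) float64 { return 1 - math.Pow(1-x/(900+x), 5) }

	// Bisect for the x where that probability reaches 0.5.
	lo, hi := 0.0, 900.0
	for i := 0; i < 60; i++ {
		if mid := (lo + hi) / 2; hit(mid) < 0.5 {
			lo = mid
		} else {
			hi = mid
		}
	}
	fmt.Printf("break-even stale entries ≈ %.0f\n", (lo+hi)/2) // ≈ 134
}
```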
@@ -797,7 +817,7 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error)
 return types.NewTipSet(all)
 }
-inclMiners := map[address.Address]bool{b.Miner: true}
+inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()}
 for _, bhc := range tsets {
 if bhc == b.Cid() {
 continue
@@ -808,14 +828,14 @@
 return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
 }
-if inclMiners[h.Miner] {
-log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache", h.Miner, h.Height)
+if cid, found := inclMiners[h.Miner]; found {
+log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid)
 continue
 }
 if types.CidArrsEqual(h.Parents, b.Parents) {
 all = append(all, h)
-inclMiners[h.Miner] = true
+inclMiners[h.Miner] = bhc
 }
 }


@@ -6,9 +6,18 @@ import (
 "fmt"
 "time"
-"golang.org/x/xerrors"
 address "github.com/filecoin-project/go-address"
+"github.com/filecoin-project/lotus/build"
+"github.com/filecoin-project/lotus/chain"
+"github.com/filecoin-project/lotus/chain/messagepool"
+"github.com/filecoin-project/lotus/chain/stmgr"
+"github.com/filecoin-project/lotus/chain/store"
+"github.com/filecoin-project/lotus/chain/types"
+"github.com/filecoin-project/lotus/lib/blockstore"
+"github.com/filecoin-project/lotus/lib/sigs"
+"github.com/filecoin-project/lotus/metrics"
+"github.com/filecoin-project/lotus/node/impl/client"
+blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
 lru "github.com/hashicorp/golang-lru"
 blocks "github.com/ipfs/go-block-format"
 bserv "github.com/ipfs/go-blockservice"
@@ -21,19 +30,7 @@ import (
 cbg "github.com/whyrusleeping/cbor-gen"
 "go.opencensus.io/stats"
 "go.opencensus.io/tag"
+"golang.org/x/xerrors"
-blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
-"github.com/filecoin-project/lotus/build"
-"github.com/filecoin-project/lotus/chain"
-"github.com/filecoin-project/lotus/chain/messagepool"
-"github.com/filecoin-project/lotus/chain/stmgr"
-"github.com/filecoin-project/lotus/chain/store"
-"github.com/filecoin-project/lotus/chain/types"
-"github.com/filecoin-project/lotus/lib/blockstore"
-"github.com/filecoin-project/lotus/lib/sigs"
-"github.com/filecoin-project/lotus/metrics"
-"github.com/filecoin-project/lotus/node/impl/client"
 )
 var log = logging.Logger("sub")
@@ -342,16 +339,14 @@ func (bv *BlockValidator) validateLocalBlock(ctx context.Context, msg *pubsub.Me
 if size := msg.Size(); size > 1<<20-1<<15 {
 log.Errorf("ignoring oversize block (%dB)", size)
-ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, "oversize_block"))
-stats.Record(ctx, metrics.BlockValidationFailure.M(1))
+recordFailure(ctx, metrics.BlockValidationFailure, "oversize_block")
 return pubsub.ValidationIgnore
 }
 blk, what, err := bv.decodeAndCheckBlock(msg)
 if err != nil {
 log.Errorf("got invalid local block: %s", err)
-ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, what))
-stats.Record(ctx, metrics.BlockValidationFailure.M(1))
+recordFailure(ctx, metrics.BlockValidationFailure, what)
 return pubsub.ValidationIgnore
 }


@@ -1331,7 +1331,7 @@ loop:
 continue
 }
 if !xerrors.Is(err, bstore.ErrNotFound) {
-log.Warn("loading local tipset: %s", err)
+log.Warnf("loading local tipset: %s", err)
 }
 // NB: GetBlocks validates that the blocks are in-fact the ones we


@@ -23,6 +23,29 @@ func (f FIL) Unitless() string {
 return strings.TrimRight(strings.TrimRight(r.FloatString(18), "0"), ".")
 }
+var unitPrefixes = []string{"a", "f", "p", "n", "μ", "m"}
+func (f FIL) Short() string {
+n := BigInt(f)
+dn := uint64(1)
+var prefix string
+for _, p := range unitPrefixes {
+if n.LessThan(NewInt(dn * 1000)) {
+prefix = p
+break
+}
+dn *= 1000
+}
+r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(dn)))
+if r.Sign() == 0 {
+return "0"
+}
+return strings.TrimRight(strings.TrimRight(r.FloatString(3), "0"), ".") + " " + prefix + "FIL"
+}
 func (f FIL) Format(s fmt.State, ch rune) {
 switch ch {
 case 's', 'v':

chain/types/fil_test.go (new file)

@@ -0,0 +1,68 @@
package types
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestFilShort(t *testing.T) {
for _, s := range []struct {
fil string
expect string
}{
{fil: "1", expect: "1 FIL"},
{fil: "1.1", expect: "1.1 FIL"},
{fil: "12", expect: "12 FIL"},
{fil: "123", expect: "123 FIL"},
{fil: "123456", expect: "123456 FIL"},
{fil: "123.23", expect: "123.23 FIL"},
{fil: "123456.234", expect: "123456.234 FIL"},
{fil: "123456.2341234", expect: "123456.234 FIL"},
{fil: "123456.234123445", expect: "123456.234 FIL"},
{fil: "0.1", expect: "100 mFIL"},
{fil: "0.01", expect: "10 mFIL"},
{fil: "0.001", expect: "1 mFIL"},
{fil: "0.0001", expect: "100 μFIL"},
{fil: "0.00001", expect: "10 μFIL"},
{fil: "0.000001", expect: "1 μFIL"},
{fil: "0.0000001", expect: "100 nFIL"},
{fil: "0.00000001", expect: "10 nFIL"},
{fil: "0.000000001", expect: "1 nFIL"},
{fil: "0.0000000001", expect: "100 pFIL"},
{fil: "0.00000000001", expect: "10 pFIL"},
{fil: "0.000000000001", expect: "1 pFIL"},
{fil: "0.0000000000001", expect: "100 fFIL"},
{fil: "0.00000000000001", expect: "10 fFIL"},
{fil: "0.000000000000001", expect: "1 fFIL"},
{fil: "0.0000000000000001", expect: "100 aFIL"},
{fil: "0.00000000000000001", expect: "10 aFIL"},
{fil: "0.000000000000000001", expect: "1 aFIL"},
{fil: "0.0000012", expect: "1.2 μFIL"},
{fil: "0.00000123", expect: "1.23 μFIL"},
{fil: "0.000001234", expect: "1.234 μFIL"},
{fil: "0.0000012344", expect: "1.234 μFIL"},
{fil: "0.00000123444", expect: "1.234 μFIL"},
{fil: "0.0002212", expect: "221.2 μFIL"},
{fil: "0.00022123", expect: "221.23 μFIL"},
{fil: "0.000221234", expect: "221.234 μFIL"},
{fil: "0.0002212344", expect: "221.234 μFIL"},
{fil: "0.00022123444", expect: "221.234 μFIL"},
} {
s := s
t.Run(s.fil, func(t *testing.T) {
f, err := ParseFIL(s.fil)
require.NoError(t, err)
require.Equal(t, s.expect, f.Short())
})
}
}


@@ -36,9 +36,9 @@ type ActorPredicate func(vmr.Runtime, rtt.VMActor) error
 func ActorsVersionPredicate(ver actors.Version) ActorPredicate {
 return func(rt vmr.Runtime, v rtt.VMActor) error {
-nver := actors.VersionForNetwork(rt.NetworkVersion())
+aver := actors.VersionForNetwork(rt.NetworkVersion())
-if nver != ver {
+if aver != ver {
-return xerrors.Errorf("actor %s is a version %d actor; chain only supports actor version %d at height %d", v.Code(), ver, nver, rt.CurrEpoch())
+return xerrors.Errorf("actor %s is a version %d actor; chain only supports actor version %d at height %d and nver %d", v.Code(), ver, aver, rt.CurrEpoch(), rt.NetworkVersion())
 }
 return nil
 }


@@ -5,6 +5,7 @@ import (
 "context"
 "encoding/binary"
 "fmt"
+gruntime "runtime"
 "time"
 "github.com/filecoin-project/go-address"
@@ -534,7 +535,7 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError
 if EnableGasTracing {
 var callers [10]uintptr
-cout := 0 //gruntime.Callers(2+skip, callers[:])
+cout := gruntime.Callers(2+skip, callers[:])
 now := build.Clock.Now()
 if rt.lastGasCharge != nil {


@@ -7,29 +7,26 @@ import (
 goruntime "runtime"
 "sync"
-"github.com/filecoin-project/go-state-types/network"
-"github.com/filecoin-project/lotus/chain/actors/policy"
-"github.com/filecoin-project/go-address"
 "github.com/ipfs/go-cid"
 cbor "github.com/ipfs/go-ipld-cbor"
 "github.com/minio/blake2b-simd"
 mh "github.com/multiformats/go-multihash"
 "golang.org/x/xerrors"
+"github.com/filecoin-project/go-address"
 "github.com/filecoin-project/go-state-types/abi"
 "github.com/filecoin-project/go-state-types/crypto"
+"github.com/filecoin-project/go-state-types/network"
 "github.com/filecoin-project/lotus/chain/actors/adt"
 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+"github.com/filecoin-project/lotus/chain/actors/policy"
 "github.com/filecoin-project/lotus/chain/state"
 "github.com/filecoin-project/lotus/chain/types"
-"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 "github.com/filecoin-project/lotus/lib/sigs"
 runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
 proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
+"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 )
 func init() {
@@ -306,7 +303,7 @@ func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVer
 sema <- struct{}{}
 if err := ss.VerifySeal(svi); err != nil {
-log.Warnw("seal verify in batch failed", "miner", ma, "index", ix, "err", err)
+log.Warnw("seal verify in batch failed", "miner", ma, "sectorNumber", svi.SectorID.Number, "err", err)
 res[ix] = false
 } else {
 res[ix] = true


@@ -6,18 +6,16 @@ import (
 "strings"
 "sync"
+"github.com/filecoin-project/go-address"
 "github.com/filecoin-project/go-state-types/crypto"
 logging "github.com/ipfs/go-log/v2"
 "golang.org/x/xerrors"
-"github.com/filecoin-project/go-address"
 "github.com/filecoin-project/lotus/api"
-_ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures
-_ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures
 "github.com/filecoin-project/lotus/chain/types"
 "github.com/filecoin-project/lotus/lib/sigs"
+_ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures
+_ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures
 )
 var log = logging.Logger("wallet")
@@ -270,7 +268,7 @@ func (w *LocalWallet) WalletHas(ctx context.Context, addr address.Address) (bool
 return k != nil, nil
 }
-func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) error {
+func (w *LocalWallet) walletDelete(ctx context.Context, addr address.Address) error {
 k, err := w.findKey(addr)
 if err != nil {
@@ -305,18 +303,29 @@ func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) er
 delete(w.keys, addr)
-def, err := w.GetDefault()
-if err != nil {
-return xerrors.Errorf("getting default address: %w", err)
-}
+return nil
+}
+func (w *LocalWallet) deleteDefault() {
+w.lk.Lock()
+defer w.lk.Unlock()
+if err := w.keystore.Delete(KDefault); err != nil {
+if !xerrors.Is(err, types.ErrKeyInfoNotFound) {
+log.Warnf("failed to unregister current default key: %s", err)
+}
+}
+}
+func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) error {
+if err := w.walletDelete(ctx, addr); err != nil {
+return xerrors.Errorf("wallet delete: %w", err)
+}
+if def, err := w.GetDefault(); err == nil {
 if def == addr {
-err = w.SetDefault(address.Undef)
-if err != nil {
-return xerrors.Errorf("unsetting default address: %w", err)
-}
+w.deleteDefault()
 }
 }
 return nil
 }


@@ -466,6 +466,9 @@ var chainInspectUsage = &cli.Command{
 code, err := lookupActorCode(m.Message.To)
 if err != nil {
+if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) {
+continue
+}
 return err
 }


@ -328,7 +328,7 @@ var clientDealCmd = &cli.Command{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "verified-deal", Name: "verified-deal",
Usage: "indicate that the deal counts towards verified client total", Usage: "indicate that the deal counts towards verified client total",
Value: false, DefaultText: "true if client is verified, false otherwise",
}, },
&cli.StringFlag{ &cli.StringFlag{
Name: "provider-collateral", Name: "provider-collateral",
@ -655,19 +655,19 @@ uiLoop:
state = "find" state = "find"
} }
case "find": case "find":
asks, err := getAsks(ctx, api) asks, err := GetAsks(ctx, api)
if err != nil { if err != nil {
return err return err
} }
for _, ask := range asks { for _, ask := range asks {
if ask.MinPieceSize > ds.PieceSize { if ask.Ask.MinPieceSize > ds.PieceSize {
continue continue
} }
if ask.MaxPieceSize < ds.PieceSize { if ask.Ask.MaxPieceSize < ds.PieceSize {
continue continue
} }
candidateAsks = append(candidateAsks, ask) candidateAsks = append(candidateAsks, ask.Ask)
} }
afmt.Printf("Found %d candidate asks\n", len(candidateAsks)) afmt.Printf("Found %d candidate asks\n", len(candidateAsks))
@ -1191,6 +1191,11 @@ var clientDealStatsCmd = &cli.Command{
var clientListAsksCmd = &cli.Command{ var clientListAsksCmd = &cli.Command{
Name: "list-asks", Name: "list-asks",
Usage: "List asks for top miners", Usage: "List asks for top miners",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "by-ping",
},
},
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
@ -1199,17 +1204,26 @@ var clientListAsksCmd = &cli.Command{
defer closer() defer closer()
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
asks, err := getAsks(ctx, api) asks, err := GetAsks(ctx, api)
if err != nil { if err != nil {
return err return err
} }
for _, ask := range asks { if cctx.Bool("by-ping") {
fmt.Printf("%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch\n", ask.Miner, sort.Slice(asks, func(i, j int) bool {
return asks[i].Ping < asks[j].Ping
})
}
for _, a := range asks {
ask := a.Ask
fmt.Printf("%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch ping:%s\n", ask.Miner,
types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))), types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))),
types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))), types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))),
types.FIL(ask.Price), types.FIL(ask.Price),
types.FIL(ask.VerifiedPrice), types.FIL(ask.VerifiedPrice),
a.Ping,
) )
} }
@ -1217,7 +1231,12 @@ var clientListAsksCmd = &cli.Command{
}, },
} }
func getAsks(ctx context.Context, api lapi.FullNode) ([]*storagemarket.StorageAsk, error) { type QueriedAsk struct {
Ask *storagemarket.StorageAsk
Ping time.Duration
}
func GetAsks(ctx context.Context, api lapi.FullNode) ([]QueriedAsk, error) {
color.Blue(".. getting miner list") color.Blue(".. getting miner list")
miners, err := api.StateListMiners(ctx, types.EmptyTSK) miners, err := api.StateListMiners(ctx, types.EmptyTSK)
if err != nil { if err != nil {
@ -1272,7 +1291,7 @@ loop:
color.Blue(".. querying asks") color.Blue(".. querying asks")
var asks []*storagemarket.StorageAsk var asks []QueriedAsk
var queried, got int64 var queried, got int64
done = make(chan struct{}) done = make(chan struct{})
@ -1308,9 +1327,19 @@ loop:
return return
} }
rt := time.Now()
_, err = api.ClientQueryAsk(ctx, *mi.PeerId, miner)
if err != nil {
return
}
atomic.AddInt64(&got, 1) atomic.AddInt64(&got, 1)
lk.Lock() lk.Lock()
asks = append(asks, ask) asks = append(asks, QueriedAsk{
Ask: ask,
Ping: time.Now().Sub(rt),
})
lk.Unlock() lk.Unlock()
}(miner) }(miner)
} }
@ -1328,7 +1357,7 @@ loop2:
fmt.Printf("\r* Queried %d asks, got %d responses\n", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) fmt.Printf("\r* Queried %d asks, got %d responses\n", atomic.LoadInt64(&queried), atomic.LoadInt64(&got))
sort.Slice(asks, func(i, j int) bool { sort.Slice(asks, func(i, j int) bool {
return asks[i].Price.LessThan(asks[j].Price) return asks[i].Ask.Price.LessThan(asks[j].Ask.Price)
}) })
return asks, nil return asks, nil
@ -1825,6 +1854,11 @@ var clientCancelTransfer = &cli.Command{
Usage: "specify only transfers where peer is/is not initiator", Usage: "specify only transfers where peer is/is not initiator",
Value: true, Value: true,
}, },
&cli.DurationFlag{
Name: "cancel-timeout",
Usage: "time to wait for cancel to be sent to storage provider",
Value: 5 * time.Second,
},
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() { if !cctx.Args().Present() {
@ -1868,7 +1902,9 @@ var clientCancelTransfer = &cli.Command{
} }
} }
return api.ClientCancelDataTransfer(ctx, transferID, other, initiator) timeoutCtx, cancel := context.WithTimeout(ctx, cctx.Duration("cancel-timeout"))
defer cancel()
return api.ClientCancelDataTransfer(timeoutCtx, transferID, other, initiator)
}, },
} }
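The cancel-timeout flag bounds ClientCancelDataTransfer with a derived context. A small standalone sketch of the same pattern, with a hypothetical slow cancel function standing in for the API call:

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// cancelWithTimeout bounds an otherwise open-ended cancel call, the same way
// the command above wraps ClientCancelDataTransfer.
func cancelWithTimeout(ctx context.Context, timeout time.Duration, cancelFn func(context.Context) error) error {
    tctx, cancel := context.WithTimeout(ctx, timeout)
    defer cancel()

    if err := cancelFn(tctx); err != nil {
        if errors.Is(err, context.DeadlineExceeded) {
            return fmt.Errorf("cancel not acknowledged within %s: %w", timeout, err)
        }
        return err
    }
    return nil
}

func main() {
    // Hypothetical slow provider: never acknowledges within the timeout.
    slow := func(ctx context.Context) error {
        select {
        case <-time.After(10 * time.Second):
            return nil
        case <-ctx.Done():
            return ctx.Err()
        }
    }
    fmt.Println(cancelWithTimeout(context.Background(), 100*time.Millisecond, slow))
}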

View File

@ -233,6 +233,7 @@ var mpoolStat = &cli.Command{
addr string addr string
past, cur, future uint64 past, cur, future uint64
belowCurr, belowPast uint64 belowCurr, belowPast uint64
gasLimit big.Int
} }
buckets := map[address.Address]*statBucket{} buckets := map[address.Address]*statBucket{}
@ -274,6 +275,7 @@ var mpoolStat = &cli.Command{
var s mpStat var s mpStat
s.addr = a.String() s.addr = a.String()
s.gasLimit = big.Zero()
for _, m := range bkt.msgs { for _, m := range bkt.msgs {
if m.Message.Nonce < act.Nonce { if m.Message.Nonce < act.Nonce {
@ -290,6 +292,8 @@ var mpoolStat = &cli.Command{
if m.Message.GasFeeCap.LessThan(minBF) { if m.Message.GasFeeCap.LessThan(minBF) {
s.belowPast++ s.belowPast++
} }
s.gasLimit = big.Add(s.gasLimit, types.NewInt(uint64(m.Message.GasLimit)))
} }
out = append(out, s) out = append(out, s)
@ -300,6 +304,7 @@ var mpoolStat = &cli.Command{
}) })
var total mpStat var total mpStat
total.gasLimit = big.Zero()
for _, stat := range out { for _, stat := range out {
total.past += stat.past total.past += stat.past
@ -307,12 +312,13 @@ var mpoolStat = &cli.Command{
total.future += stat.future total.future += stat.future
total.belowCurr += stat.belowCurr total.belowCurr += stat.belowCurr
total.belowPast += stat.belowPast total.belowPast += stat.belowPast
total.gasLimit = big.Add(total.gasLimit, stat.gasLimit)
fmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d \n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast) fmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast, stat.gasLimit)
} }
fmt.Println("-----") fmt.Println("-----")
fmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d \n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast) fmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast, total.gasLimit)
return nil return nil
}, },
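The new gasLimit column is a plain per-sender sum of pending message gas limits. A standalone sketch of the same accumulation using math/big (the real code uses the chain's big-int wrapper and types.NewInt), with made-up gas limits:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    // Hypothetical gas limits of one sender's pending messages.
    gasLimits := []int64{25_000_000, 31_000_000, 12_500_000}

    total := big.NewInt(0)
    for _, gl := range gasLimits {
        total.Add(total, big.NewInt(gl))
    }
    fmt.Printf("gasLimit: %s\n", total.String()) // 68500000
}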
@ -442,7 +448,9 @@ var mpoolReplaceCmd = &cli.Command{
messagepool.CapGasFee(mff, &msg, mss.Get().MaxFee) messagepool.CapGasFee(mff, &msg, mss.Get().MaxFee)
} else { } else {
if cctx.IsSet("gas-limit") {
msg.GasLimit = cctx.Int64("gas-limit") msg.GasLimit = cctx.Int64("gas-limit")
}
msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium")) msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium"))
if err != nil { if err != nil {
return fmt.Errorf("parsing gas-premium: %w", err) return fmt.Errorf("parsing gas-premium: %w", err)

View File

@ -18,6 +18,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
atypes "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/addrutil" "github.com/filecoin-project/lotus/lib/addrutil"
) )
@ -34,6 +35,7 @@ var netCmd = &cli.Command{
netScores, netScores,
NetReachability, NetReachability,
NetBandwidthCmd, NetBandwidthCmd,
NetBlockCmd,
}, },
} }
@ -375,3 +377,202 @@ var NetBandwidthCmd = &cli.Command{
}, },
} }
var NetBlockCmd = &cli.Command{
Name: "block",
Usage: "Manage network connection gating rules",
Subcommands: []*cli.Command{
NetBlockAddCmd,
NetBlockRemoveCmd,
NetBlockListCmd,
},
}
var NetBlockAddCmd = &cli.Command{
Name: "add",
Usage: "Add connection gating rules",
Subcommands: []*cli.Command{
NetBlockAddPeer,
NetBlockAddIP,
NetBlockAddSubnet,
},
}
var NetBlockAddPeer = &cli.Command{
Name: "peer",
Usage: "Block a peer",
ArgsUsage: "<Peer> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
var peers []peer.ID
for _, s := range cctx.Args().Slice() {
p, err := peer.Decode(s)
if err != nil {
return err
}
peers = append(peers, p)
}
return api.NetBlockAdd(ctx, atypes.NetBlockList{Peers: peers})
},
}
var NetBlockAddIP = &cli.Command{
Name: "ip",
Usage: "Block an IP address",
ArgsUsage: "<IP> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
return api.NetBlockAdd(ctx, atypes.NetBlockList{IPAddrs: cctx.Args().Slice()})
},
}
var NetBlockAddSubnet = &cli.Command{
Name: "subnet",
Usage: "Block an IP subnet",
ArgsUsage: "<CIDR> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
return api.NetBlockAdd(ctx, atypes.NetBlockList{IPSubnets: cctx.Args().Slice()})
},
}
var NetBlockRemoveCmd = &cli.Command{
Name: "remove",
Usage: "Remove connection gating rules",
Subcommands: []*cli.Command{
NetBlockRemovePeer,
NetBlockRemoveIP,
NetBlockRemoveSubnet,
},
}
var NetBlockRemovePeer = &cli.Command{
Name: "peer",
Usage: "Unblock a peer",
ArgsUsage: "<Peer> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
var peers []peer.ID
for _, s := range cctx.Args().Slice() {
p, err := peer.Decode(s)
if err != nil {
return err
}
peers = append(peers, p)
}
return api.NetBlockRemove(ctx, atypes.NetBlockList{Peers: peers})
},
}
var NetBlockRemoveIP = &cli.Command{
Name: "ip",
Usage: "Unblock an IP address",
ArgsUsage: "<IP> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
return api.NetBlockRemove(ctx, atypes.NetBlockList{IPAddrs: cctx.Args().Slice()})
},
}
var NetBlockRemoveSubnet = &cli.Command{
Name: "subnet",
Usage: "Unblock an IP subnet",
ArgsUsage: "<CIDR> ...",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
return api.NetBlockRemove(ctx, atypes.NetBlockList{IPSubnets: cctx.Args().Slice()})
},
}
var NetBlockListCmd = &cli.Command{
Name: "list",
Usage: "list connection gating rules",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
acl, err := api.NetBlockList(ctx)
if err != nil {
return err
}
if len(acl.Peers) != 0 {
sort.Slice(acl.Peers, func(i, j int) bool {
return strings.Compare(string(acl.Peers[i]), string(acl.Peers[j])) > 0
})
fmt.Println("Blocked Peers:")
for _, p := range acl.Peers {
fmt.Printf("\t%s\n", p)
}
}
if len(acl.IPAddrs) != 0 {
sort.Slice(acl.IPAddrs, func(i, j int) bool {
return strings.Compare(acl.IPAddrs[i], acl.IPAddrs[j]) < 0
})
fmt.Println("Blocked IPs:")
for _, a := range acl.IPAddrs {
fmt.Printf("\t%s\n", a)
}
}
if len(acl.IPSubnets) != 0 {
sort.Slice(acl.IPSubnets, func(i, j int) bool {
return strings.Compare(acl.IPSubnets[i], acl.IPSubnets[j]) < 0
})
fmt.Println("Blocked Subnets:")
for _, n := range acl.IPSubnets {
fmt.Printf("\t%s\n", n)
}
}
return nil
},
}
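For reference, the payload these subcommands send is just three lists of rules. A sketch of assembling one outside the CLI, with a local mirror of api.NetBlockList and a placeholder peer ID (an arbitrary string may fail to decode, which is why the error is checked here as in the command):

package main

import (
    "fmt"

    "github.com/libp2p/go-libp2p-core/peer"
)

// netBlockList mirrors the three rule lists carried by api.NetBlockList.
type netBlockList struct {
    Peers     []peer.ID
    IPAddrs   []string
    IPSubnets []string
}

func main() {
    // Placeholder peer ID, purely for illustration.
    p, err := peer.Decode("QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N")
    if err != nil {
        fmt.Println("invalid peer id:", err)
        return
    }

    acl := netBlockList{
        Peers:     []peer.ID{p},
        IPAddrs:   []string{"203.0.113.7"},
        IPSubnets: []string{"198.51.100.0/24"},
    }
    fmt.Printf("blocking %d peer(s), %d IP(s), %d subnet(s)\n", len(acl.Peers), len(acl.IPAddrs), len(acl.IPSubnets))
}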

View File

@ -72,6 +72,7 @@ var stateCmd = &cli.Command{
stateMinerInfo, stateMinerInfo,
stateMarketCmd, stateMarketCmd,
stateExecTraceCmd, stateExecTraceCmd,
stateNtwkVersionCmd,
}, },
} }
@ -127,6 +128,7 @@ var stateMinerInfo = &cli.Command{
} }
fmt.Printf("%s ", a) fmt.Printf("%s ", a)
} }
fmt.Println()
fmt.Printf("Consensus Fault End:\t%d\n", mi.ConsensusFaultElapsed) fmt.Printf("Consensus Fault End:\t%d\n", mi.ConsensusFaultElapsed)
fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize) fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize)
@ -1758,6 +1760,9 @@ var stateSectorCmd = &cli.Command{
if err != nil { if err != nil {
return err return err
} }
if si == nil {
return xerrors.Errorf("sector %d for miner %s not found", sid, maddr)
}
fmt.Println("SectorNumber: ", si.SectorNumber) fmt.Println("SectorNumber: ", si.SectorNumber)
fmt.Println("SealProof: ", si.SealProof) fmt.Println("SealProof: ", si.SealProof)
@ -1831,3 +1836,35 @@ var stateMarketBalanceCmd = &cli.Command{
return nil return nil
}, },
} }
var stateNtwkVersionCmd = &cli.Command{
Name: "network-version",
Usage: "Returns the network version",
Action: func(cctx *cli.Context) error {
if cctx.Args().Present() {
return ShowHelp(cctx, fmt.Errorf("this command does not take any arguments"))
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
ts, err := LoadTipSet(ctx, cctx, api)
if err != nil {
return err
}
nv, err := api.StateNetworkVersion(ctx, ts.Key())
if err != nil {
return err
}
fmt.Printf("Network Version: %d\n", nv)
return nil
},
}

View File

@ -43,13 +43,14 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
require.Regexp(t, regexp.MustCompile("Ask:"), out) require.Regexp(t, regexp.MustCompile("Ask:"), out)
// Create a deal (non-interactive) // Create a deal (non-interactive)
// client deal <cid> <miner addr> 1000000attofil <duration> // client deal --start-epoch=<start epoch> <cid> <miner addr> 1000000attofil <duration>
res, _, err := test.CreateClientFile(ctx, clientNode, 1) res, _, err := test.CreateClientFile(ctx, clientNode, 1)
require.NoError(t, err) require.NoError(t, err)
startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12)
dataCid := res.Root dataCid := res.Root
price := "1000000attofil" price := "1000000attofil"
duration := fmt.Sprintf("%d", build.MinDealDuration) duration := fmt.Sprintf("%d", build.MinDealDuration)
out = clientCLI.RunCmd("client", "deal", dataCid.String(), minerAddr.String(), price, duration) out = clientCLI.RunCmd("client", "deal", startEpoch, dataCid.String(), minerAddr.String(), price, duration)
fmt.Println("client deal", out) fmt.Println("client deal", out)
// Create a deal (interactive) // Create a deal (interactive)
@ -82,7 +83,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
fmt.Println("list-deals:\n", out) fmt.Println("list-deals:\n", out)
lines := strings.Split(out, "\n") lines := strings.Split(out, "\n")
require.Len(t, lines, 2) require.GreaterOrEqual(t, len(lines), 2)
re := regexp.MustCompile(`\s+`) re := regexp.MustCompile(`\s+`)
parts := re.Split(lines[1], -1) parts := re.Split(lines[1], -1)
if len(parts) < 4 { if len(parts) < 4 {
@ -111,7 +112,7 @@ func dealComplete(t *testing.T, dealStatus string) bool {
switch dealStatus { switch dealStatus {
case "StorageDealFailing", "StorageDealError": case "StorageDealFailing", "StorageDealError":
t.Fatal(xerrors.Errorf("Storage deal failed with status: " + dealStatus)) t.Fatal(xerrors.Errorf("Storage deal failed with status: " + dealStatus))
case "StorageDealStaged", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed": case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed":
return true return true
} }

View File

@ -60,7 +60,7 @@ type TipSetExec struct {
var importBenchCmd = &cli.Command{ var importBenchCmd = &cli.Command{
Name: "import", Name: "import",
Usage: "benchmark chain import and validation", Usage: "Benchmark chain import and validation",
Subcommands: []*cli.Command{ Subcommands: []*cli.Command{
importAnalyzeCmd, importAnalyzeCmd,
}, },

View File

@ -41,7 +41,9 @@ var log = logging.Logger("lotus-bench")
type BenchResults struct { type BenchResults struct {
SectorSize abi.SectorSize SectorSize abi.SectorSize
SectorNumber int
SealingSum SealingResult
SealingResults []SealingResult SealingResults []SealingResult
PostGenerateCandidates time.Duration PostGenerateCandidates time.Duration
@ -56,6 +58,26 @@ type BenchResults struct {
VerifyWindowPostHot time.Duration VerifyWindowPostHot time.Duration
} }
func (bo *BenchResults) SumSealingTime() error {
if len(bo.SealingResults) <= 0 {
return xerrors.Errorf("BenchResults SealingResults len <= 0")
}
if len(bo.SealingResults) != bo.SectorNumber {
return xerrors.Errorf("BenchResults SealingResults len(%d) != bo.SectorNumber(%d)", len(bo.SealingResults), bo.SectorNumber)
}
for _, sealing := range bo.SealingResults {
bo.SealingSum.AddPiece += sealing.AddPiece
bo.SealingSum.PreCommit1 += sealing.PreCommit1
bo.SealingSum.PreCommit2 += sealing.PreCommit2
bo.SealingSum.Commit1 += sealing.Commit1
bo.SealingSum.Commit2 += sealing.Commit2
bo.SealingSum.Verify += sealing.Verify
bo.SealingSum.Unseal += sealing.Unseal
}
return nil
}
type SealingResult struct { type SealingResult struct {
AddPiece time.Duration AddPiece time.Duration
PreCommit1 time.Duration PreCommit1 time.Duration
@ -96,11 +118,12 @@ func main() {
var sealBenchCmd = &cli.Command{ var sealBenchCmd = &cli.Command{
Name: "sealing", Name: "sealing",
Usage: "Benchmark seal and winning post and window post",
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.StringFlag{ &cli.StringFlag{
Name: "storage-dir", Name: "storage-dir",
Value: "~/.lotus-bench", Value: "~/.lotus-bench",
Usage: "Path to the storage directory that will store sectors long term", Usage: "path to the storage directory that will store sectors long term",
}, },
&cli.StringFlag{ &cli.StringFlag{
Name: "sector-size", Name: "sector-size",
@ -132,16 +155,22 @@ var sealBenchCmd = &cli.Command{
Name: "skip-unseal", Name: "skip-unseal",
Usage: "skip the unseal portion of the benchmark", Usage: "skip the unseal portion of the benchmark",
}, },
&cli.StringFlag{
Name: "ticket-preimage",
Usage: "ticket random",
},
&cli.StringFlag{ &cli.StringFlag{
Name: "save-commit2-input", Name: "save-commit2-input",
Usage: "Save commit2 input to a file", Usage: "save commit2 input to a file",
}, },
&cli.IntFlag{ &cli.IntFlag{
Name: "num-sectors", Name: "num-sectors",
Usage: "select number of sectors to seal",
Value: 1, Value: 1,
}, },
&cli.IntFlag{ &cli.IntFlag{
Name: "parallel", Name: "parallel",
Usage: "num run in parallel",
Value: 1, Value: 1,
}, },
}, },
@ -213,7 +242,8 @@ var sealBenchCmd = &cli.Command{
sectorSize := abi.SectorSize(sectorSizeInt) sectorSize := abi.SectorSize(sectorSizeInt)
// Only fetch parameters if actually needed // Only fetch parameters if actually needed
if !c.Bool("skip-commit2") { skipc2 := c.Bool("skip-commit2")
if !skipc2 {
if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), uint64(sectorSize)); err != nil { if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), uint64(sectorSize)); err != nil {
return xerrors.Errorf("getting params: %w", err) return xerrors.Errorf("getting params: %w", err)
} }
@ -228,6 +258,8 @@ var sealBenchCmd = &cli.Command{
return err return err
} }
sectorNumber := c.Int("num-sectors")
var sealTimings []SealingResult var sealTimings []SealingResult
var sealedSectors []saproof2.SectorInfo var sealedSectors []saproof2.SectorInfo
@ -238,18 +270,11 @@ var sealBenchCmd = &cli.Command{
PreCommit2: 1, PreCommit2: 1,
Commit: 1, Commit: 1,
} }
sealTimings, sealedSectors, err = runSeals(sb, sbfs, c.Int("num-sectors"), parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), c.Bool("skip-commit2"), c.Bool("skip-unseal")) sealTimings, sealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal"))
if err != nil { if err != nil {
return xerrors.Errorf("failed to run seals: %w", err) return xerrors.Errorf("failed to run seals: %w", err)
} }
} } else {
beforePost := time.Now()
var challenge [32]byte
rand.Read(challenge[:])
if robench != "" {
// TODO: implement sbfs.List() and use that for all cases (preexisting sectorbuilder or not) // TODO: implement sbfs.List() and use that for all cases (preexisting sectorbuilder or not)
// TODO: this assumes we only ever benchmark a preseal // TODO: this assumes we only ever benchmark a preseal
@ -282,10 +307,19 @@ var sealBenchCmd = &cli.Command{
bo := BenchResults{ bo := BenchResults{
SectorSize: sectorSize, SectorSize: sectorSize,
SectorNumber: sectorNumber,
SealingResults: sealTimings, SealingResults: sealTimings,
} }
if err := bo.SumSealingTime(); err != nil {
return err
}
if !c.Bool("skip-commit2") { var challenge [32]byte
rand.Read(challenge[:])
beforePost := time.Now()
if !skipc2 {
log.Info("generating winning post candidates") log.Info("generating winning post candidates")
wipt, err := spt(sectorSize).RegisteredWinningPoStProof() wipt, err := spt(sectorSize).RegisteredWinningPoStProof()
if err != nil { if err != nil {
@ -420,21 +454,21 @@ var sealBenchCmd = &cli.Command{
fmt.Println(string(data)) fmt.Println(string(data))
} else { } else {
fmt.Printf("----\nresults (v28) (%d)\n", sectorSize) fmt.Printf("----\nresults (v28) SectorSize:(%d), SectorNumber:(%d)\n", sectorSize, sectorNumber)
if robench == "" { if robench == "" {
fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingResults[0].AddPiece, bps(bo.SectorSize, bo.SealingResults[0].AddPiece)) // TODO: average across multiple sealings fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingSum.AddPiece, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.AddPiece))
fmt.Printf("seal: preCommit phase 1: %s (%s)\n", bo.SealingResults[0].PreCommit1, bps(bo.SectorSize, bo.SealingResults[0].PreCommit1)) fmt.Printf("seal: preCommit phase 1: %s (%s)\n", bo.SealingSum.PreCommit1, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.PreCommit1))
fmt.Printf("seal: preCommit phase 2: %s (%s)\n", bo.SealingResults[0].PreCommit2, bps(bo.SectorSize, bo.SealingResults[0].PreCommit2)) fmt.Printf("seal: preCommit phase 2: %s (%s)\n", bo.SealingSum.PreCommit2, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.PreCommit2))
fmt.Printf("seal: commit phase 1: %s (%s)\n", bo.SealingResults[0].Commit1, bps(bo.SectorSize, bo.SealingResults[0].Commit1)) fmt.Printf("seal: commit phase 1: %s (%s)\n", bo.SealingSum.Commit1, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Commit1))
fmt.Printf("seal: commit phase 2: %s (%s)\n", bo.SealingResults[0].Commit2, bps(bo.SectorSize, bo.SealingResults[0].Commit2)) fmt.Printf("seal: commit phase 2: %s (%s)\n", bo.SealingSum.Commit2, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Commit2))
fmt.Printf("seal: verify: %s\n", bo.SealingResults[0].Verify) fmt.Printf("seal: verify: %s\n", bo.SealingSum.Verify)
if !c.Bool("skip-unseal") { if !c.Bool("skip-unseal") {
fmt.Printf("unseal: %s (%s)\n", bo.SealingResults[0].Unseal, bps(bo.SectorSize, bo.SealingResults[0].Unseal)) fmt.Printf("unseal: %s (%s)\n", bo.SealingSum.Unseal, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Unseal))
} }
fmt.Println("") fmt.Println("")
} }
if !c.Bool("skip-commit2") { if !skipc2 {
fmt.Printf("generate candidates: %s (%s)\n", bo.PostGenerateCandidates, bps(bo.SectorSize*abi.SectorSize(len(bo.SealingResults)), bo.PostGenerateCandidates)) fmt.Printf("generate candidates: %s (%s)\n", bo.PostGenerateCandidates, bps(bo.SectorSize, len(bo.SealingResults), bo.PostGenerateCandidates))
fmt.Printf("compute winning post proof (cold): %s\n", bo.PostWinningProofCold) fmt.Printf("compute winning post proof (cold): %s\n", bo.PostWinningProofCold)
fmt.Printf("compute winning post proof (hot): %s\n", bo.PostWinningProofHot) fmt.Printf("compute winning post proof (hot): %s\n", bo.PostWinningProofHot)
fmt.Printf("verify winning post proof (cold): %s\n", bo.VerifyWinningPostCold) fmt.Printf("verify winning post proof (cold): %s\n", bo.VerifyWinningPostCold)
@ -467,8 +501,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
if numSectors%par.PreCommit1 != 0 { if numSectors%par.PreCommit1 != 0 {
return nil, nil, fmt.Errorf("parallelism factor must cleanly divide numSectors") return nil, nil, fmt.Errorf("parallelism factor must cleanly divide numSectors")
} }
for i := abi.SectorNumber(0); i < abi.SectorNumber(numSectors); i++ {
for i := abi.SectorNumber(1); i <= abi.SectorNumber(numSectors); i++ {
sid := storage.SectorRef{ sid := storage.SectorRef{
ID: abi.SectorID{ ID: abi.SectorID{
Miner: mid, Miner: mid,
@ -489,7 +522,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
pieces = append(pieces, pi) pieces = append(pieces, pi)
sealTimings[i-1].AddPiece = time.Since(start) sealTimings[i].AddPiece = time.Since(start)
} }
sectorsPerWorker := numSectors / par.PreCommit1 sectorsPerWorker := numSectors / par.PreCommit1
@ -498,10 +531,9 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
for wid := 0; wid < par.PreCommit1; wid++ { for wid := 0; wid < par.PreCommit1; wid++ {
go func(worker int) { go func(worker int) {
sealerr := func() error { sealerr := func() error {
start := 1 + (worker * sectorsPerWorker) start := worker * sectorsPerWorker
end := start + sectorsPerWorker end := start + sectorsPerWorker
for i := abi.SectorNumber(start); i < abi.SectorNumber(end); i++ { for i := abi.SectorNumber(start); i < abi.SectorNumber(end); i++ {
ix := int(i - 1)
sid := storage.SectorRef{ sid := storage.SectorRef{
ID: abi.SectorID{ ID: abi.SectorID{
Miner: mid, Miner: mid,
@ -516,8 +548,8 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
ticket := abi.SealRandomness(trand[:]) ticket := abi.SealRandomness(trand[:])
log.Infof("[%d] Running replication(1)...", i) log.Infof("[%d] Running replication(1)...", i)
pieces := []abi.PieceInfo{pieces[ix]} piece := []abi.PieceInfo{pieces[i]}
pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, pieces) pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, piece)
if err != nil { if err != nil {
return xerrors.Errorf("commit: %w", err) return xerrors.Errorf("commit: %w", err)
} }
@ -535,7 +567,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
precommit2 := time.Now() precommit2 := time.Now()
<-preCommit2Sema <-preCommit2Sema
sealedSectors[ix] = saproof2.SectorInfo{ sealedSectors[i] = saproof2.SectorInfo{
SealProof: sid.ProofType, SealProof: sid.ProofType,
SectorNumber: i, SectorNumber: i,
SealedCID: cids.Sealed, SealedCID: cids.Sealed,
@ -549,7 +581,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
commitSema <- struct{}{} commitSema <- struct{}{}
commitStart := time.Now() commitStart := time.Now()
log.Infof("[%d] Generating PoRep for sector (1)", i) log.Infof("[%d] Generating PoRep for sector (1)", i)
c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, pieces, cids) c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, piece, cids)
if err != nil { if err != nil {
return err return err
} }
@ -630,12 +662,12 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
} }
unseal := time.Now() unseal := time.Now()
sealTimings[ix].PreCommit1 = precommit1.Sub(start) sealTimings[i].PreCommit1 = precommit1.Sub(start)
sealTimings[ix].PreCommit2 = precommit2.Sub(pc2Start) sealTimings[i].PreCommit2 = precommit2.Sub(pc2Start)
sealTimings[ix].Commit1 = sealcommit1.Sub(commitStart) sealTimings[i].Commit1 = sealcommit1.Sub(commitStart)
sealTimings[ix].Commit2 = sealcommit2.Sub(sealcommit1) sealTimings[i].Commit2 = sealcommit2.Sub(sealcommit1)
sealTimings[ix].Verify = verifySeal.Sub(sealcommit2) sealTimings[i].Verify = verifySeal.Sub(sealcommit2)
sealTimings[ix].Unseal = unseal.Sub(verifySeal) sealTimings[i].Unseal = unseal.Sub(verifySeal)
} }
return nil return nil
}() }()
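After the switch from 1-based to 0-based sector indices, worker w handles the half-open range [w*perWorker, (w+1)*perWorker). A quick standalone check of that partitioning, with hypothetical --num-sectors and --parallel values:

package main

import "fmt"

func main() {
    numSectors, workers := 8, 4 // hypothetical --num-sectors and --parallel values
    if numSectors%workers != 0 {
        panic("parallelism factor must cleanly divide numSectors")
    }
    perWorker := numSectors / workers
    for w := 0; w < workers; w++ {
        start := w * perWorker // 0-based, matching the updated loop above
        fmt.Printf("worker %d -> sectors %d..%d\n", w, start, start+perWorker-1)
    }
}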
@ -660,6 +692,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
var proveCmd = &cli.Command{ var proveCmd = &cli.Command{
Name: "prove", Name: "prove",
Usage: "Benchmark a proof computation", Usage: "Benchmark a proof computation",
ArgsUsage: "[input.json]",
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "no-gpu", Name: "no-gpu",
@ -711,8 +744,6 @@ var proveCmd = &cli.Command{
return err return err
} }
start := time.Now()
ref := storage.SectorRef{ ref := storage.SectorRef{
ID: abi.SectorID{ ID: abi.SectorID{
Miner: abi.ActorID(mid), Miner: abi.ActorID(mid),
@ -721,6 +752,9 @@ var proveCmd = &cli.Command{
ProofType: spt(abi.SectorSize(c2in.SectorSize)), ProofType: spt(abi.SectorSize(c2in.SectorSize)),
} }
fmt.Printf("----\nstart proof computation\n")
start := time.Now()
proof, err := sb.SealCommit2(context.TODO(), ref, c2in.Phase1Out) proof, err := sb.SealCommit2(context.TODO(), ref, c2in.Phase1Out)
if err != nil { if err != nil {
return err return err
@ -733,13 +767,14 @@ var proveCmd = &cli.Command{
fmt.Printf("----\nresults (v28) (%d)\n", c2in.SectorSize) fmt.Printf("----\nresults (v28) (%d)\n", c2in.SectorSize)
dur := sealCommit2.Sub(start) dur := sealCommit2.Sub(start)
fmt.Printf("seal: commit phase 2: %s (%s)\n", dur, bps(abi.SectorSize(c2in.SectorSize), dur)) fmt.Printf("seal: commit phase 2: %s (%s)\n", dur, bps(abi.SectorSize(c2in.SectorSize), 1, dur))
return nil return nil
}, },
} }
func bps(data abi.SectorSize, d time.Duration) string { func bps(sectorSize abi.SectorSize, sectorNum int, d time.Duration) string {
bdata := new(big.Int).SetUint64(uint64(data)) bdata := new(big.Int).SetUint64(uint64(sectorSize))
bdata = bdata.Mul(bdata, big.NewInt(int64(sectorNum)))
bdata = bdata.Mul(bdata, big.NewInt(time.Second.Nanoseconds())) bdata = bdata.Mul(bdata, big.NewInt(time.Second.Nanoseconds()))
bps := bdata.Div(bdata, big.NewInt(d.Nanoseconds())) bps := bdata.Div(bdata, big.NewInt(d.Nanoseconds()))
return types.SizeStr(types.BigInt{Int: bps}) + "/s" return types.SizeStr(types.BigInt{Int: bps}) + "/s"
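The revised bps now aggregates bytes across all sectors before dividing by the elapsed duration. A standalone rework of the same arithmetic (printing raw bytes per second instead of formatting with types.SizeStr), with hypothetical timings:

package main

import (
    "fmt"
    "math/big"
    "time"
)

// bps reproduces the revised calculation: total bytes across all sectors,
// scaled to one second, divided by the elapsed duration.
func bps(sectorSize uint64, sectorNum int, d time.Duration) *big.Int {
    b := new(big.Int).SetUint64(sectorSize)
    b.Mul(b, big.NewInt(int64(sectorNum)))
    b.Mul(b, big.NewInt(time.Second.Nanoseconds()))
    return b.Div(b, big.NewInt(d.Nanoseconds()))
}

func main() {
    // Hypothetical run: two 32 GiB sectors whose summed PreCommit1 took 8h.
    fmt.Printf("%s B/s\n", bps(32<<30, 2, 8*time.Hour)) // 2386092 B/s, roughly 2.3 MiB/s
}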

View File

@ -87,7 +87,7 @@ var runCmd = &cli.Command{
return err return err
} }
log.Info("Remote version: %s", v.Version) log.Infof("Remote version: %s", v.Version)
from, err := address.NewFromString(cctx.String("from")) from, err := address.NewFromString(cctx.String("from"))
if err != nil { if err != nil {

View File

@ -67,6 +67,7 @@ type gatewayDepsAPI interface {
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error) StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error)
} }
@ -362,7 +363,13 @@ func (a *GatewayAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSe
return types.BigInt{}, err return types.BigInt{}, err
} }
return a.api.StateCirculatingSupply(ctx, tsk) return a.api.StateCirculatingSupply(ctx, tsk)
}
func (a *GatewayAPI) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) {
if err := a.checkTipsetKey(ctx, tsk); err != nil {
return nil, err
}
return a.api.StateSectorGetInfo(ctx, maddr, n, tsk)
} }
func (a *GatewayAPI) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { func (a *GatewayAPI) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {

View File

@ -171,7 +171,11 @@ func TestDealFlow(t *testing.T) {
nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)
defer nodes.closer() defer nodes.closer()
test.MakeDeal(t, ctx, 6, nodes.lite, nodes.miner, false, false) // For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
dealStartEpoch := abi.ChainEpoch(2 << 12)
test.MakeDeal(t, ctx, 6, nodes.lite, nodes.miner, false, false, dealStartEpoch)
} }
func TestCLIDealFlow(t *testing.T) { func TestCLIDealFlow(t *testing.T) {

View File

@ -61,6 +61,10 @@ var runCmd = &cli.Command{
Usage: "host address and port the api server will listen on", Usage: "host address and port the api server will listen on",
Value: "0.0.0.0:2346", Value: "0.0.0.0:2346",
}, },
&cli.IntFlag{
Name: "api-max-req-size",
Usage: "maximum API request size accepted by the JSON RPC server",
},
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
log.Info("Starting lotus gateway") log.Info("Starting lotus gateway")
@ -87,7 +91,11 @@ var runCmd = &cli.Command{
log.Info("Setting up API endpoint at " + address) log.Info("Setting up API endpoint at " + address)
rpcServer := jsonrpc.NewServer() serverOptions := make([]jsonrpc.ServerOption, 0)
if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
}
rpcServer := jsonrpc.NewServer(serverOptions...)
rpcServer.Register("Filecoin", metrics.MetricedGatewayAPI(NewGatewayAPI(api))) rpcServer.Register("Filecoin", metrics.MetricedGatewayAPI(NewGatewayAPI(api)))
mux.Handle("/rpc/v0", rpcServer) mux.Handle("/rpc/v0", rpcServer)

View File

@ -2,12 +2,14 @@ package main
import ( import (
"fmt" "fmt"
"sort"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
) )
var infoCmd = &cli.Command{ var infoCmd = &cli.Command{
@ -49,10 +51,22 @@ var infoCmd = &cli.Command{
return xerrors.Errorf("getting info: %w", err) return xerrors.Errorf("getting info: %w", err)
} }
tt, err := api.TaskTypes(ctx)
if err != nil {
return xerrors.Errorf("getting task types: %w", err)
}
fmt.Printf("Hostname: %s\n", info.Hostname) fmt.Printf("Hostname: %s\n", info.Hostname)
fmt.Printf("CPUs: %d; GPUs: %v\n", info.Resources.CPUs, info.Resources.GPUs) fmt.Printf("CPUs: %d; GPUs: %v\n", info.Resources.CPUs, info.Resources.GPUs)
fmt.Printf("RAM: %s; Swap: %s\n", types.SizeStr(types.NewInt(info.Resources.MemPhysical)), types.SizeStr(types.NewInt(info.Resources.MemSwap))) fmt.Printf("RAM: %s; Swap: %s\n", types.SizeStr(types.NewInt(info.Resources.MemPhysical)), types.SizeStr(types.NewInt(info.Resources.MemSwap)))
fmt.Printf("Reserved memory: %s\n", types.SizeStr(types.NewInt(info.Resources.MemReserved))) fmt.Printf("Reserved memory: %s\n", types.SizeStr(types.NewInt(info.Resources.MemReserved)))
fmt.Printf("Task types: ")
for _, t := range ttList(tt) {
fmt.Printf("%s ", t.Short())
}
fmt.Println()
fmt.Println() fmt.Println()
paths, err := api.Paths(ctx) paths, err := api.Paths(ctx)
@ -80,3 +94,14 @@ var infoCmd = &cli.Command{
return nil return nil
}, },
} }
func ttList(tt map[sealtasks.TaskType]struct{}) []sealtasks.TaskType {
tasks := make([]sealtasks.TaskType, 0, len(tt))
for taskType := range tt {
tasks = append(tasks, taskType)
}
sort.Slice(tasks, func(i, j int) bool {
return tasks[i].Less(tasks[j])
})
return tasks
}

View File

@ -59,6 +59,7 @@ func main() {
storageCmd, storageCmd,
setCmd, setCmd,
waitQuietCmd, waitQuietCmd,
tasksCmd,
} }
app := &cli.App{ app := &cli.App{
@ -362,6 +363,17 @@ var runCmd = &cli.Command{
remote := stores.NewRemote(localStore, nodeApi, sminfo.AuthHeader(), cctx.Int("parallel-fetch-limit")) remote := stores.NewRemote(localStore, nodeApi, sminfo.AuthHeader(), cctx.Int("parallel-fetch-limit"))
fh := &stores.FetchHandler{Local: localStore}
remoteHandler := func(w http.ResponseWriter, r *http.Request) {
if !auth.HasPerm(r.Context(), nil, apistruct.PermAdmin) {
w.WriteHeader(401)
_ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing admin permission"})
return
}
fh.ServeHTTP(w, r)
}
// Create / expose the worker // Create / expose the worker
wsts := statestore.New(namespace.Wrap(ds, modules.WorkerCallsPrefix)) wsts := statestore.New(namespace.Wrap(ds, modules.WorkerCallsPrefix))
@ -385,7 +397,7 @@ var runCmd = &cli.Command{
mux.Handle("/rpc/v0", rpcServer) mux.Handle("/rpc/v0", rpcServer)
mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler) mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler)
mux.PathPrefix("/remote").HandlerFunc((&stores.FetchHandler{Local: localStore}).ServeHTTP) mux.PathPrefix("/remote").HandlerFunc(remoteHandler)
mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
ah := &auth.Handler{ ah := &auth.Handler{

View File

@ -0,0 +1,82 @@
package main
import (
"context"
"strings"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/api"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
)
var tasksCmd = &cli.Command{
Name: "tasks",
Usage: "Manage task processing",
Subcommands: []*cli.Command{
tasksEnableCmd,
tasksDisableCmd,
},
}
var allowSetting = map[sealtasks.TaskType]struct{}{
sealtasks.TTAddPiece: {},
sealtasks.TTPreCommit1: {},
sealtasks.TTPreCommit2: {},
sealtasks.TTCommit2: {},
sealtasks.TTUnseal: {},
}
var settableStr = func() string {
var s []string
for _, tt := range ttList(allowSetting) {
s = append(s, tt.Short())
}
return strings.Join(s, "|")
}()
var tasksEnableCmd = &cli.Command{
Name: "enable",
Usage: "Enable a task type",
ArgsUsage: "[" + settableStr + "]",
Action: taskAction(api.WorkerAPI.TaskEnable),
}
var tasksDisableCmd = &cli.Command{
Name: "disable",
Usage: "Disable a task type",
ArgsUsage: "[" + settableStr + "]",
Action: taskAction(api.WorkerAPI.TaskDisable),
}
func taskAction(tf func(a api.WorkerAPI, ctx context.Context, tt sealtasks.TaskType) error) func(cctx *cli.Context) error {
return func(cctx *cli.Context) error {
if cctx.NArg() != 1 {
return xerrors.Errorf("expected 1 argument")
}
var tt sealtasks.TaskType
for taskType := range allowSetting {
if taskType.Short() == cctx.Args().First() {
tt = taskType
break
}
}
if tt == "" {
return xerrors.Errorf("unknown task type '%s'", cctx.Args().First())
}
api, closer, err := lcli.GetWorkerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
return tf(api, ctx, tt)
}
}

View File

@ -7,6 +7,8 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"github.com/filecoin-project/go-state-types/network"
"github.com/docker/go-units" "github.com/docker/go-units"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
"github.com/mitchellh/go-homedir" "github.com/mitchellh/go-homedir"
@ -127,7 +129,7 @@ var preSealCmd = &cli.Command{
} }
sectorSize := abi.SectorSize(sectorSizeInt) sectorSize := abi.SectorSize(sectorSizeInt)
spt, err := miner.SealProofTypeFromSectorSize(sectorSize, build.NewestNetworkVersion) spt, err := miner.SealProofTypeFromSectorSize(sectorSize, network.Version0)
if err != nil { if err != nil {
return err return err
} }

View File

@ -19,9 +19,9 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi" ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/specs-storage/storage"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"

View File

@ -2,9 +2,13 @@ package main
import ( import (
"context" "context"
"encoding/csv"
"fmt" "fmt"
"io" "io"
"os"
"strconv" "strconv"
"strings"
"time"
"github.com/filecoin-project/lotus/chain/gen/genesis" "github.com/filecoin-project/lotus/chain/gen/genesis"
@ -62,6 +66,7 @@ var auditsCmd = &cli.Command{
chainBalanceCmd, chainBalanceCmd,
chainBalanceStateCmd, chainBalanceStateCmd,
chainPledgeCmd, chainPledgeCmd,
fillBalancesCmd,
}, },
} }
@ -487,3 +492,119 @@ var chainPledgeCmd = &cli.Command{
return nil return nil
}, },
} }
const dateFmt = "1/02/06"
func parseCsv(inp string) ([]time.Time, []address.Address, error) {
fi, err := os.Open(inp)
if err != nil {
return nil, nil, err
}
r := csv.NewReader(fi)
recs, err := r.ReadAll()
if err != nil {
return nil, nil, err
}
var addrs []address.Address
for _, rec := range recs[1:] {
a, err := address.NewFromString(rec[0])
if err != nil {
return nil, nil, err
}
addrs = append(addrs, a)
}
var dates []time.Time
for _, d := range recs[0][1:] {
if len(d) == 0 {
continue
}
p := strings.Split(d, " ")
t, err := time.Parse(dateFmt, p[len(p)-1])
if err != nil {
return nil, nil, err
}
dates = append(dates, t)
}
return dates, addrs, nil
}
func heightForDate(d time.Time, ts *types.TipSet) abi.ChainEpoch {
secs := d.Unix()
gents := ts.Blocks()[0].Timestamp
gents -= uint64(30 * ts.Height())
return abi.ChainEpoch((secs - int64(gents)) / 30)
}
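heightForDate derives the genesis timestamp by walking the head tipset's timestamp back Height()*30 seconds, then divides the remaining offset by the 30-second epoch length. The same arithmetic, written against an explicit (hypothetical) genesis timestamp and the dateFmt layout used above:

package main

import (
    "fmt"
    "time"
)

const blockSeconds = 30 // Filecoin epoch duration

// epochForDate is the same arithmetic as heightForDate, written against an
// explicit genesis timestamp instead of deriving it from a tipset.
func epochForDate(d time.Time, genesisUnix int64) int64 {
    return (d.Unix() - genesisUnix) / blockSeconds
}

func main() {
    // Hypothetical genesis time, purely for illustration.
    genesis := time.Date(2020, 8, 24, 22, 0, 0, 0, time.UTC).Unix()

    d, err := time.Parse("1/02/06", "11/15/20") // same layout as dateFmt
    if err != nil {
        panic(err)
    }
    fmt.Println("epoch:", epochForDate(d, genesis))
}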
var fillBalancesCmd = &cli.Command{
Name: "fill-balances",
Description: "fill out balances for addresses on dates in given spreadsheet",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
dates, addrs, err := parseCsv(cctx.Args().First())
if err != nil {
return err
}
ts, err := api.ChainHead(ctx)
if err != nil {
return err
}
var tipsets []*types.TipSet
for _, d := range dates {
h := heightForDate(d, ts)
hts, err := api.ChainGetTipSetByHeight(ctx, h, ts.Key())
if err != nil {
return err
}
tipsets = append(tipsets, hts)
}
var balances [][]abi.TokenAmount
for _, a := range addrs {
var b []abi.TokenAmount
for _, hts := range tipsets {
act, err := api.StateGetActor(ctx, a, hts.Key())
if err != nil {
if !strings.Contains(err.Error(), "actor not found") {
return fmt.Errorf("error for %s at %s: %w", a, hts.Key(), err)
}
b = append(b, types.NewInt(0))
continue
}
b = append(b, act.Balance)
}
balances = append(balances, b)
}
var datestrs []string
for _, d := range dates {
datestrs = append(datestrs, "Balance at "+d.Format(dateFmt))
}
w := csv.NewWriter(os.Stdout)
w.Write(append([]string{"Wallet Address"}, datestrs...)) // nolint:errcheck
for i := 0; i < len(addrs); i++ {
row := []string{addrs[i].String()}
for _, b := range balances[i] {
row = append(row, types.FIL(b).String())
}
w.Write(row) // nolint:errcheck
}
w.Flush()
return nil
},
}

View File

@ -1,27 +1,55 @@
package main package main
import ( import (
"encoding/base64"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
commcid "github.com/filecoin-project/go-fil-commcid" commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors"
) )
var commpToCidCmd = &cli.Command{ var commpToCidCmd = &cli.Command{
Name: "commp-to-cid", Name: "commp-to-cid",
Usage: "Convert commP to Cid",
Description: "Convert a raw commP to a piece-Cid", Description: "Convert a raw commP to a piece-Cid",
ArgsUsage: "[data]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "encoding",
Value: "base64",
Usage: "specify input encoding to parse",
},
},
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() { if !cctx.Args().Present() {
return fmt.Errorf("must specify commP to convert") return fmt.Errorf("must specify commP to convert")
} }
dec, err := hex.DecodeString(cctx.Args().First()) var dec []byte
switch cctx.String("encoding") {
case "base64":
data, err := base64.StdEncoding.DecodeString(cctx.Args().First())
if err != nil { if err != nil {
return fmt.Errorf("failed to decode input as hex string: %w", err) return xerrors.Errorf("decoding base64 value: %w", err)
}
dec = data
case "hex":
data, err := hex.DecodeString(cctx.Args().First())
if err != nil {
return xerrors.Errorf("decoding hex value: %w", err)
}
dec = data
default:
return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding"))
} }
fmt.Println(commcid.PieceCommitmentV1ToCID(dec)) cid, err := commcid.PieceCommitmentV1ToCID(dec)
if err != nil {
return err
}
fmt.Println(cid)
return nil return nil
}, },
} }
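The encoding flag exists because the same 32-byte commP circulates in two textual forms. A trivial sketch showing both encodings of a placeholder (all-zero) commP; converting to a piece CID is then commcid.PieceCommitmentV1ToCID, as in the command above:

package main

import (
    "encoding/base64"
    "encoding/hex"
    "fmt"
)

func main() {
    commP := make([]byte, 32) // placeholder all-zero commP

    // The same 32 bytes, in the two encodings the flag selects between.
    fmt.Println("hex:   ", hex.EncodeToString(commP))
    fmt.Println("base64:", base64.StdEncoding.EncodeToString(commP))
}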

View File

@ -1,325 +0,0 @@
package main
import (
"context"
"encoding/json"
"net"
"net/http"
"sync"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
)
type dealStatsServer struct {
api api.FullNode
}
// Requested by @jbenet
// How many epochs back to look at for dealstats
var epochLookback = abi.ChainEpoch(10)
// these lists grow continuously with the network
// TODO: need to switch this to an LRU of sorts, to ensure refreshes
var knownFiltered = new(sync.Map)
var resolvedWallets = new(sync.Map)
func init() {
for _, a := range []string{
"t0100", // client for genesis miner
"t0101", // client for genesis miner
"t0102", // client for genesis miner
"t0112", // client for genesis miner
"t0113", // client for genesis miner
"t0114", // client for genesis miner
"t1nslxql4pck5pq7hddlzym3orxlx35wkepzjkm3i", // SR1 dealbot wallet
"t1stghxhdp2w53dym2nz2jtbpk6ccd4l2lxgmezlq", // SR1 dealbot wallet
"t1mcr5xkgv4jdl3rnz77outn6xbmygb55vdejgbfi", // SR1 dealbot wallet
"t1qiqdbbmrdalbntnuapriirduvxu5ltsc5mhy7si", // SR1 dealbot wallet
} {
a, err := address.NewFromString(a)
if err != nil {
panic(err)
}
knownFiltered.Store(a, true)
}
}
type dealCountResp struct {
Epoch int64 `json:"epoch"`
Endpoint string `json:"endpoint"`
Payload int64 `json:"payload"`
}
func (dss *dealStatsServer) handleStorageDealCount(w http.ResponseWriter, r *http.Request) {
epoch, deals := dss.filteredDealList()
if epoch == 0 {
w.WriteHeader(500)
return
}
if err := json.NewEncoder(w).Encode(&dealCountResp{
Endpoint: "COUNT_DEALS",
Payload: int64(len(deals)),
Epoch: epoch,
}); err != nil {
log.Warnf("failed to write back deal count response: %s", err)
return
}
}
type dealAverageResp struct {
Epoch int64 `json:"epoch"`
Endpoint string `json:"endpoint"`
Payload int64 `json:"payload"`
}
func (dss *dealStatsServer) handleStorageDealAverageSize(w http.ResponseWriter, r *http.Request) {
epoch, deals := dss.filteredDealList()
if epoch == 0 {
w.WriteHeader(500)
return
}
var totalBytes int64
for _, d := range deals {
totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded())
}
if err := json.NewEncoder(w).Encode(&dealAverageResp{
Endpoint: "AVERAGE_DEAL_SIZE",
Payload: totalBytes / int64(len(deals)),
Epoch: epoch,
}); err != nil {
log.Warnf("failed to write back deal average response: %s", err)
return
}
}
type dealTotalResp struct {
Epoch int64 `json:"epoch"`
Endpoint string `json:"endpoint"`
Payload int64 `json:"payload"`
}
func (dss *dealStatsServer) handleStorageDealTotalReal(w http.ResponseWriter, r *http.Request) {
epoch, deals := dss.filteredDealList()
if epoch == 0 {
w.WriteHeader(500)
return
}
var totalBytes int64
for _, d := range deals {
totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded())
}
if err := json.NewEncoder(w).Encode(&dealTotalResp{
Endpoint: "DEAL_BYTES",
Payload: totalBytes,
Epoch: epoch,
}); err != nil {
log.Warnf("failed to write back deal average response: %s", err)
return
}
}
type clientStatsOutput struct {
Epoch int64 `json:"epoch"`
Endpoint string `json:"endpoint"`
Payload []*clientStats `json:"payload"`
}
type clientStats struct {
Client address.Address `json:"client"`
DataSize int64 `json:"data_size"`
NumCids int `json:"num_cids"`
NumDeals int `json:"num_deals"`
NumMiners int `json:"num_miners"`
cids map[cid.Cid]bool
providers map[address.Address]bool
}
func (dss *dealStatsServer) handleStorageClientStats(w http.ResponseWriter, r *http.Request) {
epoch, deals := dss.filteredDealList()
if epoch == 0 {
w.WriteHeader(500)
return
}
stats := make(map[address.Address]*clientStats)
for _, d := range deals {
st, ok := stats[d.deal.Proposal.Client]
if !ok {
st = &clientStats{
Client: d.resolvedWallet,
cids: make(map[cid.Cid]bool),
providers: make(map[address.Address]bool),
}
stats[d.deal.Proposal.Client] = st
}
st.DataSize += int64(d.deal.Proposal.PieceSize.Unpadded())
st.cids[d.deal.Proposal.PieceCID] = true
st.providers[d.deal.Proposal.Provider] = true
st.NumDeals++
}
out := clientStatsOutput{
Epoch: epoch,
Endpoint: "CLIENT_DEAL_STATS",
Payload: make([]*clientStats, 0, len(stats)),
}
for _, cs := range stats {
cs.NumCids = len(cs.cids)
cs.NumMiners = len(cs.providers)
out.Payload = append(out.Payload, cs)
}
if err := json.NewEncoder(w).Encode(out); err != nil {
log.Warnf("failed to write back client stats response: %s", err)
return
}
}
type dealInfo struct {
deal api.MarketDeal
resolvedWallet address.Address
}
// filteredDealList returns the current epoch and a list of filtered deals
// on error returns an epoch of 0
func (dss *dealStatsServer) filteredDealList() (int64, map[string]dealInfo) {
ctx := context.Background()
head, err := dss.api.ChainHead(ctx)
if err != nil {
log.Warnf("failed to get chain head: %s", err)
return 0, nil
}
head, err = dss.api.ChainGetTipSetByHeight(ctx, head.Height()-epochLookback, head.Key())
if err != nil {
log.Warnf("failed to walk back %s epochs: %s", epochLookback, err)
return 0, nil
}
// Disabled as per @pooja's request
//
// // Exclude any address associated with a miner
// miners, err := dss.api.StateListMiners(ctx, head.Key())
// if err != nil {
// log.Warnf("failed to get miner list: %s", err)
// return 0, nil
// }
// for _, m := range miners {
// info, err := dss.api.StateMinerInfo(ctx, m, head.Key())
// if err != nil {
// log.Warnf("failed to get info for known miner '%s': %s", m, err)
// continue
// }
// knownFiltered.Store(info.Owner, true)
// knownFiltered.Store(info.Worker, true)
// for _, a := range info.ControlAddresses {
// knownFiltered.Store(a, true)
// }
// }
deals, err := dss.api.StateMarketDeals(ctx, head.Key())
if err != nil {
log.Warnf("failed to get market deals: %s", err)
return 0, nil
}
ret := make(map[string]dealInfo, len(deals))
for dealKey, d := range deals {
// Counting no-longer-active deals as per Pooja's request
// // https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85
// if d.State.SectorStartEpoch < 0 {
// continue
// }
if _, isFiltered := knownFiltered.Load(d.Proposal.Client); isFiltered {
continue
}
if _, wasSeen := resolvedWallets.Load(d.Proposal.Client); !wasSeen {
w, err := dss.api.StateAccountKey(ctx, d.Proposal.Client, head.Key())
if err != nil {
log.Warnf("failed to resolve id '%s' to wallet address: %s", d.Proposal.Client, err)
continue
} else {
resolvedWallets.Store(d.Proposal.Client, w)
}
}
w, _ := resolvedWallets.Load(d.Proposal.Client)
if _, isFiltered := knownFiltered.Load(w); isFiltered {
continue
}
ret[dealKey] = dealInfo{
deal: d,
resolvedWallet: w.(address.Address),
}
}
return int64(head.Height()), ret
}
var serveDealStatsCmd = &cli.Command{
Name: "serve-deal-stats",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
_ = ctx
dss := &dealStatsServer{api}
mux := &http.ServeMux{}
mux.HandleFunc("/api/storagedeal/count", dss.handleStorageDealCount)
mux.HandleFunc("/api/storagedeal/averagesize", dss.handleStorageDealAverageSize)
mux.HandleFunc("/api/storagedeal/totalreal", dss.handleStorageDealTotalReal)
mux.HandleFunc("/api/storagedeal/clientstats", dss.handleStorageClientStats)
s := &http.Server{
Addr: ":7272",
Handler: mux,
}
go func() {
<-ctx.Done()
if err := s.Shutdown(context.TODO()); err != nil {
log.Error(err)
}
}()
list, err := net.Listen("tcp", ":7272") // nolint
if err != nil {
panic(err)
}
log.Warnf("deal-stat server listening on %s\n== NOTE: QUERIES ARE EXPENSIVE - YOU MUST FRONT-CACHE THIS SERVICE\n", list.Addr().String())
return s.Serve(list)
},
}

View File

@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
ledgerfil "github.com/whyrusleeping/ledger-filecoin-go" ledgerfil "github.com/whyrusleeping/ledger-filecoin-go"
@ -242,13 +243,16 @@ var ledgerSignTestCmd = &cli.Command{
if err != nil { if err != nil {
return err return err
} }
fmt.Printf("Message: %x\n", b.RawData())
sig, err := fl.SignSECP256K1(p, b.RawData()) sig, err := fl.SignSECP256K1(p, b.RawData())
if err != nil { if err != nil {
return err return err
} }
fmt.Println(sig.SignatureBytes()) sigBytes := append([]byte{byte(crypto.SigTypeSecp256k1)}, sig.SignatureBytes()...)
fmt.Printf("Signature: %x\n", sigBytes)
return nil return nil
}, },

View File

@ -30,6 +30,7 @@ func main() {
importObjectCmd, importObjectCmd,
commpToCidCmd, commpToCidCmd,
fetchParamCmd, fetchParamCmd,
postFindCmd,
proofsCmd, proofsCmd,
verifRegCmd, verifRegCmd,
miscCmd, miscCmd,
@ -39,7 +40,7 @@ func main() {
mpoolStatsCmd, mpoolStatsCmd,
exportChainCmd, exportChainCmd,
consensusCmd, consensusCmd,
serveDealStatsCmd, rollupDealStatsCmd,
syncCmd, syncCmd,
stateTreePruneCmd, stateTreePruneCmd,
datastoreCmd, datastoreCmd,

129 cmd/lotus-shed/postfind.go Normal file
View File

@ -0,0 +1,129 @@
package main
import (
"fmt"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/urfave/cli/v2"
)
var postFindCmd = &cli.Command{
Name: "post-find",
Description: "return addresses of all miners who have over zero power and have posted in the last day",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "tipset",
Usage: "specify tipset state to search on",
},
&cli.BoolFlag{
Name: "verbose",
Usage: "get more frequent print updates",
},
&cli.BoolFlag{
Name: "withpower",
Usage: "only print addrs of miners with more than zero power",
},
&cli.IntFlag{
Name: "lookback",
Usage: "number of past epochs to search for post",
Value: 2880, // default: one day of epochs (30s per epoch)
},
},
Action: func(c *cli.Context) error {
api, acloser, err := lcli.GetFullNodeAPI(c)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(c)
verbose := c.Bool("verbose")
withpower := c.Bool("withpower")
startTs, err := lcli.LoadTipSet(ctx, c, api)
if err != nil {
return err
}
if startTs == nil {
startTs, err = api.ChainHead(ctx)
if err != nil {
return err
}
}
stopEpoch := startTs.Height() - abi.ChainEpoch(c.Int("lookback"))
if verbose {
fmt.Printf("Collecting messages between %d and %d\n", startTs.Height(), stopEpoch)
}
// Get all messages over the last day
ts := startTs
msgs := make([]*types.Message, 0)
for ts.Height() > stopEpoch {
// Get messages on ts parent
next, err := api.ChainGetParentMessages(ctx, ts.Cids()[0])
if err != nil {
return err
}
msgs = append(msgs, messagesFromAPIMessages(next)...)
// Next ts
ts, err = api.ChainGetTipSet(ctx, ts.Parents())
if err != nil {
return err
}
if verbose && int64(ts.Height())%100 == 0 {
fmt.Printf("Collected messages back to height %d\n", ts.Height())
}
}
fmt.Printf("Loaded messages to height %d\n", ts.Height())
mAddrs, err := api.StateListMiners(ctx, startTs.Key())
if err != nil {
return err
}
minersToCheck := make(map[address.Address]struct{})
for _, mAddr := range mAddrs {
// if they have no power ignore. This filters out 14k inactive miners
// so we can do 100x fewer expensive message queries
if withpower {
power, err := api.StateMinerPower(ctx, mAddr, startTs.Key())
if err != nil {
return err
}
if power.MinerPower.RawBytePower.GreaterThan(big.Zero()) {
minersToCheck[mAddr] = struct{}{}
}
} else {
minersToCheck[mAddr] = struct{}{}
}
}
fmt.Printf("Loaded %d miners to check\n", len(minersToCheck))
postedMiners := make(map[address.Address]struct{})
for _, msg := range msgs {
_, shouldCheck := minersToCheck[msg.To]
_, seenBefore := postedMiners[msg.To]
if shouldCheck && !seenBefore {
if msg.Method == builtin.MethodsMiner.SubmitWindowedPoSt {
fmt.Printf("%s\n", msg.To)
postedMiners[msg.To] = struct{}{}
}
}
}
return nil
},
}
func messagesFromAPIMessages(apiMessages []lapi.Message) []*types.Message {
messages := make([]*types.Message, len(apiMessages))
for i, apiMessage := range apiMessages {
messages[i] = apiMessage.Message
}
return messages
}

View File

@ -9,6 +9,7 @@ import (
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"os"
"strings" "strings"
"text/scanner" "text/scanner"
@ -55,6 +56,60 @@ var rpcCmd = &cli.Command{
cs.Close() // nolint:errcheck cs.Close() // nolint:errcheck
}() }()
send := func(method, params string) error {
jreq, err := json.Marshal(struct {
Jsonrpc string `json:"jsonrpc"`
ID int `json:"id"`
Method string `json:"method"`
Params json.RawMessage `json:"params"`
}{
Jsonrpc: "2.0",
Method: "Filecoin." + method,
Params: json.RawMessage(params),
ID: 0,
})
if err != nil {
return err
}
req, err := http.NewRequest("POST", addr, bytes.NewReader(jreq))
if err != nil {
return err
}
req.Header = headers
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
rb, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
fmt.Println(string(rb))
if err := resp.Body.Close(); err != nil {
return err
}
return nil
}
if cctx.Args().Present() {
if cctx.Args().Len() > 2 {
return xerrors.Errorf("expected 1 or 2 arguments: method [params]")
}
params := cctx.Args().Get(1)
if params == "" {
// TODO: try to be smart and use zero-values for method
params = "[]"
}
return send(cctx.Args().Get(0), params)
}
cctx.App.Metadata["repoType"] = repo.FullNode cctx.App.Metadata["repoType"] = repo.FullNode
if err := lcli.VersionCmd.Action(cctx); err != nil { if err := lcli.VersionCmd.Action(cctx); err != nil {
return err return err
@ -94,40 +149,8 @@ var rpcCmd = &cli.Command{
s.Scan() s.Scan()
params := line[s.Position.Offset:] params := line[s.Position.Offset:]
jreq, err := json.Marshal(struct { if err := send(method, params); err != nil {
Jsonrpc string `json:"jsonrpc"` _, _ = fmt.Fprintf(os.Stderr, "%v", err)
ID int `json:"id"`
Method string `json:"method"`
Params json.RawMessage `json:"params"`
}{
Jsonrpc: "2.0",
Method: "Filecoin." + method,
Params: json.RawMessage(params),
ID: 0,
})
if err != nil {
return err
}
req, err := http.NewRequest("POST", addr, bytes.NewReader(jreq))
if err != nil {
return err
}
req.Header = headers
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
rb, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
fmt.Println(string(rb))
if err := resp.Body.Close(); err != nil {
return err
} }
} }
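Extracting send also enables the non-interactive form, e.g. lotus-shed rpc ChainHead. The envelope it posts is the same one built above; a standalone sketch that just marshals and prints it:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // Same request shape as the send helper above, here for Filecoin.ChainHead
    // with empty params.
    req := struct {
        Jsonrpc string          `json:"jsonrpc"`
        ID      int             `json:"id"`
        Method  string          `json:"method"`
        Params  json.RawMessage `json:"params"`
    }{
        Jsonrpc: "2.0",
        ID:      0,
        Method:  "Filecoin.ChainHead",
        Params:  json.RawMessage("[]"),
    }

    out, err := json.Marshal(req)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
    // {"jsonrpc":"2.0","id":0,"method":"Filecoin.ChainHead","params":[]}
}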

View File

@ -0,0 +1,432 @@
package main
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"sort"
"strings"
"github.com/Jeffail/gabs"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
)
// Requested by @jbenet
// How many epochs back to look at for dealstats
var epochLookback = abi.ChainEpoch(10)
var resolvedWallets = map[address.Address]address.Address{}
var knownAddrMap = map[address.Address]string{}
//
// contents of basic_stats.json
type competitionTotalOutput struct {
Epoch int64 `json:"epoch"`
Endpoint string `json:"endpoint"`
Payload competitionTotal `json:"payload"`
}
type competitionTotal struct {
UniqueCids int `json:"total_unique_cids"`
UniqueProviders int `json:"total_unique_providers"`
UniqueProjects int `json:"total_unique_projects"`
UniqueClients int `json:"total_unique_clients"`
TotalDeals int `json:"total_num_deals"`
TotalBytes int64 `json:"total_stored_data_size"`
seenProject map[string]bool
seenClient map[address.Address]bool
seenProvider map[address.Address]bool
seenPieceCid map[cid.Cid]bool
}
//
// contents of client_stats.json
type projectAggregateStatsOutput struct {
Epoch int64 `json:"epoch"`
Endpoint string `json:"endpoint"`
Payload map[string]*projectAggregateStats `json:"payload"`
}
type projectAggregateStats struct {
ProjectID string `json:"project_id"`
DataSizeMaxProvider int64 `json:"max_data_size_stored_with_single_provider"`
HighestCidDealCount int `json:"max_same_cid_deals"`
DataSize int64 `json:"total_data_size"`
NumCids int `json:"total_num_cids"`
NumDeals int `json:"total_num_deals"`
NumProviders int `json:"total_num_providers"`
ClientStats map[string]*clientAggregateStats `json:"clients"`
dataPerProvider map[address.Address]int64
cidDeals map[cid.Cid]int
}
type clientAggregateStats struct {
Client string `json:"client"`
DataSize int64 `json:"total_data_size"`
NumCids int `json:"total_num_cids"`
NumDeals int `json:"total_num_deals"`
NumProviders int `json:"total_num_providers"`
providers map[address.Address]bool
cids map[cid.Cid]bool
}
//
// contents of deals_list_{{projid}}.json
type dealListOutput struct {
Epoch int64 `json:"epoch"`
Endpoint string `json:"endpoint"`
Payload []*individualDeal `json:"payload"`
}
type individualDeal struct {
ProjectID string `json:"project_id"`
Client string `json:"client"`
DealID string `json:"deal_id"`
DealStartEpoch int64 `json:"deal_start_epoch"`
MinerID string `json:"miner_id"`
PayloadCID string `json:"payload_cid"`
PaddedSize int64 `json:"data_size"`
}
var rollupDealStatsCmd = &cli.Command{
Name: "rollup-deal-stats",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 2 || cctx.Args().Get(0) == "" || cctx.Args().Get(1) == "" {
return errors.New("must supply 2 arguments: a nonexistent target directory to write results to and a source of currently active projects")
}
outDirName := cctx.Args().Get(0)
if _, err := os.Stat(outDirName); err == nil {
return fmt.Errorf("unable to proceed: supplied stat target '%s' already exists", outDirName)
}
if err := os.MkdirAll(outDirName, 0755); err != nil {
return fmt.Errorf("creation of destination '%s' failed: %s", outDirName, err)
}
ctx := lcli.ReqContext(cctx)
projListName := cctx.Args().Get(1)
var projListFh *os.File
{
// Parses JSON input in the form:
// {
// "payload": [
// {
// "project": "5fb5f5b3ad3275e236287ce3",
// "address": "f3w3r2c6iukyh3u6f6kx62s5g6n2gf54aqp33ukqrqhje2y6xhf7k55przg4xqgahpcdal6laljz6zonma5pka"
// },
// {
// "project": "5fb608c4ad3275e236287ced",
// "address": "f3rs2khurnubol6ent27lpggidxxujqo2lg5aap5d5bmtam6yjb5wfla5cxxdgj45tqoaawgpzt5lofc3vpzfq"
// },
// ...
// ]
// }
if strings.HasPrefix(projListName, "http://") || strings.HasPrefix(projListName, "https://") {
req, err := http.NewRequestWithContext(ctx, "GET", projListName, nil)
if err != nil {
return err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close() //nolint:errcheck
if resp.StatusCode != http.StatusOK {
return xerrors.Errorf("non-200 response: %d", resp.StatusCode)
}
projListFh, err = os.Create(outDirName + "/client_list.json")
if err != nil {
return err
}
_, err = io.Copy(projListFh, resp.Body)
if err != nil {
return err
}
} else {
return errors.New("file inputs not yet supported")
}
if _, err := projListFh.Seek(0, 0); err != nil {
return err
}
defer projListFh.Close() //nolint:errcheck
projList, err := gabs.ParseJSONBuffer(projListFh)
if err != nil {
return err
}
proj, err := projList.Search("payload").Children()
if err != nil {
return err
}
for _, p := range proj {
a, err := address.NewFromString(p.S("address").Data().(string))
if err != nil {
return err
}
knownAddrMap[a] = p.S("project").Data().(string)
}
if len(knownAddrMap) == 0 {
return fmt.Errorf("no active projects/clients found in '%s': unable to continue", projListName)
}
}
outClientStatsFd, err := os.Create(outDirName + "/client_stats.json")
if err != nil {
return err
}
defer outClientStatsFd.Close() //nolint:errcheck
outBasicStatsFd, err := os.Create(outDirName + "/basic_stats.json")
if err != nil {
return err
}
defer outBasicStatsFd.Close() //nolint:errcheck
outUnfilteredStatsFd, err := os.Create(outDirName + "/unfiltered_basic_stats.json")
if err != nil {
return err
}
defer outUnfilteredStatsFd.Close() //nolint:errcheck
api, apiCloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer apiCloser()
head, err := api.ChainHead(ctx)
if err != nil {
return err
}
head, err = api.ChainGetTipSetByHeight(ctx, head.Height()-epochLookback, head.Key())
if err != nil {
return err
}
grandTotals := competitionTotal{
seenProject: make(map[string]bool),
seenClient: make(map[address.Address]bool),
seenProvider: make(map[address.Address]bool),
seenPieceCid: make(map[cid.Cid]bool),
}
unfilteredGrandTotals := competitionTotal{
seenClient: make(map[address.Address]bool),
seenProvider: make(map[address.Address]bool),
seenPieceCid: make(map[cid.Cid]bool),
}
projStats := make(map[string]*projectAggregateStats)
projDealLists := make(map[string][]*individualDeal)
deals, err := api.StateMarketDeals(ctx, head.Key())
if err != nil {
return err
}
for dealID, dealInfo := range deals {
// Counting no-longer-active deals as per Pooja's request
// // https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85
// if d.State.SectorStartEpoch < 0 {
// continue
// }
clientAddr, found := resolvedWallets[dealInfo.Proposal.Client]
if !found {
var err error
clientAddr, err = api.StateAccountKey(ctx, dealInfo.Proposal.Client, head.Key())
if err != nil {
log.Warnf("failed to resolve id '%s' to wallet address: %s", dealInfo.Proposal.Client, err)
continue
}
resolvedWallets[dealInfo.Proposal.Client] = clientAddr
}
unfilteredGrandTotals.seenClient[clientAddr] = true
unfilteredGrandTotals.TotalBytes += int64(dealInfo.Proposal.PieceSize)
unfilteredGrandTotals.seenProvider[dealInfo.Proposal.Provider] = true
unfilteredGrandTotals.seenPieceCid[dealInfo.Proposal.PieceCID] = true
unfilteredGrandTotals.TotalDeals++
projID, projKnown := knownAddrMap[clientAddr]
if !projKnown {
continue
}
grandTotals.seenProject[projID] = true
grandTotals.seenClient[clientAddr] = true
projStatEntry, ok := projStats[projID]
if !ok {
projStatEntry = &projectAggregateStats{
ProjectID: projID,
ClientStats: make(map[string]*clientAggregateStats),
cidDeals: make(map[cid.Cid]int),
dataPerProvider: make(map[address.Address]int64),
}
projStats[projID] = projStatEntry
}
clientStatEntry, ok := projStatEntry.ClientStats[clientAddr.String()]
if !ok {
clientStatEntry = &clientAggregateStats{
Client: clientAddr.String(),
cids: make(map[cid.Cid]bool),
providers: make(map[address.Address]bool),
}
projStatEntry.ClientStats[clientAddr.String()] = clientStatEntry
}
grandTotals.TotalBytes += int64(dealInfo.Proposal.PieceSize)
projStatEntry.DataSize += int64(dealInfo.Proposal.PieceSize)
clientStatEntry.DataSize += int64(dealInfo.Proposal.PieceSize)
grandTotals.seenProvider[dealInfo.Proposal.Provider] = true
projStatEntry.dataPerProvider[dealInfo.Proposal.Provider] += int64(dealInfo.Proposal.PieceSize)
clientStatEntry.providers[dealInfo.Proposal.Provider] = true
grandTotals.seenPieceCid[dealInfo.Proposal.PieceCID] = true
projStatEntry.cidDeals[dealInfo.Proposal.PieceCID]++
clientStatEntry.cids[dealInfo.Proposal.PieceCID] = true
grandTotals.TotalDeals++
projStatEntry.NumDeals++
clientStatEntry.NumDeals++
payloadCid := "unknown"
if c, err := cid.Parse(dealInfo.Proposal.Label); err == nil {
payloadCid = c.String()
}
projDealLists[projID] = append(projDealLists[projID], &individualDeal{
DealID: dealID,
ProjectID: projID,
Client: clientAddr.String(),
MinerID: dealInfo.Proposal.Provider.String(),
PayloadCID: payloadCid,
PaddedSize: int64(dealInfo.Proposal.PieceSize),
DealStartEpoch: int64(dealInfo.State.SectorStartEpoch),
})
}
//
// Write out per-project deal lists
for proj, dl := range projDealLists {
err := func() error {
outListFd, err := os.Create(fmt.Sprintf(outDirName+"/deals_list_%s.json", proj))
if err != nil {
return err
}
defer outListFd.Close() //nolint:errcheck
ridiculousLintMandatedRebind := dl
sort.Slice(dl, func(i, j int) bool {
return ridiculousLintMandatedRebind[j].PaddedSize < ridiculousLintMandatedRebind[i].PaddedSize
})
if err := json.NewEncoder(outListFd).Encode(
dealListOutput{
Epoch: int64(head.Height()),
Endpoint: "DEAL_LIST",
Payload: dl,
},
); err != nil {
return err
}
return nil
}()
if err != nil {
return err
}
}
//
// write out basic_stats.json and unfiltered_basic_stats.json
for _, st := range []*competitionTotal{&grandTotals, &unfilteredGrandTotals} {
st.UniqueCids = len(st.seenPieceCid)
st.UniqueClients = len(st.seenClient)
st.UniqueProviders = len(st.seenProvider)
if st.seenProject != nil {
st.UniqueProjects = len(st.seenProject)
}
}
if err := json.NewEncoder(outBasicStatsFd).Encode(
competitionTotalOutput{
Epoch: int64(head.Height()),
Endpoint: "COMPETITION_TOTALS",
Payload: grandTotals,
},
); err != nil {
return err
}
if err := json.NewEncoder(outUnfilteredStatsFd).Encode(
competitionTotalOutput{
Epoch: int64(head.Height()),
Endpoint: "NETWORK_WIDE_TOTALS",
Payload: unfilteredGrandTotals,
},
); err != nil {
return err
}
//
// write out client_stats.json
for _, ps := range projStats {
ps.NumCids = len(ps.cidDeals)
ps.NumProviders = len(ps.dataPerProvider)
for _, dealsForCid := range ps.cidDeals {
if ps.HighestCidDealCount < dealsForCid {
ps.HighestCidDealCount = dealsForCid
}
}
for _, dataForProvider := range ps.dataPerProvider {
if ps.DataSizeMaxProvider < dataForProvider {
ps.DataSizeMaxProvider = dataForProvider
}
}
for _, cs := range ps.ClientStats {
cs.NumCids = len(cs.cids)
cs.NumProviders = len(cs.providers)
}
}
if err := json.NewEncoder(outClientStatsFd).Encode(
projectAggregateStatsOutput{
Epoch: int64(head.Height()),
Endpoint: "PROJECT_DEAL_STATS",
Payload: projStats,
},
); err != nil {
return err
}
return nil
},
}

View File

@ -414,7 +414,7 @@ var actorControlList = &cli.Command{
tablewriter.Col("balance"), tablewriter.Col("balance"),
) )
postAddr, err := storage.AddressFor(ctx, api, mi, storage.PoStAddr, types.FromFil(1)) postAddr, _, err := storage.AddressFor(ctx, api, mi, storage.PoStAddr, types.FromFil(1), types.FromFil(1))
if err != nil { if err != nil {
return xerrors.Errorf("getting address for post: %w", err) return xerrors.Errorf("getting address for post: %w", err)
} }

View File

@ -70,7 +70,7 @@ func TestMinerAllInfo(t *testing.T) {
return n, sn return n, sn
} }
test.TestDealFlow(t, bp, time.Second, false, false) test.TestDealFlow(t, bp, time.Second, false, false, 0)
t.Run("post-info-all", run) t.Run("post-info-all", run)
} }

View File

@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing" sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
@ -59,7 +60,7 @@ func infoCmdAct(cctx *cli.Context) error {
ctx := lcli.ReqContext(cctx) ctx := lcli.ReqContext(cctx)
fmt.Print("Full node: ") fmt.Print("Chain: ")
head, err := api.ChainHead(ctx) head, err := api.ChainHead(ctx)
if err != nil { if err != nil {
@ -75,6 +76,20 @@ func infoCmdAct(cctx *cli.Context) error {
fmt.Printf("[%s]", color.RedString("sync behind! (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second))) fmt.Printf("[%s]", color.RedString("sync behind! (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)))
} }
basefee := head.MinTicketBlock().ParentBaseFee
gasCol := []color.Attribute{color.FgBlue}
switch {
case basefee.GreaterThan(big.NewInt(7000_000_000)): // 7 nFIL
gasCol = []color.Attribute{color.BgRed, color.FgBlack}
case basefee.GreaterThan(big.NewInt(3000_000_000)): // 3 nFIL
gasCol = []color.Attribute{color.FgRed}
case basefee.GreaterThan(big.NewInt(750_000_000)): // 0.75 nFIL
gasCol = []color.Attribute{color.FgYellow}
case basefee.GreaterThan(big.NewInt(100_000_000)): // 0.1 nFIL
gasCol = []color.Attribute{color.FgGreen}
}
fmt.Printf(" [basefee %s]", color.New(gasCol...).Sprint(types.FIL(basefee).Short()))
fmt.Println() fmt.Println()
maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor")) maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
@ -93,15 +108,14 @@ func infoCmdAct(cctx *cli.Context) error {
return err return err
} }
fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr))
// Sector size // Sector size
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil { if err != nil {
return err return err
} }
fmt.Printf("Sector Size: %s\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize)))) ssize := types.SizeStr(types.NewInt(uint64(mi.SectorSize)))
fmt.Printf("Miner: %s (%s sectors)\n", color.BlueString("%s", maddr), ssize)
pow, err := api.StateMinerPower(ctx, maddr, types.EmptyTSK) pow, err := api.StateMinerPower(ctx, maddr, types.EmptyTSK)
if err != nil { if err != nil {
@ -111,16 +125,16 @@ func infoCmdAct(cctx *cli.Context) error {
rpercI := types.BigDiv(types.BigMul(pow.MinerPower.RawBytePower, types.NewInt(1000000)), pow.TotalPower.RawBytePower) rpercI := types.BigDiv(types.BigMul(pow.MinerPower.RawBytePower, types.NewInt(1000000)), pow.TotalPower.RawBytePower)
qpercI := types.BigDiv(types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(1000000)), pow.TotalPower.QualityAdjPower) qpercI := types.BigDiv(types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(1000000)), pow.TotalPower.QualityAdjPower)
fmt.Printf("Byte Power: %s / %s (%0.4f%%)\n", fmt.Printf("Power: %s / %s (%0.4f%%)\n",
color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)),
types.SizeStr(pow.TotalPower.RawBytePower),
float64(rpercI.Int64())/10000)
fmt.Printf("Actual Power: %s / %s (%0.4f%%)\n",
color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)), color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)),
types.DeciStr(pow.TotalPower.QualityAdjPower), types.DeciStr(pow.TotalPower.QualityAdjPower),
float64(qpercI.Int64())/10000) float64(qpercI.Int64())/10000)
fmt.Printf("\tRaw: %s / %s (%0.4f%%)\n",
color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)),
types.SizeStr(pow.TotalPower.RawBytePower),
float64(rpercI.Int64())/10000)
secCounts, err := api.StateMinerSectorCount(ctx, maddr, types.EmptyTSK) secCounts, err := api.StateMinerSectorCount(ctx, maddr, types.EmptyTSK)
if err != nil { if err != nil {
return err return err
@ -168,6 +182,10 @@ func infoCmdAct(cctx *cli.Context) error {
var nactiveDeals, nVerifDeals, ndeals uint64 var nactiveDeals, nVerifDeals, ndeals uint64
var activeDealBytes, activeVerifDealBytes, dealBytes abi.PaddedPieceSize var activeDealBytes, activeVerifDealBytes, dealBytes abi.PaddedPieceSize
for _, deal := range deals { for _, deal := range deals {
if deal.State == storagemarket.StorageDealError {
continue
}
ndeals++ ndeals++
dealBytes += deal.Proposal.PieceSize dealBytes += deal.Proposal.PieceSize
@ -186,6 +204,8 @@ func infoCmdAct(cctx *cli.Context) error {
fmt.Printf("\tActive: %d, %s (Verified: %d, %s)\n", nactiveDeals, types.SizeStr(types.NewInt(uint64(activeDealBytes))), nVerifDeals, types.SizeStr(types.NewInt(uint64(activeVerifDealBytes)))) fmt.Printf("\tActive: %d, %s (Verified: %d, %s)\n", nactiveDeals, types.SizeStr(types.NewInt(uint64(activeDealBytes))), nVerifDeals, types.SizeStr(types.NewInt(uint64(activeVerifDealBytes))))
fmt.Println() fmt.Println()
spendable := big.Zero()
// NOTE: there's no need to unlock anything here. Funds only // NOTE: there's no need to unlock anything here. Funds only
// vest on deadline boundaries, and they're unlocked by cron. // vest on deadline boundaries, and they're unlocked by cron.
lockedFunds, err := mas.LockedFunds() lockedFunds, err := mas.LockedFunds()
@ -196,33 +216,47 @@ func infoCmdAct(cctx *cli.Context) error {
if err != nil { if err != nil {
return xerrors.Errorf("getting available balance: %w", err) return xerrors.Errorf("getting available balance: %w", err)
} }
fmt.Printf("Miner Balance: %s\n", color.YellowString("%s", types.FIL(mact.Balance))) spendable = big.Add(spendable, availBalance)
fmt.Printf("\tPreCommit: %s\n", types.FIL(lockedFunds.PreCommitDeposits))
fmt.Printf("\tPledge: %s\n", types.FIL(lockedFunds.InitialPledgeRequirement)) fmt.Printf("Miner Balance: %s\n", color.YellowString("%s", types.FIL(mact.Balance).Short()))
fmt.Printf("\tVesting: %s\n", types.FIL(lockedFunds.VestingFunds)) fmt.Printf(" PreCommit: %s\n", types.FIL(lockedFunds.PreCommitDeposits).Short())
color.Green("\tAvailable: %s", types.FIL(availBalance)) fmt.Printf(" Pledge: %s\n", types.FIL(lockedFunds.InitialPledgeRequirement).Short())
wb, err := api.WalletBalance(ctx, mi.Worker) fmt.Printf(" Vesting: %s\n", types.FIL(lockedFunds.VestingFunds).Short())
if err != nil { color.Green(" Available: %s", types.FIL(availBalance).Short())
return xerrors.Errorf("getting worker balance: %w", err)
}
color.Cyan("Worker Balance: %s", types.FIL(wb))
mb, err := api.StateMarketBalance(ctx, maddr, types.EmptyTSK) mb, err := api.StateMarketBalance(ctx, maddr, types.EmptyTSK)
if err != nil { if err != nil {
return xerrors.Errorf("getting market balance: %w", err) return xerrors.Errorf("getting market balance: %w", err)
} }
fmt.Printf("Market (Escrow): %s\n", types.FIL(mb.Escrow)) spendable = big.Add(spendable, big.Sub(mb.Escrow, mb.Locked))
fmt.Printf("Market (Locked): %s\n", types.FIL(mb.Locked))
fmt.Printf("Market Balance: %s\n", types.FIL(mb.Escrow).Short())
fmt.Printf(" Locked: %s\n", types.FIL(mb.Locked).Short())
color.Green(" Available: %s\n", types.FIL(big.Sub(mb.Escrow, mb.Locked)).Short())
wb, err := api.WalletBalance(ctx, mi.Worker)
if err != nil {
return xerrors.Errorf("getting worker balance: %w", err)
}
spendable = big.Add(spendable, wb)
color.Cyan("Worker Balance: %s", types.FIL(wb).Short())
if len(mi.ControlAddresses) > 0 {
cbsum := big.Zero()
for _, ca := range mi.ControlAddresses {
b, err := api.WalletBalance(ctx, ca)
if err != nil {
return xerrors.Errorf("getting control address balance: %w", err)
}
cbsum = big.Add(cbsum, b)
}
spendable = big.Add(spendable, cbsum)
fmt.Printf(" Control: %s\n", types.FIL(cbsum).Short())
}
fmt.Printf("Total Spendable: %s\n", color.YellowString(types.FIL(spendable).Short()))
fmt.Println() fmt.Println()
sealdur, err := nodeApi.SectorGetExpectedSealDuration(ctx)
if err != nil {
return err
}
fmt.Printf("Expected Seal Duration: %s\n\n", sealdur)
if !cctx.Bool("hide-sectors-info") { if !cctx.Bool("hide-sectors-info") {
fmt.Println("Sectors:") fmt.Println("Sectors:")
err = sectorsInfo(ctx, nodeApi) err = sectorsInfo(ctx, nodeApi)
@ -253,6 +287,7 @@ var stateList = []stateMeta{
{col: color.FgRed, state: sealing.UndefinedSectorState}, {col: color.FgRed, state: sealing.UndefinedSectorState},
{col: color.FgYellow, state: sealing.Packing}, {col: color.FgYellow, state: sealing.Packing},
{col: color.FgYellow, state: sealing.GetTicket},
{col: color.FgYellow, state: sealing.PreCommit1}, {col: color.FgYellow, state: sealing.PreCommit1},
{col: color.FgYellow, state: sealing.PreCommit2}, {col: color.FgYellow, state: sealing.PreCommit2},
{col: color.FgYellow, state: sealing.PreCommitting}, {col: color.FgYellow, state: sealing.PreCommitting},

View File

@ -2,6 +2,7 @@ package main
import ( import (
"bufio" "bufio"
"context"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -650,6 +651,11 @@ var marketCancelTransfer = &cli.Command{
Usage: "specify only transfers where peer is/is not initiator", Usage: "specify only transfers where peer is/is not initiator",
Value: false, Value: false,
}, },
&cli.DurationFlag{
Name: "cancel-timeout",
Usage: "time to wait for cancel to be sent to client",
Value: 5 * time.Second,
},
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() { if !cctx.Args().Present() {
@ -693,7 +699,9 @@ var marketCancelTransfer = &cli.Command{
} }
} }
return nodeApi.MarketCancelDataTransfer(ctx, transferID, other, initiator) timeoutCtx, cancel := context.WithTimeout(ctx, cctx.Duration("cancel-timeout"))
defer cancel()
return nodeApi.MarketCancelDataTransfer(timeoutCtx, transferID, other, initiator)
}, },
} }

View File

@ -10,11 +10,14 @@ import (
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api/apibstore" "github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/specs-storage/storage"
) )
var provingCmd = &cli.Command{ var provingCmd = &cli.Command{
@ -25,6 +28,7 @@ var provingCmd = &cli.Command{
provingDeadlinesCmd, provingDeadlinesCmd,
provingDeadlineInfoCmd, provingDeadlineInfoCmd,
provingFaultsCmd, provingFaultsCmd,
provingCheckProvableCmd,
}, },
} }
@ -371,3 +375,108 @@ var provingDeadlineInfoCmd = &cli.Command{
return nil return nil
}, },
} }
var provingCheckProvableCmd = &cli.Command{
Name: "check",
Usage: "Check sectors provable",
ArgsUsage: "<deadlineIdx>",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "only-bad",
Usage: "print only bad sectors",
Value: false,
},
&cli.BoolFlag{
Name: "slow",
Usage: "run slower checks",
},
},
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 1 {
return xerrors.Errorf("must pass deadline index")
}
dlIdx, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
if err != nil {
return xerrors.Errorf("could not parse deadline index: %w", err)
}
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
sapi, scloser, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer scloser()
ctx := lcli.ReqContext(cctx)
addr, err := sapi.ActorAddress(ctx)
if err != nil {
return err
}
mid, err := address.IDFromAddress(addr)
if err != nil {
return err
}
info, err := api.StateMinerInfo(ctx, addr, types.EmptyTSK)
if err != nil {
return err
}
pf, err := info.SealProofType.RegisteredWindowPoStProof()
if err != nil {
return err
}
partitions, err := api.StateMinerPartitions(ctx, addr, dlIdx, types.EmptyTSK)
if err != nil {
return err
}
tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
_, _ = fmt.Fprintln(tw, "deadline\tpartition\tsector\tstatus")
for parIdx, par := range partitions {
sectors := make(map[abi.SectorNumber]struct{})
sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.AllSectors, types.EmptyTSK)
if err != nil {
return err
}
var tocheck []storage.SectorRef
for _, info := range sectorInfos {
sectors[info.SectorNumber] = struct{}{}
tocheck = append(tocheck, storage.SectorRef{
ProofType: info.SealProof,
ID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: info.SectorNumber,
},
})
}
bad, err := sapi.CheckProvable(ctx, pf, tocheck, cctx.Bool("slow"))
if err != nil {
return err
}
for s := range sectors {
if err, exist := bad[s]; exist {
_, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\n", dlIdx, parIdx, s, color.RedString("bad")+fmt.Sprintf(" (%s)", err))
} else if !cctx.Bool("only-bad") {
_, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\n", dlIdx, parIdx, s, color.GreenString("good"))
}
}
}
return tw.Flush()
},
}

View File

@ -136,6 +136,14 @@ var DaemonCmd = &cli.Command{
Name: "config", Name: "config",
Usage: "specify path of config file to use", Usage: "specify path of config file to use",
}, },
// FIXME: This is not the correct place to put this configuration
// option. Ideally it would be part of `config.toml` but at the
// moment that only applies to the node configuration and not outside
// components like the RPC server.
&cli.IntFlag{
Name: "api-max-req-size",
Usage: "maximum API request size accepted by the JSON RPC server",
},
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
isLite := cctx.Bool("lite") isLite := cctx.Bool("lite")
@ -321,7 +329,7 @@ var DaemonCmd = &cli.Command{
} }
// TODO: properly parse api endpoint (or make it a URL) // TODO: properly parse api endpoint (or make it a URL)
return serveRPC(api, stop, endpoint, shutdownChan) return serveRPC(api, stop, endpoint, shutdownChan, int64(cctx.Int("api-max-req-size")))
}, },
Subcommands: []*cli.Command{ Subcommands: []*cli.Command{
daemonStopCmd, daemonStopCmd,
@ -358,7 +366,7 @@ func importKey(ctx context.Context, api api.FullNode, f string) error {
return err return err
} }
log.Info("successfully imported key for %s", addr) log.Infof("successfully imported key for %s", addr)
return nil return nil
} }

33
cmd/lotus/pprof.go Normal file
View File

@ -0,0 +1,33 @@
package main
import (
"net/http"
"strconv"
)
func handleFractionOpt(name string, setter func(int)) http.HandlerFunc {
return func(rw http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(rw, "only POST allowed", http.StatusMethodNotAllowed)
return
}
if err := r.ParseForm(); err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
asfr := r.Form.Get("x")
if len(asfr) == 0 {
http.Error(rw, "parameter 'x' must be set", http.StatusBadRequest)
return
}
fr, err := strconv.Atoi(asfr)
if err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
log.Infof("setting %s to %d", name, fr)
setter(fr)
}
}
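The handler above defines a tiny form-based protocol: a POST with a single integer field `x` sets the named runtime profiling fraction. A minimal client sketch, assuming the daemon listener where these handlers are mounted is reachable at `127.0.0.1:1234` (the address is an assumption):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// Set the mutex profile fraction to 1/5; the path matches the handler
	// registered in cmd/lotus/rpc.go, the host:port is an assumption.
	resp, err := http.PostForm(
		"http://127.0.0.1:1234/debug/pprof-set/mutex",
		url.Values{"x": {"5"}},
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close() //nolint:errcheck

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```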

View File

@ -8,6 +8,7 @@ import (
_ "net/http/pprof" _ "net/http/pprof"
"os" "os"
"os/signal" "os/signal"
"runtime"
"syscall" "syscall"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -32,8 +33,12 @@ import (
var log = logging.Logger("main") var log = logging.Logger("main")
func serveRPC(a api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shutdownCh <-chan struct{}) error { func serveRPC(a api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shutdownCh <-chan struct{}, maxRequestSize int64) error {
rpcServer := jsonrpc.NewServer() serverOptions := make([]jsonrpc.ServerOption, 0)
if maxRequestSize != 0 { // config set
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(maxRequestSize))
}
rpcServer := jsonrpc.NewServer(serverOptions...)
rpcServer.Register("Filecoin", apistruct.PermissionedFullAPI(metrics.MetricedFullAPI(a))) rpcServer.Register("Filecoin", apistruct.PermissionedFullAPI(metrics.MetricedFullAPI(a)))
ah := &auth.Handler{ ah := &auth.Handler{
@ -67,6 +72,10 @@ func serveRPC(a api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shut
} }
http.Handle("/debug/metrics", exporter) http.Handle("/debug/metrics", exporter)
http.Handle("/debug/pprof-set/block", handleFractionOpt("BlockProfileRate", runtime.SetBlockProfileRate))
http.Handle("/debug/pprof-set/mutex", handleFractionOpt("MutexProfileFraction",
func(x int) { runtime.SetMutexProfileFraction(x) },
))
lst, err := manet.Listen(addr) lst, err := manet.Listen(addr)
if err != nil { if err != nil {

View File

@ -12,6 +12,7 @@ import (
"path/filepath" "path/filepath"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
@ -70,6 +71,11 @@ var extractCmd = &cli.Command{
Usage: "optionally, the block CID the message was included in, to avoid expensive chain scanning", Usage: "optionally, the block CID the message was included in, to avoid expensive chain scanning",
Destination: &extractFlags.block, Destination: &extractFlags.block,
}, },
&cli.StringFlag{
Name: "exec-block",
Usage: "optionally, the block CID of a block where this message was executed, to avoid expensive chain scanning",
Destination: &extractFlags.block,
},
&cli.StringFlag{ &cli.StringFlag{
Name: "cid", Name: "cid",
Usage: "message CID to generate test vector from", Usage: "message CID to generate test vector from",
@ -143,7 +149,7 @@ func doExtract(opts extractOpts) error {
return fmt.Errorf("failed to fetch messages in canonical order from inclusion tipset: %w", err) return fmt.Errorf("failed to fetch messages in canonical order from inclusion tipset: %w", err)
} }
related, found, err := findMsgAndPrecursors(opts.precursor, msg, msgs) related, found, err := findMsgAndPrecursors(opts.precursor, mcid, msg.From, msgs)
if err != nil { if err != nil {
return fmt.Errorf("failed while finding message and precursors: %w", err) return fmt.Errorf("failed while finding message and precursors: %w", err)
} }
@ -496,19 +502,19 @@ func fetchThisAndPrevTipset(ctx context.Context, api api.FullNode, target types.
// findMsgAndPrecursors ranges through the canonical messages slice, locating // findMsgAndPrecursors ranges through the canonical messages slice, locating
// the target message and returning precursors in accordance to the supplied // the target message and returning precursors in accordance to the supplied
// mode. // mode.
func findMsgAndPrecursors(mode string, target *types.Message, msgs []api.Message) (related []*types.Message, found bool, err error) { func findMsgAndPrecursors(mode string, msgCid cid.Cid, sender address.Address, msgs []api.Message) (related []*types.Message, found bool, err error) {
// Range through canonicalised messages, selecting only the precursors based // Range through canonicalised messages, selecting only the precursors based
// on selection mode. // on selection mode.
for _, other := range msgs { for _, other := range msgs {
switch { switch {
case mode == PrecursorSelectAll: case mode == PrecursorSelectAll:
fallthrough fallthrough
case mode == PrecursorSelectSender && other.Message.From == target.From: case mode == PrecursorSelectSender && other.Message.From == sender:
related = append(related, other.Message) related = append(related, other.Message)
} }
// this message is the target; we're done. // this message is the target; we're done.
if other.Cid == target.Cid() { if other.Cid == msgCid {
return related, true, nil return related, true, nil
} }
} }

View File

@ -10,6 +10,8 @@
* [Auth](#Auth) * [Auth](#Auth)
* [AuthNew](#AuthNew) * [AuthNew](#AuthNew)
* [AuthVerify](#AuthVerify) * [AuthVerify](#AuthVerify)
* [Check](#Check)
* [CheckProvable](#CheckProvable)
* [Create](#Create) * [Create](#Create)
* [CreateBackup](#CreateBackup) * [CreateBackup](#CreateBackup)
* [Deals](#Deals) * [Deals](#Deals)
@ -53,6 +55,9 @@
* [NetBandwidthStats](#NetBandwidthStats) * [NetBandwidthStats](#NetBandwidthStats)
* [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer) * [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer)
* [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol) * [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol)
* [NetBlockAdd](#NetBlockAdd)
* [NetBlockList](#NetBlockList)
* [NetBlockRemove](#NetBlockRemove)
* [NetConnect](#NetConnect) * [NetConnect](#NetConnect)
* [NetConnectedness](#NetConnectedness) * [NetConnectedness](#NetConnectedness)
* [NetDisconnect](#NetDisconnect) * [NetDisconnect](#NetDisconnect)
@ -215,6 +220,30 @@ Inputs:
Response: `null` Response: `null`
## Check
### CheckProvable
There are not yet any comments for this method.
Perms: admin
Inputs:
```json
[
8,
null,
true
]
```
Response:
```json
{
"123": "can't acquire read lock"
}
```
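A hedged sketch of calling this method over JSON-RPC from Go, mirroring the envelope built by the `send` helper in the `lotus-shed rpc` change earlier in this diff; the miner API address and token variable are assumptions, and `[8, null, true]` is only the placeholder input shown above:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
)

func main() {
	// JSON-RPC 2.0 envelope; params follow the documented inputs:
	// [registered PoSt proof type, sectors to check, slow-check flag].
	payload := []byte(`{"jsonrpc":"2.0","id":0,"method":"Filecoin.CheckProvable","params":[8,null,true]}`)

	// The miner API endpoint and the token env var name are assumptions.
	req, err := http.NewRequest("POST", "http://127.0.0.1:2345/rpc/v0", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+os.Getenv("MINER_API_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close() //nolint:errcheck

	rb, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(rb)) // result is a map of sector number to failure reason, as shown above
}
```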
## Create ## Create
@ -549,7 +578,8 @@ Response:
"Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"ID": 3 "ID": 3
} },
"SectorNumber": 9
} }
``` ```
@ -798,6 +828,58 @@ Response:
} }
``` ```
### NetBlockAdd
Perms: admin
Inputs:
```json
[
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
]
```
Response: `{}`
### NetBlockList
Perms: read
Inputs: `null`
Response:
```json
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
```
### NetBlockRemove
Perms: admin
Inputs:
```json
[
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
]
```
Response: `{}`
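All three NetBlock methods exchange the connection-gating payload shown above. A small sketch of building the `NetBlockAdd` params from Go; the element types (plain strings) are an assumption based on the JSON shape, not the API's actual Go definitions:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// blockList mirrors the JSON shape documented for NetBlockAdd/NetBlockList/NetBlockRemove.
// String elements are an assumption made for this sketch.
type blockList struct {
	Peers     []string `json:"Peers"`
	IPAddrs   []string `json:"IPAddrs"`
	IPSubnets []string `json:"IPSubnets"`
}

func main() {
	// Params array for a JSON-RPC call to Filecoin.NetBlockAdd (admin perms).
	params, err := json.Marshal([]interface{}{
		blockList{
			Peers:     []string{"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"},
			IPAddrs:   []string{"203.0.113.7"},
			IPSubnets: []string{"198.51.100.0/24"},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(params))
}
```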
### NetConnect ### NetConnect

View File

@ -29,6 +29,8 @@
* [Storage](#Storage) * [Storage](#Storage)
* [StorageAddLocal](#StorageAddLocal) * [StorageAddLocal](#StorageAddLocal)
* [Task](#Task) * [Task](#Task)
* [TaskDisable](#TaskDisable)
* [TaskEnable](#TaskEnable)
* [TaskTypes](#TaskTypes) * [TaskTypes](#TaskTypes)
* [Unseal](#Unseal) * [Unseal](#Unseal)
* [UnsealPiece](#UnsealPiece) * [UnsealPiece](#UnsealPiece)
@ -502,6 +504,34 @@ Response: `{}`
## Task ## Task
### TaskDisable
There are not yet any comments for this method.
Perms: admin
Inputs:
```json
[
"seal/v0/commit/2"
]
```
Response: `{}`
### TaskEnable
There are not yet any comments for this method.
Perms: admin
Inputs:
```json
[
"seal/v0/commit/2"
]
```
Response: `{}`
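Both calls take a sealtasks task-type string such as `"seal/v0/commit/2"`; as the LocalWorker changes later in this diff show, enabling and disabling simply add or remove that key from the worker's accepted-task set under a lock. A standalone sketch of that semantics, using simplified stand-in types rather than the real sealtasks/LocalWorker definitions:

```go
package main

import (
	"fmt"
	"sync"
)

// worker is a simplified stand-in for LocalWorker's accepted-task bookkeeping.
type worker struct {
	taskLk      sync.Mutex
	acceptTasks map[string]struct{}
}

func (w *worker) TaskDisable(tt string) {
	w.taskLk.Lock()
	defer w.taskLk.Unlock()
	delete(w.acceptTasks, tt)
}

func (w *worker) TaskEnable(tt string) {
	w.taskLk.Lock()
	defer w.taskLk.Unlock()
	w.acceptTasks[tt] = struct{}{}
}

func main() {
	w := &worker{acceptTasks: map[string]struct{}{"seal/v0/commit/2": {}}}

	w.TaskDisable("seal/v0/commit/2")
	_, ok := w.acceptTasks["seal/v0/commit/2"]
	fmt.Println("commit2 accepted after disable:", ok) // false

	w.TaskEnable("seal/v0/commit/2")
	_, ok = w.acceptTasks["seal/v0/commit/2"]
	fmt.Println("commit2 accepted after enable:", ok) // true
}
```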
### TaskTypes ### TaskTypes
TaskType -> Weight TaskType -> Weight

View File

@ -110,6 +110,9 @@
* [NetBandwidthStats](#NetBandwidthStats) * [NetBandwidthStats](#NetBandwidthStats)
* [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer) * [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer)
* [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol) * [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol)
* [NetBlockAdd](#NetBlockAdd)
* [NetBlockList](#NetBlockList)
* [NetBlockRemove](#NetBlockRemove)
* [NetConnect](#NetConnect) * [NetConnect](#NetConnect)
* [NetConnectedness](#NetConnectedness) * [NetConnectedness](#NetConnectedness)
* [NetDisconnect](#NetDisconnect) * [NetDisconnect](#NetDisconnect)
@ -2639,6 +2642,58 @@ Response:
} }
``` ```
### NetBlockAdd
Perms: admin
Inputs:
```json
[
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
]
```
Response: `{}`
### NetBlockList
Perms: read
Inputs: `null`
Response:
```json
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
```
### NetBlockRemove
Perms: admin
Inputs:
```json
[
{
"Peers": null,
"IPAddrs": null,
"IPSubnets": null
}
]
```
Response: `{}`
### NetConnect ### NetConnect

1
extern/oni vendored

@ -1 +0,0 @@
Subproject commit 10ed9ef576836186de3b8513c03cdc3fb18c44ed

View File

@ -2,13 +2,16 @@ package sectorstorage
import ( import (
"context" "context"
"crypto/rand"
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"golang.org/x/xerrors" "golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/runtime/proof"
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@ -16,12 +19,12 @@ import (
// FaultTracker TODO: Track things more actively // FaultTracker TODO: Track things more actively
type FaultTracker interface { type FaultTracker interface {
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) ([]abi.SectorID, error) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error)
} }
// CheckProvable returns unprovable sectors // CheckProvable returns unprovable sectors
func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) ([]abi.SectorID, error) { func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
var bad []abi.SectorID var bad = make(map[abi.SectorID]string)
ssize, err := pp.SectorSize() ssize, err := pp.SectorSize()
if err != nil { if err != nil {
@ -40,21 +43,21 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
} }
if !locked { if !locked {
log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector, "sealed") log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector)
bad = append(bad, sector.ID) bad[sector.ID] = fmt.Sprint("can't acquire read lock")
return nil return nil
} }
lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
if err != nil { if err != nil {
log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err) log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
bad = append(bad, sector.ID) bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err)
return nil return nil
} }
if lp.Sealed == "" || lp.Cache == "" { if lp.Sealed == "" || lp.Cache == "" {
log.Warnw("CheckProvable Sector FAULT: cache an/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache) log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache)
bad = append(bad, sector.ID) bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", lp.Cache, lp.Sealed)
return nil return nil
} }
@ -70,19 +73,62 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
st, err := os.Stat(p) st, err := os.Stat(p)
if err != nil { if err != nil {
log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "err", err) log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "err", err)
bad = append(bad, sector.ID) bad[sector.ID] = fmt.Sprintf("%s", err)
return nil return nil
} }
if sz != 0 { if sz != 0 {
if st.Size() != int64(ssize)*sz { if st.Size() != int64(ssize)*sz {
log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz) log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz)
bad = append(bad, sector.ID) bad[sector.ID] = fmt.Sprintf("%s is wrong size (got %d, expect %d)", p, st.Size(), int64(ssize)*sz)
return nil return nil
} }
} }
} }
if rg != nil {
wpp, err := sector.ProofType.RegisteredWindowPoStProof()
if err != nil {
return err
}
var pr abi.PoStRandomness = make([]byte, abi.RandomnessLength)
_, _ = rand.Read(pr)
pr[31] &= 0x3f
ch, err := ffi.GeneratePoStFallbackSectorChallenges(wpp, sector.ID.Miner, pr, []abi.SectorNumber{
sector.ID.Number,
})
if err != nil {
log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err)
bad[sector.ID] = fmt.Sprintf("generating fallback challenges: %s", err)
return nil
}
commr, err := rg(ctx, sector.ID)
if err != nil {
log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err)
bad[sector.ID] = fmt.Sprintf("getting commR: %s", err)
return nil
}
_, err = ffi.GenerateSingleVanillaProof(ffi.PrivateSectorInfo{
SectorInfo: proof.SectorInfo{
SealProof: sector.ProofType,
SectorNumber: sector.ID.Number,
SealedCID: commr,
},
CacheDirPath: lp.Cache,
PoStProofType: wpp,
SealedSectorPath: lp.Sealed,
}, ch.Challenges[sector.ID.Number])
if err != nil {
log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err)
bad[sector.ID] = fmt.Sprintf("generating vanilla proof: %s", err)
return nil
}
}
return nil return nil
}() }()
if err != nil { if err != nil {

View File

@ -1,53 +0,0 @@
package ffiwrapper
import (
"io"
"os"
"sync"
"golang.org/x/xerrors"
)
func ToReadableFile(r io.Reader, n int64) (*os.File, func() error, error) {
f, ok := r.(*os.File)
if ok {
return f, func() error { return nil }, nil
}
var w *os.File
f, w, err := os.Pipe()
if err != nil {
return nil, nil, err
}
var wait sync.Mutex
var werr error
wait.Lock()
go func() {
defer wait.Unlock()
var copied int64
copied, werr = io.CopyN(w, r, n)
if werr != nil {
log.Warnf("toReadableFile: copy error: %+v", werr)
}
err := w.Close()
if werr == nil && err != nil {
werr = err
log.Warnf("toReadableFile: close error: %+v", err)
return
}
if copied != n {
log.Warnf("copied different amount than expected: %d != %d", copied, n)
werr = xerrors.Errorf("copied different amount than expected: %d != %d", copied, n)
}
}()
return f, func() error {
wait.Lock()
return werr
}, nil
}

View File

@ -20,9 +20,10 @@ import (
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/specs-storage/storage"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
"github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/lotus/extern/sector-storage/fr32" "github.com/filecoin-project/lotus/extern/sector-storage/fr32"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
) )
var _ Storage = &Sealer{} var _ Storage = &Sealer{}
@ -175,7 +176,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi
} }
func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, error) { func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, error) {
prf, werr, err := ToReadableFile(bytes.NewReader(in), int64(len(in))) prf, werr, err := commpffi.ToReadableFile(bytes.NewReader(in), int64(len(in)))
if err != nil { if err != nil {
return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err) return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err)
} }
@ -610,20 +611,6 @@ func (sb *Sealer) Remove(ctx context.Context, sector storage.SectorRef) error {
return xerrors.Errorf("not supported at this layer") // happens in localworker return xerrors.Errorf("not supported at this layer") // happens in localworker
} }
func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, piece io.Reader, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) {
f, werr, err := ToReadableFile(piece, int64(pieceSize))
if err != nil {
return cid.Undef, err
}
pieceCID, err := ffi.GeneratePieceCIDFromFile(proofType, f, pieceSize)
if err != nil {
return cid.Undef, err
}
return pieceCID, werr()
}
func GetRequiredPadding(oldLength abi.PaddedPieceSize, newPieceLength abi.PaddedPieceSize) ([]abi.PaddedPieceSize, abi.PaddedPieceSize) { func GetRequiredPadding(oldLength abi.PaddedPieceSize, newPieceLength abi.PaddedPieceSize) ([]abi.PaddedPieceSize, abi.PaddedPieceSize) {
padPieces := make([]abi.PaddedPieceSize, 0) padPieces := make([]abi.PaddedPieceSize, 0)

View File

@ -15,6 +15,8 @@ import (
"testing" "testing"
"time" "time"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -465,7 +467,7 @@ func BenchmarkWriteWithAlignment(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
rf, w, _ := ToReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, int(bt/2))), int64(bt)) rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, int(bt/2))), int64(bt))
tf, _ := ioutil.TempFile("/tmp/", "scrb-") tf, _ := ioutil.TempFile("/tmp/", "scrb-")
b.StartTimer() b.StartTimer()
@ -524,7 +526,7 @@ func TestGenerateUnsealedCID(t *testing.T) {
ups := int(abi.PaddedPieceSize(2048).Unpadded()) ups := int(abi.PaddedPieceSize(2048).Unpadded())
commP := func(b []byte) cid.Cid { commP := func(b []byte) cid.Cid {
pf, werr, err := ToReadableFile(bytes.NewReader(b), int64(len(b))) pf, werr, err := commpffi.ToReadableFile(bytes.NewReader(b), int64(len(b)))
require.NoError(t, err) require.NoError(t, err)
c, err := ffi.GeneratePieceCIDFromFile(pt, pf, abi.UnpaddedPieceSize(len(b))) c, err := ffi.GeneratePieceCIDFromFile(pt, pf, abi.UnpaddedPieceSize(len(b)))

View File

@ -7,11 +7,12 @@ import (
"os" "os"
"testing" "testing"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/fr32" "github.com/filecoin-project/lotus/extern/sector-storage/fr32"
ffi "github.com/filecoin-project/filecoin-ffi" ffi "github.com/filecoin-project/filecoin-ffi"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -29,7 +30,7 @@ func TestWriteTwoPcs(t *testing.T) {
buf := bytes.Repeat([]byte{0xab * byte(i)}, int(paddedSize.Unpadded())) buf := bytes.Repeat([]byte{0xab * byte(i)}, int(paddedSize.Unpadded()))
rawBytes = append(rawBytes, buf...) rawBytes = append(rawBytes, buf...)
rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(buf), int64(len(buf)))
_, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil)
if err != nil { if err != nil {

View File

@ -9,15 +9,15 @@ import (
"testing" "testing"
ffi "github.com/filecoin-project/filecoin-ffi" ffi "github.com/filecoin-project/filecoin-ffi"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/fr32" "github.com/filecoin-project/lotus/extern/sector-storage/fr32"
) )
func padFFI(buf []byte) []byte { func padFFI(buf []byte) []byte {
rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(buf), int64(len(buf)))
tf, _ := ioutil.TempFile("/tmp/", "scrb-") tf, _ := ioutil.TempFile("/tmp/", "scrb-")
_, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil)

View File

@ -58,6 +58,10 @@ type SectorManager interface {
type WorkerID uuid.UUID // worker session UUID type WorkerID uuid.UUID // worker session UUID
var ClosedWorkerID = uuid.UUID{} var ClosedWorkerID = uuid.UUID{}
func (w WorkerID) String() string {
return uuid.UUID(w).String()
}
type Manager struct { type Manager struct {
ls stores.LocalStorage ls stores.LocalStorage
storage *stores.Remote storage *stores.Remote

View File

@ -146,7 +146,7 @@ func (m *Manager) getWork(ctx context.Context, method sealtasks.TaskType, params
switch ws.Status { switch ws.Status {
case wsStarted: case wsStarted:
log.Warn("canceling started (not running) work %s", wid) log.Warnf("canceling started (not running) work %s", wid)
if err := m.work.Get(wid).End(); err != nil { if err := m.work.Get(wid).End(); err != nil {
log.Errorf("cancel: failed to cancel started work %s: %+v", wid, err) log.Errorf("cancel: failed to cancel started work %s: %+v", wid, err)
@ -154,9 +154,9 @@ func (m *Manager) getWork(ctx context.Context, method sealtasks.TaskType, params
} }
case wsDone: case wsDone:
// TODO: still remove? // TODO: still remove?
log.Warn("cancel called on work %s in 'done' state", wid) log.Warnf("cancel called on work %s in 'done' state", wid)
case wsRunning: case wsRunning:
log.Warn("cancel called on work %s in 'running' state (manager shutting down?)", wid) log.Warnf("cancel called on work %s in 'running' state (manager shutting down?)", wid)
} }
}, nil }, nil

View File

@ -11,6 +11,7 @@ import (
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper"
commcid "github.com/filecoin-project/go-fil-commcid" commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/specs-storage/storage"
@ -76,7 +77,7 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef,
var b bytes.Buffer var b bytes.Buffer
tr := io.TeeReader(r, &b) tr := io.TeeReader(r, &b)
c, err := ffiwrapper.GeneratePieceCIDFromFile(sectorID.ProofType, tr, size) c, err := ffiwrapper2.GeneratePieceCIDFromFile(sectorID.ProofType, tr, size)
if err != nil { if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err) return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err)
} }
@ -404,14 +405,14 @@ func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) erro
return nil return nil
} }
func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef) ([]abi.SectorID, error) { func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
var bad []abi.SectorID bad := map[abi.SectorID]string{}
for _, sid := range ids { for _, sid := range ids {
_, found := mgr.sectors[sid.ID] _, found := mgr.sectors[sid.ID]
if !found || mgr.sectors[sid.ID].failed { if !found || mgr.sectors[sid.ID].failed {
bad = append(bad, sid.ID) bad[sid.ID] = "mock fail"
} }
} }

View File

@ -348,24 +348,25 @@ func (sh *scheduler) trySched() {
sh.workersLk.RLock() sh.workersLk.RLock()
defer sh.workersLk.RUnlock() defer sh.workersLk.RUnlock()
windows := make([]schedWindow, len(sh.openWindows)) windowsLen := len(sh.openWindows)
acceptableWindows := make([][]int, sh.schedQueue.Len()) queueLen := sh.schedQueue.Len()
log.Debugf("SCHED %d queued; %d open windows", sh.schedQueue.Len(), len(windows)) log.Debugf("SCHED %d queued; %d open windows", queueLen, windowsLen)
if len(sh.openWindows) == 0 { if windowsLen == 0 || queueLen == 0 {
// nothing to schedule on // nothing to schedule on
return return
} }
windows := make([]schedWindow, windowsLen)
acceptableWindows := make([][]int, queueLen)
// Step 1 // Step 1
concurrency := len(sh.openWindows) throttle := make(chan struct{}, windowsLen)
throttle := make(chan struct{}, concurrency)
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(sh.schedQueue.Len()) wg.Add(queueLen)
for i := 0; i < queueLen; i++ {
for i := 0; i < sh.schedQueue.Len(); i++ {
throttle <- struct{}{} throttle <- struct{}{}
go func(sqi int) { go func(sqi int) {
@ -436,7 +437,7 @@ func (sh *scheduler) trySched() {
r, err := task.sel.Cmp(rpcCtx, task.taskType, wi, wj) r, err := task.sel.Cmp(rpcCtx, task.taskType, wi, wj)
if err != nil { if err != nil {
log.Error("selecting best worker: %s", err) log.Errorf("selecting best worker: %s", err)
} }
return r return r
}) })
@ -450,8 +451,9 @@ func (sh *scheduler) trySched() {
// Step 2 // Step 2
scheduled := 0 scheduled := 0
rmQueue := make([]int, 0, queueLen)
for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { for sqi := 0; sqi < queueLen; sqi++ {
task := (*sh.schedQueue)[sqi] task := (*sh.schedQueue)[sqi]
needRes := ResourceTable[task.taskType][task.sector.ProofType] needRes := ResourceTable[task.taskType][task.sector.ProofType]
@ -486,11 +488,16 @@ func (sh *scheduler) trySched() {
windows[selectedWindow].todo = append(windows[selectedWindow].todo, task) windows[selectedWindow].todo = append(windows[selectedWindow].todo, task)
sh.schedQueue.Remove(sqi) rmQueue = append(rmQueue, sqi)
sqi--
scheduled++ scheduled++
} }
if len(rmQueue) > 0 {
for i := len(rmQueue) - 1; i >= 0; i-- {
sh.schedQueue.Remove(rmQueue[i])
}
}
// Step 3 // Step 3
if scheduled == 0 { if scheduled == 0 {
@ -515,7 +522,7 @@ func (sh *scheduler) trySched() {
} }
// Rewrite sh.openWindows array, removing scheduled windows // Rewrite sh.openWindows array, removing scheduled windows
newOpenWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)-len(scheduledWindows)) newOpenWindows := make([]*schedWindowRequest, 0, windowsLen-len(scheduledWindows))
for wnd, window := range sh.openWindows { for wnd, window := range sh.openWindows {
if _, scheduled := scheduledWindows[wnd]; scheduled { if _, scheduled := scheduledWindows[wnd]; scheduled {
// keep unscheduled windows open // keep unscheduled windows open

View File

@ -49,25 +49,25 @@ func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, call
// TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running)
minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory
if minNeedMem > res.MemPhysical { if minNeedMem > res.MemPhysical {
log.Debugf("sched: not scheduling on worker %d for %s; not enough physical memory - need: %dM, have %dM", wid, caller, minNeedMem/mib, res.MemPhysical/mib) log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM", wid, caller, minNeedMem/mib, res.MemPhysical/mib)
return false return false
} }
maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory
if maxNeedMem > res.MemSwap+res.MemPhysical { if maxNeedMem > res.MemSwap+res.MemPhysical {
log.Debugf("sched: not scheduling on worker %d for %s; not enough virtual memory - need: %dM, have %dM", wid, caller, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM", wid, caller, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
return false return false
} }
if a.cpuUse+needRes.Threads(res.CPUs) > res.CPUs { if a.cpuUse+needRes.Threads(res.CPUs) > res.CPUs {
log.Debugf("sched: not scheduling on worker %d for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs) log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs)
return false return false
} }
if len(res.GPUs) > 0 && needRes.CanGPU { if len(res.GPUs) > 0 && needRes.CanGPU {
if a.gpuUsed { if a.gpuUsed {
log.Debugf("sched: not scheduling on worker %d for %s; GPU in use", wid, caller) log.Debugf("sched: not scheduling on worker %s for %s; GPU in use", wid, caller)
return false return false
} }
} }

View File

@ -368,7 +368,7 @@ assignLoop:
err := sw.startProcessingTask(sw.taskDone, todo) err := sw.startProcessingTask(sw.taskDone, todo)
if err != nil { if err != nil {
log.Error("startProcessingTask error: %+v", err) log.Errorf("startProcessingTask error: %+v", err)
go todo.respond(xerrors.Errorf("startProcessingTask error: %w", err)) go todo.respond(xerrors.Errorf("startProcessingTask error: %w", err))
} }
@ -486,6 +486,6 @@ func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) {
} }
sh.openWindows = newWindows sh.openWindows = newWindows
log.Debugf("worker %d dropped", wid) log.Debugf("worker %s dropped", wid)
} }
} }
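Both hunks in this file swap the non-formatting logger calls for their ...f variants (and %d for %s now that worker IDs print as strings): the plain Error/Warn/Debug calls render their arguments Sprint-style, so a format verb left in the message is emitted literally instead of being substituted. The same effect shown with fmt, as a quick illustration:

```go
package main

import "fmt"

func main() {
	wid := "aa6c1c6d-worker" // illustrative worker ID string

	// Sprint-style call: the verb is not interpreted, the argument is
	// just appended after a space.
	fmt.Println("worker %s dropped", wid) // worker %s dropped aa6c1c6d-worker

	// Printf-style call: the verb is substituted as intended.
	fmt.Printf("worker %s dropped\n", wid) // worker aa6c1c6d-worker dropped
}
```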

View File

@ -1,8 +1,11 @@
package storiface package storiface
import ( import (
"context"
"errors" "errors"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
) )
@ -15,3 +18,5 @@ func (i UnpaddedByteIndex) Padded() PaddedByteIndex {
} }
type PaddedByteIndex uint64 type PaddedByteIndex uint64
type RGetter func(ctx context.Context, id abi.SectorID) (cid.Cid, error)
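The storiface hunk adds an RGetter callback type so callers can resolve the expected unsealed CID for a sector without storiface importing higher-level packages. A minimal sketch of declaring and injecting such a function-typed dependency; the types and the verifyPiece consumer below are hypothetical stand-ins, only the callback shape follows the diff:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Local stand-ins for cid.Cid and abi.SectorID, to keep the sketch
// self-contained.
type Cid string

type SectorID struct {
	Miner  uint64
	Number uint64
}

// RGetter resolves the expected unsealed CID for a sector.
type RGetter func(ctx context.Context, id SectorID) (Cid, error)

// verifyPiece is a hypothetical consumer: it takes the getter as a
// dependency instead of importing whichever component knows the CIDs.
func verifyPiece(ctx context.Context, id SectorID, get RGetter) error {
	want, err := get(ctx, id)
	if err != nil {
		return fmt.Errorf("getting expected CID: %w", err)
	}
	fmt.Println("expected unsealed CID:", want)
	return nil
}

func main() {
	known := map[SectorID]Cid{{Miner: 1000, Number: 1}: "bafy...example"}

	getter := RGetter(func(ctx context.Context, id SectorID) (Cid, error) {
		c, ok := known[id]
		if !ok {
			return "", errors.New("unknown sector")
		}
		return c, nil
	})

	_ = verifyPiece(context.Background(), SectorID{Miner: 1000, Number: 1}, getter)
}
```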

View File

@ -49,6 +49,7 @@ type LocalWorker struct {
ct *workerCallTracker ct *workerCallTracker
acceptTasks map[sealtasks.TaskType]struct{} acceptTasks map[sealtasks.TaskType]struct{}
running sync.WaitGroup running sync.WaitGroup
taskLk sync.Mutex
session uuid.UUID session uuid.UUID
testDisable int64 testDisable int64
@ -457,9 +458,28 @@ func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector st
} }
func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) { func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) {
l.taskLk.Lock()
defer l.taskLk.Unlock()
return l.acceptTasks, nil return l.acceptTasks, nil
} }
func (l *LocalWorker) TaskDisable(ctx context.Context, tt sealtasks.TaskType) error {
l.taskLk.Lock()
defer l.taskLk.Unlock()
delete(l.acceptTasks, tt)
return nil
}
func (l *LocalWorker) TaskEnable(ctx context.Context, tt sealtasks.TaskType) error {
l.taskLk.Lock()
defer l.taskLk.Unlock()
l.acceptTasks[tt] = struct{}{}
return nil
}
func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
return l.localStore.Local(ctx) return l.localStore.Local(ctx)
} }
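The LocalWorker hunk adds taskLk plus TaskEnable/TaskDisable so the acceptTasks map can be changed at runtime without racing TaskTypes. A condensed sketch of the same guard, using plain strings for task types; returning a copy from the read path is an extra precaution in this sketch, not something the diff itself does:

```go
package main

import (
	"fmt"
	"sync"
)

type TaskType string

type worker struct {
	taskLk      sync.Mutex
	acceptTasks map[TaskType]struct{}
}

func (w *worker) TaskEnable(tt TaskType) {
	w.taskLk.Lock()
	defer w.taskLk.Unlock()
	w.acceptTasks[tt] = struct{}{}
}

func (w *worker) TaskDisable(tt TaskType) {
	w.taskLk.Lock()
	defer w.taskLk.Unlock()
	delete(w.acceptTasks, tt)
}

// TaskTypes returns a snapshot so callers never read the live map
// concurrently with Enable/Disable.
func (w *worker) TaskTypes() map[TaskType]struct{} {
	w.taskLk.Lock()
	defer w.taskLk.Unlock()
	out := make(map[TaskType]struct{}, len(w.acceptTasks))
	for tt := range w.acceptTasks {
		out[tt] = struct{}{}
	}
	return out
}

func main() {
	w := &worker{acceptTasks: map[TaskType]struct{}{"seal/v0/precommit/1": {}}}
	w.TaskDisable("seal/v0/precommit/1")
	w.TaskEnable("seal/v0/commit/2")
	fmt.Println(w.TaskTypes()) // map[seal/v0/commit/2:{}]
}
```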

View File

@ -1,56 +0,0 @@
package zerocomm
import (
"math/bits"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
)
const Levels = 37
const Skip = 2 // can't generate for 32, 64b
var PieceComms = [Levels - Skip][32]byte{
{0x37, 0x31, 0xbb, 0x99, 0xac, 0x68, 0x9f, 0x66, 0xee, 0xf5, 0x97, 0x3e, 0x4a, 0x94, 0xda, 0x18, 0x8f, 0x4d, 0xdc, 0xae, 0x58, 0x7, 0x24, 0xfc, 0x6f, 0x3f, 0xd6, 0xd, 0xfd, 0x48, 0x83, 0x33},
{0x64, 0x2a, 0x60, 0x7e, 0xf8, 0x86, 0xb0, 0x4, 0xbf, 0x2c, 0x19, 0x78, 0x46, 0x3a, 0xe1, 0xd4, 0x69, 0x3a, 0xc0, 0xf4, 0x10, 0xeb, 0x2d, 0x1b, 0x7a, 0x47, 0xfe, 0x20, 0x5e, 0x5e, 0x75, 0xf},
{0x57, 0xa2, 0x38, 0x1a, 0x28, 0x65, 0x2b, 0xf4, 0x7f, 0x6b, 0xef, 0x7a, 0xca, 0x67, 0x9b, 0xe4, 0xae, 0xde, 0x58, 0x71, 0xab, 0x5c, 0xf3, 0xeb, 0x2c, 0x8, 0x11, 0x44, 0x88, 0xcb, 0x85, 0x26},
{0x1f, 0x7a, 0xc9, 0x59, 0x55, 0x10, 0xe0, 0x9e, 0xa4, 0x1c, 0x46, 0xb, 0x17, 0x64, 0x30, 0xbb, 0x32, 0x2c, 0xd6, 0xfb, 0x41, 0x2e, 0xc5, 0x7c, 0xb1, 0x7d, 0x98, 0x9a, 0x43, 0x10, 0x37, 0x2f},
{0xfc, 0x7e, 0x92, 0x82, 0x96, 0xe5, 0x16, 0xfa, 0xad, 0xe9, 0x86, 0xb2, 0x8f, 0x92, 0xd4, 0x4a, 0x4f, 0x24, 0xb9, 0x35, 0x48, 0x52, 0x23, 0x37, 0x6a, 0x79, 0x90, 0x27, 0xbc, 0x18, 0xf8, 0x33},
{0x8, 0xc4, 0x7b, 0x38, 0xee, 0x13, 0xbc, 0x43, 0xf4, 0x1b, 0x91, 0x5c, 0xe, 0xed, 0x99, 0x11, 0xa2, 0x60, 0x86, 0xb3, 0xed, 0x62, 0x40, 0x1b, 0xf9, 0xd5, 0x8b, 0x8d, 0x19, 0xdf, 0xf6, 0x24},
{0xb2, 0xe4, 0x7b, 0xfb, 0x11, 0xfa, 0xcd, 0x94, 0x1f, 0x62, 0xaf, 0x5c, 0x75, 0xf, 0x3e, 0xa5, 0xcc, 0x4d, 0xf5, 0x17, 0xd5, 0xc4, 0xf1, 0x6d, 0xb2, 0xb4, 0xd7, 0x7b, 0xae, 0xc1, 0xa3, 0x2f},
{0xf9, 0x22, 0x61, 0x60, 0xc8, 0xf9, 0x27, 0xbf, 0xdc, 0xc4, 0x18, 0xcd, 0xf2, 0x3, 0x49, 0x31, 0x46, 0x0, 0x8e, 0xae, 0xfb, 0x7d, 0x2, 0x19, 0x4d, 0x5e, 0x54, 0x81, 0x89, 0x0, 0x51, 0x8},
{0x2c, 0x1a, 0x96, 0x4b, 0xb9, 0xb, 0x59, 0xeb, 0xfe, 0xf, 0x6d, 0xa2, 0x9a, 0xd6, 0x5a, 0xe3, 0xe4, 0x17, 0x72, 0x4a, 0x8f, 0x7c, 0x11, 0x74, 0x5a, 0x40, 0xca, 0xc1, 0xe5, 0xe7, 0x40, 0x11},
{0xfe, 0xe3, 0x78, 0xce, 0xf1, 0x64, 0x4, 0xb1, 0x99, 0xed, 0xe0, 0xb1, 0x3e, 0x11, 0xb6, 0x24, 0xff, 0x9d, 0x78, 0x4f, 0xbb, 0xed, 0x87, 0x8d, 0x83, 0x29, 0x7e, 0x79, 0x5e, 0x2, 0x4f, 0x2},
{0x8e, 0x9e, 0x24, 0x3, 0xfa, 0x88, 0x4c, 0xf6, 0x23, 0x7f, 0x60, 0xdf, 0x25, 0xf8, 0x3e, 0xe4, 0xd, 0xca, 0x9e, 0xd8, 0x79, 0xeb, 0x6f, 0x63, 0x52, 0xd1, 0x50, 0x84, 0xf5, 0xad, 0xd, 0x3f},
{0x75, 0x2d, 0x96, 0x93, 0xfa, 0x16, 0x75, 0x24, 0x39, 0x54, 0x76, 0xe3, 0x17, 0xa9, 0x85, 0x80, 0xf0, 0x9, 0x47, 0xaf, 0xb7, 0xa3, 0x5, 0x40, 0xd6, 0x25, 0xa9, 0x29, 0x1c, 0xc1, 0x2a, 0x7},
{0x70, 0x22, 0xf6, 0xf, 0x7e, 0xf6, 0xad, 0xfa, 0x17, 0x11, 0x7a, 0x52, 0x61, 0x9e, 0x30, 0xce, 0xa8, 0x2c, 0x68, 0x7, 0x5a, 0xdf, 0x1c, 0x66, 0x77, 0x86, 0xec, 0x50, 0x6e, 0xef, 0x2d, 0x19},
{0xd9, 0x98, 0x87, 0xb9, 0x73, 0x57, 0x3a, 0x96, 0xe1, 0x13, 0x93, 0x64, 0x52, 0x36, 0xc1, 0x7b, 0x1f, 0x4c, 0x70, 0x34, 0xd7, 0x23, 0xc7, 0xa9, 0x9f, 0x70, 0x9b, 0xb4, 0xda, 0x61, 0x16, 0x2b},
{0xd0, 0xb5, 0x30, 0xdb, 0xb0, 0xb4, 0xf2, 0x5c, 0x5d, 0x2f, 0x2a, 0x28, 0xdf, 0xee, 0x80, 0x8b, 0x53, 0x41, 0x2a, 0x2, 0x93, 0x1f, 0x18, 0xc4, 0x99, 0xf5, 0xa2, 0x54, 0x8, 0x6b, 0x13, 0x26},
{0x84, 0xc0, 0x42, 0x1b, 0xa0, 0x68, 0x5a, 0x1, 0xbf, 0x79, 0x5a, 0x23, 0x44, 0x6, 0x4f, 0xe4, 0x24, 0xbd, 0x52, 0xa9, 0xd2, 0x43, 0x77, 0xb3, 0x94, 0xff, 0x4c, 0x4b, 0x45, 0x68, 0xe8, 0x11},
{0x65, 0xf2, 0x9e, 0x5d, 0x98, 0xd2, 0x46, 0xc3, 0x8b, 0x38, 0x8c, 0xfc, 0x6, 0xdb, 0x1f, 0x6b, 0x2, 0x13, 0x3, 0xc5, 0xa2, 0x89, 0x0, 0xb, 0xdc, 0xe8, 0x32, 0xa9, 0xc3, 0xec, 0x42, 0x1c},
{0xa2, 0x24, 0x75, 0x8, 0x28, 0x58, 0x50, 0x96, 0x5b, 0x7e, 0x33, 0x4b, 0x31, 0x27, 0xb0, 0xc0, 0x42, 0xb1, 0xd0, 0x46, 0xdc, 0x54, 0x40, 0x21, 0x37, 0x62, 0x7c, 0xd8, 0x79, 0x9c, 0xe1, 0x3a},
{0xda, 0xfd, 0xab, 0x6d, 0xa9, 0x36, 0x44, 0x53, 0xc2, 0x6d, 0x33, 0x72, 0x6b, 0x9f, 0xef, 0xe3, 0x43, 0xbe, 0x8f, 0x81, 0x64, 0x9e, 0xc0, 0x9, 0xaa, 0xd3, 0xfa, 0xff, 0x50, 0x61, 0x75, 0x8},
{0xd9, 0x41, 0xd5, 0xe0, 0xd6, 0x31, 0x4a, 0x99, 0x5c, 0x33, 0xff, 0xbd, 0x4f, 0xbe, 0x69, 0x11, 0x8d, 0x73, 0xd4, 0xe5, 0xfd, 0x2c, 0xd3, 0x1f, 0xf, 0x7c, 0x86, 0xeb, 0xdd, 0x14, 0xe7, 0x6},
{0x51, 0x4c, 0x43, 0x5c, 0x3d, 0x4, 0xd3, 0x49, 0xa5, 0x36, 0x5f, 0xbd, 0x59, 0xff, 0xc7, 0x13, 0x62, 0x91, 0x11, 0x78, 0x59, 0x91, 0xc1, 0xa3, 0xc5, 0x3a, 0xf2, 0x20, 0x79, 0x74, 0x1a, 0x2f},
{0xad, 0x6, 0x85, 0x39, 0x69, 0xd3, 0x7d, 0x34, 0xff, 0x8, 0xe0, 0x9f, 0x56, 0x93, 0xa, 0x4a, 0xd1, 0x9a, 0x89, 0xde, 0xf6, 0xc, 0xbf, 0xee, 0x7e, 0x1d, 0x33, 0x81, 0xc1, 0xe7, 0x1c, 0x37},
{0x39, 0x56, 0xe, 0x7b, 0x13, 0xa9, 0x3b, 0x7, 0xa2, 0x43, 0xfd, 0x27, 0x20, 0xff, 0xa7, 0xcb, 0x3e, 0x1d, 0x2e, 0x50, 0x5a, 0xb3, 0x62, 0x9e, 0x79, 0xf4, 0x63, 0x13, 0x51, 0x2c, 0xda, 0x6},
{0xcc, 0xc3, 0xc0, 0x12, 0xf5, 0xb0, 0x5e, 0x81, 0x1a, 0x2b, 0xbf, 0xdd, 0xf, 0x68, 0x33, 0xb8, 0x42, 0x75, 0xb4, 0x7b, 0xf2, 0x29, 0xc0, 0x5, 0x2a, 0x82, 0x48, 0x4f, 0x3c, 0x1a, 0x5b, 0x3d},
{0x7d, 0xf2, 0x9b, 0x69, 0x77, 0x31, 0x99, 0xe8, 0xf2, 0xb4, 0xb, 0x77, 0x91, 0x9d, 0x4, 0x85, 0x9, 0xee, 0xd7, 0x68, 0xe2, 0xc7, 0x29, 0x7b, 0x1f, 0x14, 0x37, 0x3, 0x4f, 0xc3, 0xc6, 0x2c},
{0x66, 0xce, 0x5, 0xa3, 0x66, 0x75, 0x52, 0xcf, 0x45, 0xc0, 0x2b, 0xcc, 0x4e, 0x83, 0x92, 0x91, 0x9b, 0xde, 0xac, 0x35, 0xde, 0x2f, 0xf5, 0x62, 0x71, 0x84, 0x8e, 0x9f, 0x7b, 0x67, 0x51, 0x7},
{0xd8, 0x61, 0x2, 0x18, 0x42, 0x5a, 0xb5, 0xe9, 0x5b, 0x1c, 0xa6, 0x23, 0x9d, 0x29, 0xa2, 0xe4, 0x20, 0xd7, 0x6, 0xa9, 0x6f, 0x37, 0x3e, 0x2f, 0x9c, 0x9a, 0x91, 0xd7, 0x59, 0xd1, 0x9b, 0x1},
{0x6d, 0x36, 0x4b, 0x1e, 0xf8, 0x46, 0x44, 0x1a, 0x5a, 0x4a, 0x68, 0x86, 0x23, 0x14, 0xac, 0xc0, 0xa4, 0x6f, 0x1, 0x67, 0x17, 0xe5, 0x34, 0x43, 0xe8, 0x39, 0xee, 0xdf, 0x83, 0xc2, 0x85, 0x3c},
{0x7, 0x7e, 0x5f, 0xde, 0x35, 0xc5, 0xa, 0x93, 0x3, 0xa5, 0x50, 0x9, 0xe3, 0x49, 0x8a, 0x4e, 0xbe, 0xdf, 0xf3, 0x9c, 0x42, 0xb7, 0x10, 0xb7, 0x30, 0xd8, 0xec, 0x7a, 0xc7, 0xaf, 0xa6, 0x3e},
{0xe6, 0x40, 0x5, 0xa6, 0xbf, 0xe3, 0x77, 0x79, 0x53, 0xb8, 0xad, 0x6e, 0xf9, 0x3f, 0xf, 0xca, 0x10, 0x49, 0xb2, 0x4, 0x16, 0x54, 0xf2, 0xa4, 0x11, 0xf7, 0x70, 0x27, 0x99, 0xce, 0xce, 0x2},
{0x25, 0x9d, 0x3d, 0x6b, 0x1f, 0x4d, 0x87, 0x6d, 0x11, 0x85, 0xe1, 0x12, 0x3a, 0xf6, 0xf5, 0x50, 0x1a, 0xf0, 0xf6, 0x7c, 0xf1, 0x5b, 0x52, 0x16, 0x25, 0x5b, 0x7b, 0x17, 0x8d, 0x12, 0x5, 0x1d},
{0x3f, 0x9a, 0x4d, 0x41, 0x1d, 0xa4, 0xef, 0x1b, 0x36, 0xf3, 0x5f, 0xf0, 0xa1, 0x95, 0xae, 0x39, 0x2a, 0xb2, 0x3f, 0xee, 0x79, 0x67, 0xb7, 0xc4, 0x1b, 0x3, 0xd1, 0x61, 0x3f, 0xc2, 0x92, 0x39},
{0xfe, 0x4e, 0xf3, 0x28, 0xc6, 0x1a, 0xa3, 0x9c, 0xfd, 0xb2, 0x48, 0x4e, 0xaa, 0x32, 0xa1, 0x51, 0xb1, 0xfe, 0x3d, 0xfd, 0x1f, 0x96, 0xdd, 0x8c, 0x97, 0x11, 0xfd, 0x86, 0xd6, 0xc5, 0x81, 0x13},
{0xf5, 0x5d, 0x68, 0x90, 0xe, 0x2d, 0x83, 0x81, 0xec, 0xcb, 0x81, 0x64, 0xcb, 0x99, 0x76, 0xf2, 0x4b, 0x2d, 0xe0, 0xdd, 0x61, 0xa3, 0x1b, 0x97, 0xce, 0x6e, 0xb2, 0x38, 0x50, 0xd5, 0xe8, 0x19},
{0xaa, 0xaa, 0x8c, 0x4c, 0xb4, 0xa, 0xac, 0xee, 0x1e, 0x2, 0xdc, 0x65, 0x42, 0x4b, 0x2a, 0x6c, 0x8e, 0x99, 0xf8, 0x3, 0xb7, 0x2f, 0x79, 0x29, 0xc4, 0x10, 0x1d, 0x7f, 0xae, 0x6b, 0xff, 0x32},
}
func ZeroPieceCommitment(sz abi.UnpaddedPieceSize) cid.Cid {
level := bits.TrailingZeros64(uint64(sz.Padded())) - Skip - 5 // 2^5 = 32
commP, _ := commcid.PieceCommitmentV1ToCID(PieceComms[level][:])
return commP
}
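The deleted zerocomm package (it now lives in go-commp-utils, per the import changes further down) indexes a table of precomputed zero-piece commitments by tree level: the padded size is a power of two, so bits.TrailingZeros64 gives its log2, and subtracting Skip and 5 (leaf nodes are 32 = 2^5 bytes) yields the table index. A small worked check of that arithmetic, kept outside the abi types:

```go
package main

import (
	"fmt"
	"math/bits"
)

const skip = 2 // table starts at 128-byte pieces, per the deleted file

// level reproduces the index computation for an unpadded piece size of
// the form 127 * 2^k, where padding adds exactly 1/127th.
func level(unpadded uint64) int {
	padded := unpadded + unpadded/127 // e.g. 127 -> 128, 1016 -> 1024
	return bits.TrailingZeros64(padded) - skip - 5
}

func main() {
	fmt.Println(level(127))  // 0: 128 = 2^7, 7-2-5
	fmt.Println(level(1016)) // 3: 1024 = 2^10, the size used in TestForSise below
	fmt.Println(level(2032)) // 4: 2048 = 2^11, the 2KiB test sector size
}
```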

View File

@ -1,115 +0,0 @@
package zerocomm_test
import (
"bytes"
"fmt"
"io"
"testing"
commcid "github.com/filecoin-project/go-fil-commcid"
abi "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)
func TestComms(t *testing.T) {
t.Skip("don't have enough ram") // no, but seriously, currently this needs like 3tb of /tmp
var expPieceComms [zerocomm.Levels - zerocomm.Skip]cid.Cid
{
l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 127)), 127)
if err != nil {
t.Fatal(err)
}
expPieceComms[0] = l2
}
for i := 1; i < zerocomm.Levels-2; i++ {
var err error
sz := abi.UnpaddedPieceSize(127 << uint(i))
fmt.Println(i, sz)
r := io.LimitReader(&NullReader{}, int64(sz))
expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, r, sz)
if err != nil {
t.Fatal(err)
}
}
for i, comm := range expPieceComms {
c, err := commcid.CIDToPieceCommitmentV1(comm)
if err != nil {
t.Fatal(err)
}
if string(c) != string(zerocomm.PieceComms[i][:]) {
t.Errorf("zero commitment %d didn't match", i)
}
}
for _, comm := range expPieceComms { // Could do codegen, but this is good enough
fmt.Printf("%#v,\n", comm)
}
}
func TestCommsSmall(t *testing.T) {
var expPieceComms [8]cid.Cid
lvls := len(expPieceComms) + zerocomm.Skip
{
l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 127)), 127)
if err != nil {
t.Fatal(err)
}
expPieceComms[0] = l2
}
for i := 1; i < lvls-2; i++ {
var err error
sz := abi.UnpaddedPieceSize(127 << uint(i))
fmt.Println(i, sz)
r := io.LimitReader(&NullReader{}, int64(sz))
expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, r, sz)
if err != nil {
t.Fatal(err)
}
}
for i, comm := range expPieceComms {
c, err := commcid.CIDToPieceCommitmentV1(comm)
if err != nil {
t.Fatal(err)
}
if string(c) != string(zerocomm.PieceComms[i][:]) {
t.Errorf("zero commitment %d didn't match", i)
}
}
for _, comm := range expPieceComms { // Could do codegen, but this is good enough
fmt.Printf("%#v,\n", comm)
}
}
func TestForSise(t *testing.T) {
exp, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 1016)), 1016)
if err != nil {
return
}
actual := zerocomm.ZeroPieceCommitment(1016)
if !exp.Equals(actual) {
t.Errorf("zero commitment didn't match")
}
}
type NullReader struct{}
func (NullReader) Read(out []byte) (int, error) {
for i := range out {
out[i] = 0
}
return len(out), nil
}

View File

@ -12,9 +12,9 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
) )
// TODO: For now we handle this by halting state execution, when we get jsonrpc reconnecting // TODO: For now we handle this by halting state execution, when we get jsonrpc reconnecting

View File

@ -159,8 +159,12 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorFaultReported{}, FaultReported), on(SectorFaultReported{}, FaultReported),
), ),
FaultReported: final, // not really supported right now
FaultedFinal: final, FaultedFinal: final,
Removed: final, Removed: final,
FailedUnrecoverable: final,
} }
func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(statemachine.Context, SectorInfo) error, uint64, error) { func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(statemachine.Context, SectorInfo) error, uint64, error) {
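The fsm hunk registers FaultReported and FailedUnrecoverable as terminal entries so every declared sector state has a planner (the new TestPlannerList in the next hunk enforces that in both directions). A stripped-down sketch of the planner-map shape, with a shared `final` handler for terminal states; the states and events here are generic, not the sealing FSM's:

```go
package main

import "fmt"

type State string

type Event struct{ Name string }

// planner consumes pending events and returns the next state.
type planner func(events []Event, current State) State

// final is reused for terminal states: it ignores events and stays put.
func final(_ []Event, current State) State { return current }

var planners = map[State]planner{
	"Packing": func(_ []Event, _ State) State { return "PreCommit1" },
	"Removed": final,
	"Failed":  final,
}

func plan(events []Event, current State) (State, error) {
	p, ok := planners[current]
	if !ok {
		return current, fmt.Errorf("no planner for state %s", current)
	}
	return p(events, current), nil
}

func main() {
	next, _ := plan(nil, "Packing")
	fmt.Println(next) // PreCommit1
	next, _ = plan(nil, "Failed")
	fmt.Println(next) // Failed
}
```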

View File

@ -160,3 +160,18 @@ func TestPlanCommittingHandlesSectorCommitFailed(t *testing.T) {
require.Equal(t, CommitFailed, m.state.State) require.Equal(t, CommitFailed, m.state.State)
} }
func TestPlannerList(t *testing.T) {
for state := range ExistSectorStateList {
_, ok := fsmPlanners[state]
require.True(t, ok, "state %s", state)
}
for state := range fsmPlanners {
if state == UndefinedSectorState {
continue
}
_, ok := ExistSectorStateList[state]
require.True(t, ok, "state %s", state)
}
}

View File

@ -283,6 +283,7 @@ func (m *Sealing) StartPacking(sectorID abi.SectorNumber) error {
// Caller should hold m.unsealedInfoMap.lk // Caller should hold m.unsealedInfoMap.lk
func (m *Sealing) getSectorAndPadding(ctx context.Context, size abi.UnpaddedPieceSize) (abi.SectorNumber, []abi.PaddedPieceSize, error) { func (m *Sealing) getSectorAndPadding(ctx context.Context, size abi.UnpaddedPieceSize) (abi.SectorNumber, []abi.PaddedPieceSize, error) {
for tries := 0; tries < 100; tries++ {
for k, v := range m.unsealedInfoMap.infos { for k, v := range m.unsealedInfoMap.infos {
pads, padLength := ffiwrapper.GetRequiredPadding(v.stored, size.Padded()) pads, padLength := ffiwrapper.GetRequiredPadding(v.stored, size.Padded())
@ -291,21 +292,43 @@ func (m *Sealing) getSectorAndPadding(ctx context.Context, size abi.UnpaddedPiec
} }
} }
ns, ssize, err := m.newDealSector(ctx) if len(m.unsealedInfoMap.infos) > 0 {
if err != nil { log.Infow("tried to put a piece into an open sector, found none with enough space", "open", len(m.unsealedInfoMap.infos), "size", size, "tries", tries)
return 0, nil, err
} }
ns, ssize, err := m.newDealSector(ctx)
switch err {
case nil:
m.unsealedInfoMap.infos[ns] = UnsealedSectorInfo{ m.unsealedInfoMap.infos[ns] = UnsealedSectorInfo{
numDeals: 0, numDeals: 0,
stored: 0, stored: 0,
pieceSizes: nil, pieceSizes: nil,
ssize: ssize, ssize: ssize,
} }
case errTooManySealing:
m.unsealedInfoMap.lk.Unlock()
select {
case <-time.After(2 * time.Second):
case <-ctx.Done():
m.unsealedInfoMap.lk.Lock()
return 0, nil, xerrors.Errorf("getting sector for piece: %w", ctx.Err())
}
m.unsealedInfoMap.lk.Lock()
continue
default:
return 0, nil, xerrors.Errorf("creating new sector: %w", err)
}
return ns, nil, nil return ns, nil, nil
} }
return 0, nil, xerrors.Errorf("failed to allocate piece to a sector")
}
var errTooManySealing = errors.New("too many sectors sealing")
// newDealSector creates a new sector for deal storage // newDealSector creates a new sector for deal storage
func (m *Sealing) newDealSector(ctx context.Context) (abi.SectorNumber, abi.SectorSize, error) { func (m *Sealing) newDealSector(ctx context.Context) (abi.SectorNumber, abi.SectorSize, error) {
// First make sure we don't have too many 'open' sectors // First make sure we don't have too many 'open' sectors
@ -321,26 +344,13 @@ func (m *Sealing) newDealSector(ctx context.Context) (abi.SectorNumber, abi.Sect
} }
} }
if cfg.MaxWaitDealsSectors > 0 { if cfg.MaxWaitDealsSectors > 0 && uint64(len(m.unsealedInfoMap.infos)) >= cfg.MaxWaitDealsSectors {
// run in a loop because we have to drop the map lock here for a bit // Too many sectors are sealing in parallel. Start sealing one, and retry
tries := 0 // allocating the piece to a sector (we're dropping the lock here, so in
// case other goroutines are also trying to create a sector, we retry in
// we have to run in a loop as we're dropping unsealedInfoMap.lk // getSectorAndPadding instead of here - otherwise if we have lots of
// to actually call StartPacking. When we do that, another entry can // parallel deals in progress, we can start creating a ton of sectors
// get added to unsealedInfoMap. // with just a single deal in them)
for uint64(len(m.unsealedInfoMap.infos)) >= cfg.MaxWaitDealsSectors {
if tries > 10 {
// whatever...
break
}
if tries > 0 {
m.unsealedInfoMap.lk.Unlock()
time.Sleep(time.Second)
m.unsealedInfoMap.lk.Lock()
}
tries++
var mostStored abi.PaddedPieceSize = math.MaxUint64 var mostStored abi.PaddedPieceSize = math.MaxUint64
var best abi.SectorNumber = math.MaxUint64 var best abi.SectorNumber = math.MaxUint64
@ -350,18 +360,18 @@ func (m *Sealing) newDealSector(ctx context.Context) (abi.SectorNumber, abi.Sect
} }
} }
if best == math.MaxUint64 { if best != math.MaxUint64 {
// probably not possible, but who knows m.unsealedInfoMap.lk.Unlock()
break err := m.StartPacking(best)
m.unsealedInfoMap.lk.Lock()
if err != nil {
log.Errorf("newDealSector StartPacking error: %+v", err)
// let's pretend this is fine
}
} }
m.unsealedInfoMap.lk.Unlock() return 0, 0, errTooManySealing // will wait a bit and retry
if err := m.StartPacking(best); err != nil {
log.Error("newDealSector StartPacking error: %+v", err)
continue // let's pretend this is fine
}
m.unsealedInfoMap.lk.Lock()
}
} }
spt, err := m.currentSealProof(ctx) spt, err := m.currentSealProof(ctx)
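The sector.go hunks restructure allocation around a sentinel error: newDealSector now returns errTooManySealing instead of looping internally, and getSectorAndPadding drops the map lock, waits briefly (or bails on ctx.Done), re-locks, and retries up to 100 times. A compact sketch of that retry-with-backoff shape under a mutex; the sentinel name and the 2-second/100-try figures mirror the diff, everything else is simplified:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

var errTooManySealing = errors.New("too many sectors sealing")

type allocator struct {
	lk   sync.Mutex
	open int // open sectors; stand-in for len(unsealedInfoMap.infos)
	max  int
}

// tryNew must be called with lk held; it refuses to open another
// sector at the cap, leaving the retry to the caller.
func (a *allocator) tryNew() (int, error) {
	if a.open >= a.max {
		return 0, errTooManySealing
	}
	a.open++
	return a.open, nil
}

func (a *allocator) allocate(ctx context.Context) (int, error) {
	a.lk.Lock()
	defer a.lk.Unlock()

	for tries := 0; tries < 100; tries++ {
		n, err := a.tryNew()
		switch {
		case err == nil:
			return n, nil
		case errors.Is(err, errTooManySealing):
			// Drop the lock while waiting so whoever holds a full
			// sector can make progress (e.g. start packing it).
			a.lk.Unlock()
			select {
			case <-time.After(2 * time.Second):
			case <-ctx.Done():
				a.lk.Lock()
				return 0, ctx.Err()
			}
			a.lk.Lock()
		default:
			return 0, err
		}
	}
	return 0, errors.New("failed to allocate piece to a sector")
}

func main() {
	a := &allocator{max: 2}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(a.allocate(ctx))
	fmt.Println(a.allocate(ctx))
	fmt.Println(a.allocate(ctx)) // hits the cap, waits, then gives up via ctx
}
```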

View File

@ -6,6 +6,7 @@ var ExistSectorStateList = map[SectorState]struct{}{
Empty: {}, Empty: {},
WaitDeals: {}, WaitDeals: {},
Packing: {}, Packing: {},
GetTicket: {},
PreCommit1: {}, PreCommit1: {},
PreCommit2: {}, PreCommit2: {},
PreCommitting: {}, PreCommitting: {},
@ -75,7 +76,7 @@ const (
func toStatState(st SectorState) statSectorState { func toStatState(st SectorState) statSectorState {
switch st { switch st {
case Empty, WaitDeals, Packing, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, CommitWait, FinalizeSector: case Empty, WaitDeals, Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector:
return sstSealing return sstSealing
case Proving, Removed, Removing: case Proving, Removed, Removing:
return sstProving return sstProving

View File

@ -13,7 +13,7 @@ import (
"github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/go-statemachine"
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" "github.com/filecoin-project/go-commp-utils/zerocomm"
) )
const minRetryTime = 1 * time.Minute const minRetryTime = 1 * time.Minute
@ -137,12 +137,12 @@ func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, sector SectorI
if pci, is := m.checkPreCommitted(ctx, sector); is && pci != nil { if pci, is := m.checkPreCommitted(ctx, sector); is && pci != nil {
if sector.PreCommitMessage == nil { if sector.PreCommitMessage == nil {
log.Warn("sector %d is precommitted on chain, but we don't have precommit message", sector.SectorNumber) log.Warnf("sector %d is precommitted on chain, but we don't have precommit message", sector.SectorNumber)
return ctx.Send(SectorPreCommitLanded{TipSet: tok}) return ctx.Send(SectorPreCommitLanded{TipSet: tok})
} }
if pci.Info.SealedCID != *sector.CommR { if pci.Info.SealedCID != *sector.CommR {
log.Warn("sector %d is precommitted on chain, with different CommR: %x != %x", sector.SectorNumber, pci.Info.SealedCID, sector.CommR) log.Warnf("sector %d is precommitted on chain, with different CommR: %x != %x", sector.SectorNumber, pci.Info.SealedCID, sector.CommR)
return nil // TODO: remove when the actor allows re-precommit return nil // TODO: remove when the actor allows re-precommit
} }
@ -387,7 +387,7 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorIn
if p.DealInfo.PublishCid == nil { if p.DealInfo.PublishCid == nil {
// TODO: check if we are in an early enough state try to remove this piece // TODO: check if we are in an early enough state try to remove this piece
log.Error("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID) log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID)
// Not much to do here (and this can only happen for old spacerace sectors) // Not much to do here (and this can only happen for old spacerace sectors)
return ctx.Send(SectorRemove{}) return ctx.Send(SectorRemove{})
} }

View File

@ -59,6 +59,10 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err
return ctx.Send(SectorPacked{FillerPieces: fillerPieces}) return ctx.Send(SectorPacked{FillerPieces: fillerPieces})
} }
func checkTicketExpired(sector SectorInfo, epoch abi.ChainEpoch) bool {
return epoch-sector.TicketEpoch > MaxTicketAge // TODO: allow configuring expected seal durations
}
func (m *Sealing) getTicket(ctx statemachine.Context, sector SectorInfo) (abi.SealRandomness, abi.ChainEpoch, error) { func (m *Sealing) getTicket(ctx statemachine.Context, sector SectorInfo) (abi.SealRandomness, abi.ChainEpoch, error) {
tok, epoch, err := m.api.ChainHead(ctx.Context()) tok, epoch, err := m.api.ChainHead(ctx.Context())
if err != nil { if err != nil {
@ -79,6 +83,10 @@ func (m *Sealing) getTicket(ctx statemachine.Context, sector SectorInfo) (abi.Se
if pci != nil { if pci != nil {
ticketEpoch = pci.Info.SealRandEpoch ticketEpoch = pci.Info.SealRandEpoch
if checkTicketExpired(sector, ticketEpoch) {
return nil, 0, xerrors.Errorf("ticket expired for precommitted sector")
}
} }
rand, err := m.api.ChainGetRandomnessFromTickets(ctx.Context(), tok, crypto.DomainSeparationTag_SealRandomness, ticketEpoch, buf.Bytes()) rand, err := m.api.ChainGetRandomnessFromTickets(ctx.Context(), tok, crypto.DomainSeparationTag_SealRandomness, ticketEpoch, buf.Bytes())
@ -93,8 +101,8 @@ func (m *Sealing) handleGetTicket(ctx statemachine.Context, sector SectorInfo) e
ticketValue, ticketEpoch, err := m.getTicket(ctx, sector) ticketValue, ticketEpoch, err := m.getTicket(ctx, sector)
if err != nil { if err != nil {
allocated, aerr := m.api.StateMinerSectorAllocated(ctx.Context(), m.maddr, sector.SectorNumber, nil) allocated, aerr := m.api.StateMinerSectorAllocated(ctx.Context(), m.maddr, sector.SectorNumber, nil)
if aerr == nil { if aerr != nil {
log.Errorf("error checking if sector is allocated: %+v", err) log.Errorf("error checking if sector is allocated: %+v", aerr)
} }
if allocated { if allocated {
@ -132,27 +140,16 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo)
} }
} }
tok, height, err := m.api.ChainHead(ctx.Context()) _, height, err := m.api.ChainHead(ctx.Context())
if err != nil { if err != nil {
log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err) log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err)
return nil return nil
} }
if height-sector.TicketEpoch > MaxTicketAge { if checkTicketExpired(sector, height) {
pci, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok)
if err != nil {
log.Errorf("getting precommit info: %+v", err)
}
if pci == nil {
return ctx.Send(SectorOldTicket{}) // go get new ticket return ctx.Send(SectorOldTicket{}) // go get new ticket
} }
// TODO: allow configuring expected seal durations, if we're here, it's
// pretty unlikely that we'll precommit on time (unless the miner
// process has just restarted and the worker had the result ready)
}
pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.pieceInfos()) pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.pieceInfos())
if err != nil { if err != nil {
return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(1) failed: %w", err)}) return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(1) failed: %w", err)})
@ -297,8 +294,10 @@ func (m *Sealing) handlePreCommitWait(ctx statemachine.Context, sector SectorInf
switch mw.Receipt.ExitCode { switch mw.Receipt.ExitCode {
case exitcode.Ok: case exitcode.Ok:
// this is what we expect // this is what we expect
case exitcode.SysErrInsufficientFunds:
fallthrough
case exitcode.SysErrOutOfGas: case exitcode.SysErrOutOfGas:
// gas estimator guessed a wrong number // gas estimator guessed a wrong number / out of funds:
return ctx.Send(SectorRetryPreCommit{}) return ctx.Send(SectorRetryPreCommit{})
default: default:
log.Error("sector precommit failed: ", mw.Receipt.ExitCode) log.Error("sector precommit failed: ", mw.Receipt.ExitCode)
@ -476,8 +475,10 @@ func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo)
switch mw.Receipt.ExitCode { switch mw.Receipt.ExitCode {
case exitcode.Ok: case exitcode.Ok:
// this is what we expect // this is what we expect
case exitcode.SysErrInsufficientFunds:
fallthrough
case exitcode.SysErrOutOfGas: case exitcode.SysErrOutOfGas:
// gas estimator guessed a wrong number // gas estimator guessed a wrong number / out of funds
return ctx.Send(SectorRetrySubmitCommit{}) return ctx.Send(SectorRetrySubmitCommit{})
default: default:
return ctx.Send(SectorCommitFailed{xerrors.Errorf("submitting sector proof failed (exit=%d, msg=%s) (t:%x; s:%x(%d); p:%x)", mw.Receipt.ExitCode, sector.CommitMessage, sector.TicketValue, sector.SeedValue, sector.SeedEpoch, sector.Proof)}) return ctx.Send(SectorCommitFailed{xerrors.Errorf("submitting sector proof failed (exit=%d, msg=%s) (t:%x; s:%x(%d); p:%x)", mw.Receipt.ExitCode, sector.CommitMessage, sector.TicketValue, sector.SeedValue, sector.SeedEpoch, sector.Proof)})
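Both message-wait handlers above now treat SysErrInsufficientFunds like SysErrOutOfGas by falling through to the retry branch, rather than failing the sector outright. A tiny sketch of grouping receipt exit codes that way; the codes below are local constants, not the real exitcode values:

```go
package main

import "fmt"

// Local stand-ins for the relevant exit codes.
const (
	Ok = iota
	SysErrOutOfGas
	SysErrInsufficientFunds
	SysErrForbidden
)

func onReceipt(code int) string {
	switch code {
	case Ok:
		return "proceed to next state"
	case SysErrInsufficientFunds:
		// Group with the out-of-gas path: both are worth a resubmit.
		fallthrough
	case SysErrOutOfGas:
		return "retry submitting the message"
	default:
		return fmt.Sprintf("fail sector (exit=%d)", code)
	}
}

func main() {
	for _, c := range []int{Ok, SysErrInsufficientFunds, SysErrOutOfGas, SysErrForbidden} {
		fmt.Println(c, "->", onReceipt(c))
	}
}
```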

20
go.mod
View File

@ -1,6 +1,6 @@
module github.com/filecoin-project/lotus module github.com/filecoin-project/lotus
go 1.14 go 1.15
require ( require (
contrib.go.opencensus.io/exporter/jaeger v0.1.0 contrib.go.opencensus.io/exporter/jaeger v0.1.0
@ -8,6 +8,7 @@ require (
github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml v0.3.1
github.com/GeertJohan/go.rice v1.0.0 github.com/GeertJohan/go.rice v1.0.0
github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee
github.com/Jeffail/gabs v1.4.0
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129
@ -27,11 +28,12 @@ require (
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect
github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816 github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
github.com/filecoin-project/go-data-transfer v1.2.0 github.com/filecoin-project/go-data-transfer v1.2.2
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335 github.com/filecoin-project/go-fil-markets v1.0.9
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49 github.com/filecoin-project/go-jsonrpc v0.1.2
github.com/filecoin-project/go-multistore v0.0.3 github.com/filecoin-project/go-multistore v0.0.3
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261
@ -67,7 +69,7 @@ require (
github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459
github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-filestore v1.0.0
github.com/ipfs/go-fs-lock v0.0.6 github.com/ipfs/go-fs-lock v0.0.6
github.com/ipfs/go-graphsync v0.5.0 github.com/ipfs/go-graphsync v0.5.1
github.com/ipfs/go-ipfs-blockstore v1.0.3 github.com/ipfs/go-ipfs-blockstore v1.0.3
github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-chunker v0.0.5
github.com/ipfs/go-ipfs-ds-help v1.0.0 github.com/ipfs/go-ipfs-ds-help v1.0.0
@ -87,7 +89,7 @@ require (
github.com/ipfs/go-path v0.0.7 github.com/ipfs/go-path v0.0.7
github.com/ipfs/go-unixfs v0.2.4 github.com/ipfs/go-unixfs v0.2.4
github.com/ipfs/interface-go-ipfs-core v0.2.3 github.com/ipfs/interface-go-ipfs-core v0.2.3
github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4 github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d
github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018
github.com/kelseyhightower/envconfig v1.4.0 github.com/kelseyhightower/envconfig v1.4.0
github.com/lib/pq v1.7.0 github.com/lib/pq v1.7.0
@ -131,10 +133,10 @@ require (
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542
go.opencensus.io v0.22.4 go.opencensus.io v0.22.5
go.uber.org/dig v1.10.0 // indirect go.uber.org/dig v1.10.0 // indirect
go.uber.org/fx v1.9.0 go.uber.org/fx v1.9.0
go.uber.org/multierr v1.5.0 go.uber.org/multierr v1.6.0
go.uber.org/zap v1.16.0 go.uber.org/zap v1.16.0
golang.org/x/net v0.0.0-20201021035429-f5854403a974 golang.org/x/net v0.0.0-20201021035429-f5854403a974
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9

66
go.sum
View File

@ -40,6 +40,8 @@ github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voi
github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0=
github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K172oDhSKU0dJ/miJramo9NITOMyZQ= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K172oDhSKU0dJ/miJramo9NITOMyZQ=
github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY=
github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo=
github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
@ -245,26 +247,29 @@ github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816 h1:
github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8=
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg=
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434 h1:0kHszkYP3hgApcjl5x4rpwONhN9+j7XDobf6at5XfHs=
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U=
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus=
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
github.com/filecoin-project/go-data-transfer v1.0.1 h1:5sYKDbstyDsdJpVP4UGUW6+BgCNfgnH8hQgf0E3ZAno= github.com/filecoin-project/go-data-transfer v1.0.1 h1:5sYKDbstyDsdJpVP4UGUW6+BgCNfgnH8hQgf0E3ZAno=
github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo=
github.com/filecoin-project/go-data-transfer v1.2.0 h1:LM+K+J+y9t8e3gYskJHWDlyHJsF6aaxoHOP+HIiVE1U= github.com/filecoin-project/go-data-transfer v1.2.2 h1:zBeUNqSXgYbHqyl3mnwQU5GdOM1h0ecbqc6yvqmHsCQ=
github.com/filecoin-project/go-data-transfer v1.2.0/go.mod h1:ZAH51JZFR8NZC4FPiDPG+swjgui0q6zTMJbztc6pHhY= github.com/filecoin-project/go-data-transfer v1.2.2/go.mod h1:ZAH51JZFR8NZC4FPiDPG+swjgui0q6zTMJbztc6pHhY=
github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ=
github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335 h1:DF8eu0WdEBnSVdu71+jfT4YMk6fO7AIJk2ZiWd3l15c= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg=
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
github.com/filecoin-project/go-fil-markets v1.0.9 h1:bGWo6xoXV9zMPYgbplQDtUREogDuKPiSY1CYwxV5cOY=
github.com/filecoin-project/go-fil-markets v1.0.9/go.mod h1:uOikzYK7aNbSWMczCp6Ru257ML4PplLRBfDk/NAOgaY=
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI=
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49 h1:FSY245KeXFCUgyfFEu+bhrZNk8BGGJyfpSmQl2aiPU8= github.com/filecoin-project/go-jsonrpc v0.1.2 h1:MTebUawBHLxxY9gDi1WXuGc89TWIDmsgoDqeZSk9KRw=
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-jsonrpc v0.1.2/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4=
github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI= github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI=
github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ=
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg=
@ -273,7 +278,6 @@ github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab h1:cEDC5Ei8UuT99hPWhCjA72SM9AuRtnpvdSTIYbnzN8I=
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc h1:+hbMY4Pcx2oizrfH08VWXwrj5mU8aJT6g0UNxGHFCGU= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc h1:+hbMY4Pcx2oizrfH08VWXwrj5mU8aJT6g0UNxGHFCGU=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
@ -284,7 +288,6 @@ github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZO
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg=
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8=
github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4=
github.com/filecoin-project/specs-actors v0.9.12 h1:iIvk58tuMtmloFNHhAOQHG+4Gci6Lui0n7DYQGi3cJk=
github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
github.com/filecoin-project/specs-actors v0.9.13 h1:rUEOQouefi9fuVY/2HOroROJlZbOzWYXXeIh41KF2M4= github.com/filecoin-project/specs-actors v0.9.13 h1:rUEOQouefi9fuVY/2HOroROJlZbOzWYXXeIh41KF2M4=
github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
@ -376,7 +379,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws=
github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@ -385,7 +387,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@ -401,7 +402,6 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -501,7 +501,6 @@ github.com/ipfs/go-blockservice v0.0.3/go.mod h1:/NNihwTi6V2Yr6g8wBI+BSwPuURpBRM
github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So=
github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M=
github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 h1:hFJoI1D2a3MqiNkSb4nKwrdkhCngUxUTFNwVwovZX2s=
github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
github.com/ipfs/go-blockservice v0.1.4 h1:Vq+MlsH8000KbbUciRyYMEw/NNP8UAGmcqKi4uWmFGA= github.com/ipfs/go-blockservice v0.1.4 h1:Vq+MlsH8000KbbUciRyYMEw/NNP8UAGmcqKi4uWmFGA=
github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
@ -526,7 +525,6 @@ github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRV
github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8=
github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg= github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg=
github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs=
@ -555,19 +553,18 @@ github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPi
github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0=
github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM=
github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE=
github.com/ipfs/go-graphsync v0.4.2 h1:Y/jt5r619yj0LI7OLtGKh4jYm8goYUcuJ09y7TZ3zMo=
github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0=
github.com/ipfs/go-graphsync v0.4.3 h1:2t+oCpufufs1oqChoWiIK7V5uC1XCtf06PK9nqMV6pM=
github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY=
github.com/ipfs/go-graphsync v0.5.0 h1:iaByvxq88Ys1KcaQzTS1wmRhNsNEo3SaUiSGqTSbGmM= github.com/ipfs/go-graphsync v0.5.0 h1:iaByvxq88Ys1KcaQzTS1wmRhNsNEo3SaUiSGqTSbGmM=
github.com/ipfs/go-graphsync v0.5.0/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= github.com/ipfs/go-graphsync v0.5.0/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk=
github.com/ipfs/go-graphsync v0.5.1 h1:4fXBRvRKicTgTmCFMmEua/H5jvmAOLgU9Z7PCPWt2ec=
github.com/ipfs/go-graphsync v0.5.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk=
github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk= github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk=
github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ=
github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU=
github.com/ipfs/go-ipfs-blockstore v1.0.1 h1:fnuVj4XdZp4yExhd0CnUwAiMNJHiPnfInhiuwz4lW1w=
github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE=
github.com/ipfs/go-ipfs-blockstore v1.0.3 h1:RDhK6fdg5YsonkpMuMpdvk/pRtOQlrIRIybuQfkvB2M= github.com/ipfs/go-ipfs-blockstore v1.0.3 h1:RDhK6fdg5YsonkpMuMpdvk/pRtOQlrIRIybuQfkvB2M=
github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE=
@ -669,15 +666,14 @@ github.com/ipfs/iptb v1.4.0 h1:YFYTrCkLMRwk/35IMyC6+yjoQSHTEcNcefBStLJzgvo=
github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg= github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg=
github.com/ipfs/iptb-plugins v0.2.1 h1:au4HWn9/pRPbkxA08pDx2oRAs4cnbgQWgV0teYXuuGA= github.com/ipfs/iptb-plugins v0.2.1 h1:au4HWn9/pRPbkxA08pDx2oRAs4cnbgQWgV0teYXuuGA=
github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs= github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs=
github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4 h1:6phjU3kXvCEWOZpu+Ob0w6DzgPFZmDLgLPxJhD8RxEY=
github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw=
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d h1:iphSzTuPqyDgH7WUVZsdqUnQNzYgIblsVr1zhVNA33U=
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ=
github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8=
github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f h1:XpOuNQ5GbXxUcSukbQcW9jkE7REpaFGJU2/T00fo9kA=
github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM=
github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 h1:RbRHv8epkmvBYA5cGfz68GUSbOgx5j/7ObLIl4Rsif0= github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 h1:RbRHv8epkmvBYA5cGfz68GUSbOgx5j/7ObLIl4Rsif0=
github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6 h1:6Mq+tZGSEMEoJJ1NbJRhddeelkXZcU8yfH/ZRYUo/Es=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0=
github.com/ipld/go-ipld-prime-proto v0.1.0 h1:j7gjqrfwbT4+gXpHwEx5iMssma3mnctC7YaCimsFP70= github.com/ipld/go-ipld-prime-proto v0.1.0 h1:j7gjqrfwbT4+gXpHwEx5iMssma3mnctC7YaCimsFP70=
github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE=
@ -732,7 +728,6 @@ github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3/go.mod h1:BYpt4
github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0=
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f h1:qET3Wx0v8tMtoTOQnsJXVvqvCopSf48qobR6tcJuDHo=
github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s=
github.com/kilic/bls12-381 v0.0.0-20200731194930-64c428e1bff5/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kilic/bls12-381 v0.0.0-20200731194930-64c428e1bff5/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s=
github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 h1:51kHw7l/dUDdOdW06AlUGT5jnpj6nqQSILebcsikSjA= github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 h1:51kHw7l/dUDdOdW06AlUGT5jnpj6nqQSILebcsikSjA=
@ -852,7 +847,6 @@ github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqe
github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
github.com/libp2p/go-libp2p-core v0.6.1 h1:XS+Goh+QegCDojUZp00CaPMfiEADCrLjNZskWE7pvqs=
github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-core v0.7.0 h1:4a0TMjrWNTZlNvcqxZmrMRDi/NQWrhwO2pkTuLSQ/IQ= github.com/libp2p/go-libp2p-core v0.7.0 h1:4a0TMjrWNTZlNvcqxZmrMRDi/NQWrhwO2pkTuLSQ/IQ=
github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
@ -902,7 +896,6 @@ github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8
github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q=
github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ=
github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
github.com/libp2p/go-libp2p-noise v0.1.1 h1:vqYQWvnIcHpIoWJKC7Al4D6Hgj0H012TuXRhPwSMGpQ=
github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM=
github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6xnJ0wxWk= github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6xnJ0wxWk=
github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE= github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE=
@ -958,7 +951,6 @@ github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaT
github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM=
github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y= github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y=
github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA=
github.com/libp2p/go-libp2p-swarm v0.2.8 h1:cIUUvytBzNQmGSjnXFlI6UpoBGsaud82mJPIJVfkDlg=
github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM=
github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
github.com/libp2p/go-libp2p-swarm v0.3.1 h1:UTobu+oQHGdXTOGpZ4RefuVqYoJXcT0EBtSR74m2LkI= github.com/libp2p/go-libp2p-swarm v0.3.1 h1:UTobu+oQHGdXTOGpZ4RefuVqYoJXcT0EBtSR74m2LkI=
@ -969,7 +961,6 @@ github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MB
github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8 h1:v4dvk7YEW8buwCdIVWnhpv0Hp/AAJKRWIxBhmLRZrsk=
github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc=
github.com/libp2p/go-libp2p-testing v0.3.0 h1:ZiBYstPamsi7y6NJZebRudUzsYmVkt998hltyLqf8+g= github.com/libp2p/go-libp2p-testing v0.3.0 h1:ZiBYstPamsi7y6NJZebRudUzsYmVkt998hltyLqf8+g=
github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g=
@ -991,9 +982,7 @@ github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZ
github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw=
github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA=
github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU=
github.com/libp2p/go-libp2p-yamux v0.2.8 h1:0s3ELSLu2O7hWKfX1YjzudBKCP0kZ+m9e2+0veXzkn4=
github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4=
github.com/libp2p/go-libp2p-yamux v0.4.0 h1:qunEZzWwwmfSBYTtSyd81PlD1TjB5uuWcGYHWVXLbUg=
github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30=
github.com/libp2p/go-libp2p-yamux v0.4.1 h1:TJxRVPY9SjH7TNrNC80l1OJMBiWhs1qpKmeB+1Ug3xU= github.com/libp2p/go-libp2p-yamux v0.4.1 h1:TJxRVPY9SjH7TNrNC80l1OJMBiWhs1qpKmeB+1Ug3xU=
github.com/libp2p/go-libp2p-yamux v0.4.1/go.mod h1:FA/NjRYRVNjqOzpGuGqcruH7jAU2mYIjtKBicVOL3dc= github.com/libp2p/go-libp2p-yamux v0.4.1/go.mod h1:FA/NjRYRVNjqOzpGuGqcruH7jAU2mYIjtKBicVOL3dc=
@ -1007,7 +996,6 @@ github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTW
github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0=
github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU=
github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
github.com/libp2p/go-mplex v0.1.2 h1:qOg1s+WdGLlpkrczDqmhYzyk3vCfsQ8+RxRTQjOZWwI=
github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
github.com/libp2p/go-mplex v0.2.0 h1:Ov/D+8oBlbRkjBs1R1Iua8hJ8cUfbdiW8EOdZuxcgaI= github.com/libp2p/go-mplex v0.2.0 h1:Ov/D+8oBlbRkjBs1R1Iua8hJ8cUfbdiW8EOdZuxcgaI=
github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ=
@ -1071,9 +1059,7 @@ github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ
github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.3.7 h1:v40A1eSPJDIZwz2AvrV3cxpTZEGDP11QJbukmEhYyQI=
github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
github.com/libp2p/go-yamux v1.4.0 h1:7nqe0T95T2CWh40IdJ/tp8RMor4ubc9/wYZpB2a/Hx0=
github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI=
github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
@ -1100,14 +1086,12 @@ github.com/marten-seemann/qtls-go1-15 v0.1.0/go.mod h1:GyFwywLKkRt+6mfU99csTEY1j
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
@ -1534,12 +1518,16 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/dig v1.10.0 h1:yLmDDj9/zuDjv3gz8GQGviXMs9TfysIUMUilCpgzUJY= go.uber.org/dig v1.10.0 h1:yLmDDj9/zuDjv3gz8GQGviXMs9TfysIUMUilCpgzUJY=
go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw=
go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY= go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY=
@ -1551,12 +1539,13 @@ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
@ -1592,7 +1581,6 @@ golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
@ -1670,7 +1658,6 @@ golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@ -1689,7 +1676,6 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1754,17 +1740,14 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c h1:38q6VNPWR010vN82/SB121GujZNIfAUb4YttE2rhGuc=
golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -1811,7 +1794,6 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696 h1:Bfazo+enXJET5SbHeh95NtxabJF6fJ9r/jpfRJgd3j4= golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696 h1:Bfazo+enXJET5SbHeh95NtxabJF6fJ9r/jpfRJgd3j4=
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
@ -1881,7 +1863,6 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
@ -1893,11 +1874,9 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1944,7 +1923,6 @@ launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80
modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=
modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM= modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM=
modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254= modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254=

View File

@ -1,113 +0,0 @@
package commp

import (
    "bytes"
    "math/bits"

    "github.com/ipfs/go-cid"
    "golang.org/x/xerrors"

    ffi "github.com/filecoin-project/filecoin-ffi"
    "github.com/filecoin-project/go-padreader"
    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
    "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)

const commPBufPad = abi.PaddedPieceSize(8 << 20)
const CommPBuf = abi.UnpaddedPieceSize(commPBufPad - (commPBufPad / 128)) // can't use .Unpadded() for const

type Writer struct {
    len    int64
    buf    [CommPBuf]byte
    leaves []cid.Cid
}

func (w *Writer) Write(p []byte) (int, error) {
    n := len(p)
    for len(p) > 0 {
        buffered := int(w.len % int64(len(w.buf)))
        toBuffer := len(w.buf) - buffered
        if toBuffer > len(p) {
            toBuffer = len(p)
        }

        copied := copy(w.buf[buffered:], p[:toBuffer])
        p = p[copied:]
        w.len += int64(copied)

        if copied > 0 && w.len%int64(len(w.buf)) == 0 {
            leaf, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, bytes.NewReader(w.buf[:]), CommPBuf)
            if err != nil {
                return 0, err
            }
            w.leaves = append(w.leaves, leaf)
        }
    }
    return n, nil
}

func (w *Writer) Sum() (api.DataCIDSize, error) {
    // process last non-zero leaf if exists
    lastLen := w.len % int64(len(w.buf))
    rawLen := w.len

    // process remaining bit of data
    if lastLen != 0 {
        if len(w.leaves) != 0 {
            copy(w.buf[lastLen:], make([]byte, int(int64(CommPBuf)-lastLen)))
            lastLen = int64(CommPBuf)
        }

        r, sz := padreader.New(bytes.NewReader(w.buf[:lastLen]), uint64(lastLen))
        p, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, r, sz)
        if err != nil {
            return api.DataCIDSize{}, err
        }

        if sz < CommPBuf { // special case for pieces smaller than one full buffer (8 MiB padded)
            return api.DataCIDSize{
                PayloadSize: w.len,
                PieceSize:   sz.Padded(),
                PieceCID:    p,
            }, nil
        }

        w.leaves = append(w.leaves, p)
    }

    // pad with zero pieces to power-of-two size
    fillerLeaves := (1 << (bits.Len(uint(len(w.leaves) - 1)))) - len(w.leaves)
    for i := 0; i < fillerLeaves; i++ {
        w.leaves = append(w.leaves, zerocomm.ZeroPieceCommitment(CommPBuf))
    }

    if len(w.leaves) == 1 {
        return api.DataCIDSize{
            PayloadSize: rawLen,
            PieceSize:   abi.PaddedPieceSize(len(w.leaves)) * commPBufPad,
            PieceCID:    w.leaves[0],
        }, nil
    }

    pieces := make([]abi.PieceInfo, len(w.leaves))
    for i, leaf := range w.leaves {
        pieces[i] = abi.PieceInfo{
            Size:     commPBufPad,
            PieceCID: leaf,
        }
    }

    p, err := ffi.GenerateUnsealedCID(abi.RegisteredSealProof_StackedDrg32GiBV1, pieces)
    if err != nil {
        return api.DataCIDSize{}, xerrors.Errorf("generating unsealed CID: %w", err)
    }

    return api.DataCIDSize{
        PayloadSize: rawLen,
        PieceSize:   abi.PaddedPieceSize(len(w.leaves)) * commPBufPad,
        PieceCID:    p,
    }, nil
}
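For context, a minimal usage sketch of the Writer removed above. This is a hypothetical example, not part of this commit: the import path, file name, and error handling are assumptions for illustration only.

// Hypothetical usage sketch of the commp.Writer shown above; the import
// path and input file are illustrative assumptions, not part of this diff.
package main

import (
    "fmt"
    "io"
    "os"

    "github.com/filecoin-project/lotus/lib/commp" // assumed import path
)

func main() {
    f, err := os.Open("payload.bin") // any payload file
    if err != nil {
        panic(err)
    }
    defer f.Close() //nolint:errcheck

    // Stream the payload through the Writer; it hashes CommPBuf-sized leaves as it goes.
    w := &commp.Writer{}
    if _, err := io.Copy(w, f); err != nil {
        panic(err)
    }

    // Sum pads the leaves up to a power-of-two piece and returns the piece CID and sizes.
    ds, err := w.Sum()
    if err != nil {
        panic(err)
    }
    fmt.Println(ds.PieceCID, ds.PieceSize, ds.PayloadSize)
}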

View File

@ -1,88 +0,0 @@
package commp

import (
    "bytes"
    "crypto/rand"
    "fmt"
    "io"
    "io/ioutil"
    "testing"

    "github.com/stretchr/testify/require"

    "github.com/filecoin-project/go-padreader"
    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
    "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)

func TestWriterZero(t *testing.T) {
    for i, s := range []struct {
        writes []int
        expect abi.PaddedPieceSize
    }{
        {writes: []int{200}, expect: 256},
        {writes: []int{200, 200}, expect: 512},

        {writes: []int{int(CommPBuf)}, expect: commPBufPad},
        {writes: []int{int(CommPBuf) * 2}, expect: 2 * commPBufPad},
        {writes: []int{int(CommPBuf), int(CommPBuf), int(CommPBuf)}, expect: 4 * commPBufPad},
        {writes: []int{int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf)}, expect: 16 * commPBufPad},

        {writes: []int{200, int(CommPBuf)}, expect: 2 * commPBufPad},
    } {
        s := s
        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
            w := &Writer{}
            var rawSum int64
            for _, write := range s.writes {
                rawSum += int64(write)
                _, err := w.Write(make([]byte, write))
                require.NoError(t, err)
            }

            p, err := w.Sum()
            require.NoError(t, err)
            require.Equal(t, rawSum, p.PayloadSize)
            require.Equal(t, s.expect, p.PieceSize)
            require.Equal(t, zerocomm.ZeroPieceCommitment(s.expect.Unpadded()).String(), p.PieceCID.String())
        })
    }
}

func TestWriterData(t *testing.T) {
    dataLen := float64(CommPBuf) * 6.78
    data, _ := ioutil.ReadAll(io.LimitReader(rand.Reader, int64(dataLen)))

    pr, sz := padreader.New(bytes.NewReader(data), uint64(dataLen))
    exp, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, pr, sz)
    require.NoError(t, err)

    w := &Writer{}
    _, err = io.Copy(w, bytes.NewReader(data))
    require.NoError(t, err)

    res, err := w.Sum()
    require.NoError(t, err)
    require.Equal(t, exp.String(), res.PieceCID.String())
}

func BenchmarkWriterZero(b *testing.B) {
    buf := make([]byte, int(CommPBuf)*b.N)
    b.SetBytes(int64(CommPBuf))
    b.ResetTimer()

    w := &Writer{}
    _, err := w.Write(buf)
    require.NoError(b, err)
    o, err := w.Sum()
    b.StopTimer()

    require.NoError(b, err)
    require.Equal(b, zerocomm.ZeroPieceCommitment(o.PieceSize.Unpadded()).String(), o.PieceCID.String())
    require.Equal(b, int64(CommPBuf)*int64(b.N), o.PayloadSize)
}
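For reference, the expected sizes in TestWriterZero follow directly from the padding rules in Sum: a single 200-byte write is padded up to the smallest valid piece above it (254 unpadded bytes, 256 padded), which is why the first case expects 256; each full CommPBuf write produces one commPBufPad (8 MiB) leaf, and the leaf count is rounded up to the next power of two with zero-piece leaves, so three full buffers become four leaves, giving the expected 4 × commPBufPad.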

View File

@ -54,15 +54,23 @@ type PeerMgr struct {
 	dht     *dht.IpfsDHT
 	notifee *net.NotifyBundle
-	filPeerEmitter event.Emitter
+	emitter event.Emitter

 	done chan struct{}
 }

-type NewFilPeer struct {
-	Id peer.ID
+type FilPeerEvt struct {
+	Type FilPeerEvtType
+	ID   peer.ID
 }

+type FilPeerEvtType int
+
+const (
+	AddFilPeerEvt FilPeerEvtType = iota
+	RemoveFilPeerEvt
+)
+
 func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes.BootstrapPeers) (*PeerMgr, error) {
 	pm := &PeerMgr{
 		h: h,
@ -77,16 +85,16 @@ func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes
 		done: make(chan struct{}),
 	}

-	emitter, err := h.EventBus().Emitter(new(NewFilPeer))
+	emitter, err := h.EventBus().Emitter(new(FilPeerEvt))
 	if err != nil {
-		return nil, xerrors.Errorf("creating NewFilPeer emitter: %w", err)
+		return nil, xerrors.Errorf("creating FilPeerEvt emitter: %w", err)
 	}
-	pm.filPeerEmitter = emitter
+	pm.emitter = emitter

 	lc.Append(fx.Hook{
 		OnStop: func(ctx context.Context) error {
 			return multierr.Combine(
-				pm.filPeerEmitter.Close(),
+				pm.emitter.Close(),
 				pm.Stop(ctx),
 			)
 		},
@ -104,7 +112,7 @@ func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes
 }

 func (pmgr *PeerMgr) AddFilecoinPeer(p peer.ID) {
-	_ = pmgr.filPeerEmitter.Emit(NewFilPeer{Id: p}) //nolint:errcheck
+	_ = pmgr.emitter.Emit(FilPeerEvt{Type: AddFilPeerEvt, ID: p}) //nolint:errcheck
 	pmgr.peersLk.Lock()
 	defer pmgr.peersLk.Unlock()
 	pmgr.peers[p] = time.Duration(0)
@ -127,11 +135,20 @@ func (pmgr *PeerMgr) SetPeerLatency(p peer.ID, latency time.Duration) {
 }

 func (pmgr *PeerMgr) Disconnect(p peer.ID) {
+	disconnected := false
+
 	if pmgr.h.Network().Connectedness(p) == net.NotConnected {
 		pmgr.peersLk.Lock()
-		defer pmgr.peersLk.Unlock()
+		_, disconnected = pmgr.peers[p]
+		if disconnected {
 			delete(pmgr.peers, p)
 		}
+		pmgr.peersLk.Unlock()
+	}
+
+	if disconnected {
+		_ = pmgr.emitter.Emit(FilPeerEvt{Type: RemoveFilPeerEvt, ID: p}) //nolint:errcheck
 	}
 }

 func (pmgr *PeerMgr) Stop(ctx context.Context) error {
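To illustrate how the new FilPeerEvt events can be consumed, here is a hypothetical subscriber sketch, not part of this commit: the peermgr import path, function name, and logging are assumptions for illustration; it relies only on the standard go-libp2p event bus API.

// Hypothetical consumer of FilPeerEvt emitted by PeerMgr; the peermgr import
// path and surrounding wiring are assumptions, not part of this diff.
package main

import (
    "fmt"

    "github.com/libp2p/go-libp2p-core/host"

    "github.com/filecoin-project/lotus/node/modules/lib/peermgr" // assumed path
)

func watchFilPeers(h host.Host) error {
    // Subscribe to the typed event on the host's event bus.
    sub, err := h.EventBus().Subscribe(new(peermgr.FilPeerEvt))
    if err != nil {
        return err
    }
    go func() {
        defer sub.Close() //nolint:errcheck
        for evt := range sub.Out() {
            e := evt.(peermgr.FilPeerEvt)
            switch e.Type {
            case peermgr.AddFilPeerEvt:
                fmt.Println("filecoin peer added:", e.ID)
            case peermgr.RemoveFilPeerEvt:
                fmt.Println("filecoin peer removed:", e.ID)
            }
        }
    }()
    return nil
}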

Some files were not shown because too many files have changed in this diff.