Merge branch 'master' into fix/logging-quieter

Łukasz Magiera 2020-11-12 20:45:41 +01:00 committed by GitHub
commit d3d5f8d7f0
15 changed files with 2555 additions and 43 deletions


@ -359,6 +359,7 @@ jobs:
steps:
- install-deps
- prepare
- run: make deps
- run: make docsgen
- run: git --no-pager diff
- run: git --no-pager diff --quiet


@ -304,7 +304,9 @@ method-gen:
gen: type-gen method-gen
docsgen:
go run ./api/docgen > documentation/en/api-methods.md
go run ./api/docgen "api/api_full.go" "FullNode" > documentation/en/api-methods.md
go run ./api/docgen "api/api_storage.go" "StorageMiner" > documentation/en/api-methods-miner.md
go run ./api/docgen "api/api_worker.go" "WorkerAPI" > documentation/en/api-methods-worker.md
print-%:
@echo $*=$($*)
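The docsgen target now runs the generator once per API surface, passing the Go source file and the interface name as positional arguments and redirecting output to the matching markdown file. A minimal sketch, assuming one wanted to guard those arguments before use (hypothetical; the committed docgen reads os.Args[1] and os.Args[2] directly):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Hypothetical guard; the committed docgen indexes os.Args directly.
	if len(os.Args) < 3 {
		fmt.Fprintln(os.Stderr, "usage: docgen <api-source-file> <interface-name>")
		os.Exit(1)
	}
	apiFile, iface := os.Args[1], os.Args[2] // e.g. "api/api_worker.go", "WorkerAPI"
	fmt.Println("generating docs for", iface, "from", apiFile)
}
```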


@ -6,12 +6,14 @@ import (
"go/ast"
"go/parser"
"go/token"
"os"
"reflect"
"sort"
"strings"
"time"
"unicode"
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-filestore"
metrics "github.com/libp2p/go-libp2p-core/metrics"
@ -24,6 +26,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
filestore2 "github.com/filecoin-project/go-fil-markets/filestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-multistore"
@ -36,6 +39,10 @@ import (
"github.com/filecoin-project/lotus/api/apistruct"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
@ -117,17 +124,17 @@ func init() {
addExample(network.ReachabilityPublic)
addExample(build.NewestNetworkVersion)
addExample(&types.ExecutionTrace{
Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message),
MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
Msg: exampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message),
MsgRct: exampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
})
addExample(map[string]types.Actor{
"t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor),
"t01236": exampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor),
})
addExample(map[string]api.MarketDeal{
"t026363": exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
"t026363": exampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
})
addExample(map[string]api.MarketBalance{
"t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
"t026363": exampleValue("init", reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
})
addExample(map[string]*pubsub.TopicScoreSnapshot{
"/blocks": {
@ -162,9 +169,80 @@ func init() {
// because reflect.TypeOf(maddr) returns the concrete type...
ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr
// miner specific
addExample(filestore2.Path(".lotusminer/fstmp123"))
si := multistore.StoreID(12)
addExample(&si)
addExample(retrievalmarket.DealID(5))
addExample(abi.ActorID(1000))
addExample(map[string][]api.SealedRef{
"98000": {
api.SealedRef{
SectorID: 100,
Offset: 10 << 20,
Size: 1 << 20,
},
},
})
addExample(api.SectorState(sealing.Proving))
addExample(stores.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"))
addExample(storiface.FTUnsealed)
addExample(storiface.PathSealing)
addExample(map[stores.ID][]stores.Decl{
"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": {
{
SectorID: abi.SectorID{Miner: 1000, Number: 100},
SectorFileType: storiface.FTSealed,
},
},
})
addExample(map[stores.ID]string{
"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path",
})
addExample(map[uuid.UUID][]storiface.WorkerJob{
uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): {
{
ID: storiface.CallID{
Sector: abi.SectorID{Miner: 1000, Number: 100},
ID: uuid.MustParse("76081ba0-61bd-45a5-bc08-af05f1c26e5d"),
},
Sector: abi.SectorID{Miner: 1000, Number: 100},
Task: sealtasks.TTPreCommit2,
RunWait: 0,
Start: time.Unix(1605172927, 0).UTC(),
Hostname: "host",
},
},
})
addExample(map[uuid.UUID]storiface.WorkerStats{
uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): {
Info: storiface.WorkerInfo{
Hostname: "host",
Resources: storiface.WorkerResources{
MemPhysical: 256 << 30,
MemSwap: 120 << 30,
MemReserved: 2 << 30,
CPUs: 64,
GPUs: []string{"aGPU 1337"},
},
},
Enabled: true,
MemUsedMin: 0,
MemUsedMax: 0,
GpuUsed: false,
CpuUse: 0,
},
})
// worker specific
addExample(storiface.AcquireMove)
addExample(storiface.UnpaddedByteIndex(abi.PaddedPieceSize(1 << 20).Unpadded()))
addExample(map[sealtasks.TaskType]struct{}{
sealtasks.TTPreCommit2: {},
})
}
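The miner- and worker-specific examples above are registered through the same addExample helper the full-node values already use. Judging from the direct ExampleValues assignment shown for multiaddr.Multiaddr, the registry is presumably a map keyed by reflect.Type; a minimal sketch of that assumed mechanism:

```go
package main

import "reflect"

// Sketch of the assumed registry in docgen: one example value per Go type,
// looked up later by exampleValue via reflect.TypeOf.
var ExampleValues = map[reflect.Type]interface{}{}

func addExample(v interface{}) {
	ExampleValues[reflect.TypeOf(v)] = v
}

func main() {
	addExample(uint64(42))
	// The init() above registers miner/worker values the same way,
	// e.g. addExample(retrievalmarket.DealID(5)).
	_ = ExampleValues[reflect.TypeOf(uint64(0))] // -> uint64(42)
}
```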
func exampleValue(t, parent reflect.Type) interface{} {
func exampleValue(method string, t, parent reflect.Type) interface{} {
v, ok := ExampleValues[t]
if ok {
return v
@ -173,25 +251,25 @@ func exampleValue(t, parent reflect.Type) interface{} {
switch t.Kind() {
case reflect.Slice:
out := reflect.New(t).Elem()
reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t)))
reflect.Append(out, reflect.ValueOf(exampleValue(method, t.Elem(), t)))
return out.Interface()
case reflect.Chan:
return exampleValue(t.Elem(), nil)
return exampleValue(method, t.Elem(), nil)
case reflect.Struct:
es := exampleStruct(t, parent)
es := exampleStruct(method, t, parent)
v := reflect.ValueOf(es).Elem().Interface()
ExampleValues[t] = v
return v
case reflect.Array:
out := reflect.New(t).Elem()
for i := 0; i < t.Len(); i++ {
out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t)))
out.Index(i).Set(reflect.ValueOf(exampleValue(method, t.Elem(), t)))
}
return out.Interface()
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct {
es := exampleStruct(t.Elem(), t)
es := exampleStruct(method, t.Elem(), t)
//ExampleValues[t] = es
return es
}
@ -199,10 +277,10 @@ func exampleValue(t, parent reflect.Type) interface{} {
return struct{}{}
}
panic(fmt.Sprintf("No example value for type: %s", t))
panic(fmt.Sprintf("No example value for type: %s (method '%s')", t, method))
}
func exampleStruct(t, parent reflect.Type) interface{} {
func exampleStruct(method string, t, parent reflect.Type) interface{} {
ns := reflect.New(t)
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
@ -210,7 +288,7 @@ func exampleStruct(t, parent reflect.Type) interface{} {
continue
}
if strings.Title(f.Name) == f.Name {
ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t)))
ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(method, f.Type, t)))
}
}
@ -218,6 +296,7 @@ func exampleStruct(t, parent reflect.Type) interface{} {
}
type Visitor struct {
Root string
Methods map[string]ast.Node
}
@ -227,7 +306,7 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {
return v
}
if st.Name.Name != "FullNode" {
if st.Name.Name != v.Root {
return nil
}
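Replacing the hard-coded "FullNode" check with v.Root lets the same visitor collect methods for any of the three interfaces. A self-contained sketch, simplified from the Visitor shown here (assumed shape, using ast.Inspect instead of ast.Walk), of gathering an interface's method declarations by name:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// ifaceMethods returns the method declarations of the interface named root in
// a parsed file; a simplified stand-in for the Visitor used by docgen.
func ifaceMethods(f *ast.File, root string) map[string]ast.Node {
	out := map[string]ast.Node{}
	ast.Inspect(f, func(n ast.Node) bool {
		ts, ok := n.(*ast.TypeSpec)
		if !ok || ts.Name.Name != root {
			return true
		}
		it, ok := ts.Type.(*ast.InterfaceType)
		if !ok {
			return true
		}
		for _, m := range it.Methods.List {
			if len(m.Names) > 0 {
				out[m.Names[0].Name] = m
			}
		}
		return false
	})
	return out
}

func main() {
	src := `package api
type WorkerAPI interface {
	Version() (uint64, error)
	Paths() ([]string, error)
}`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "api_worker.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	for name := range ifaceMethods(f, "WorkerAPI") {
		fmt.Println(name) // Paths, Version (map order not guaranteed)
	}
}
```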
@ -243,7 +322,7 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {
const noComment = "There are not yet any comments for this method."
func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint
func parseApiASTInfo(apiFile, iface string) (map[string]string, map[string]string) { //nolint:golint
fset := token.NewFileSet()
pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments)
if err != nil {
@ -252,11 +331,11 @@ func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint
ap := pkgs["api"]
f := ap.Files["api/api_full.go"]
f := ap.Files[apiFile]
cmap := ast.NewCommentMap(fset, f, f.Comments)
v := &Visitor{make(map[string]ast.Node)}
v := &Visitor{iface, make(map[string]ast.Node)}
ast.Walk(v, pkgs["api"])
groupDocs := make(map[string]string)
@ -312,13 +391,30 @@ func methodGroupFromName(mn string) string {
}
func main() {
comments, groupComments := parseApiASTInfo()
comments, groupComments := parseApiASTInfo(os.Args[1], os.Args[2])
groups := make(map[string]*MethodGroup)
var api struct{ api.FullNode }
t := reflect.TypeOf(api)
var t reflect.Type
var permStruct, commonPermStruct reflect.Type
switch os.Args[2] {
case "FullNode":
t = reflect.TypeOf(new(struct{ api.FullNode })).Elem()
permStruct = reflect.TypeOf(apistruct.FullNodeStruct{}.Internal)
commonPermStruct = reflect.TypeOf(apistruct.CommonStruct{}.Internal)
case "StorageMiner":
t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem()
permStruct = reflect.TypeOf(apistruct.StorageMinerStruct{}.Internal)
commonPermStruct = reflect.TypeOf(apistruct.CommonStruct{}.Internal)
case "WorkerAPI":
t = reflect.TypeOf(new(struct{ api.WorkerAPI })).Elem()
permStruct = reflect.TypeOf(apistruct.WorkerStruct{}.Internal)
commonPermStruct = reflect.TypeOf(apistruct.WorkerStruct{}.Internal)
default:
panic("unknown type")
}
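Selecting permStruct and commonPermStruct per API surface keeps the Perms: line in the generated docs working for all three interfaces. The lookup presumably reads the perm struct tags on the corresponding apistruct Internal fields; a hedged sketch of that assumed lookup, using a hypothetical workerInternal stand-in:

```go
package main

import (
	"fmt"
	"reflect"
)

// Hypothetical stand-in for an apistruct Internal struct: each field is a
// method implementation carrying a `perm:"..."` tag.
type workerInternal struct {
	Version func() (uint64, error) `perm:"admin"`
}

// permFor checks the API-specific permission struct first, then the common
// one, and reads the perm tag; a sketch of the assumed docgen lookup.
func permFor(name string, permStruct, commonPermStruct reflect.Type) string {
	if f, ok := permStruct.FieldByName(name); ok {
		return f.Tag.Get("perm")
	}
	if f, ok := commonPermStruct.FieldByName(name); ok {
		return f.Tag.Get("perm")
	}
	return "unknown"
}

func main() {
	t := reflect.TypeOf(workerInternal{})
	fmt.Println(permFor("Version", t, t)) // admin
}
```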
for i := 0; i < t.NumMethod(); i++ {
m := t.Method(i)
@ -336,7 +432,7 @@ func main() {
ft := m.Func.Type()
for j := 2; j < ft.NumIn(); j++ {
inp := ft.In(j)
args = append(args, exampleValue(inp, nil))
args = append(args, exampleValue(m.Name, inp, nil))
}
v, err := json.MarshalIndent(args, "", " ")
@ -344,7 +440,7 @@ func main() {
panic(err)
}
outv := exampleValue(ft.Out(0), nil)
outv := exampleValue(m.Name, ft.Out(0), nil)
ov, err := json.MarshalIndent(outv, "", " ")
if err != nil {
@ -377,9 +473,6 @@ func main() {
}
}
permStruct := reflect.TypeOf(apistruct.FullNodeStruct{}.Internal)
commonPermStruct := reflect.TypeOf(apistruct.CommonStruct{}.Internal)
for _, g := range groupslice {
g := g
fmt.Printf("## %s\n", g.GroupName)


@ -49,7 +49,7 @@ func (s *server) HandleStream(stream inet.Stream) {
log.Warnf("failed to read block sync request: %s", err)
return
}
log.Infow("block sync request",
log.Debugw("block sync request",
"start", req.Head, "len", req.Length)
resp, err := s.processRequest(ctx, &req)


@ -264,7 +264,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted
}
if strict && nonceGap {
log.Warnf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)",
log.Debugf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)",
m.Message.From, m.Message.Nonce, nextNonce)
}
@ -1219,7 +1219,7 @@ func (mp *MessagePool) MessagesForBlocks(blks []*types.BlockHeader) ([]*types.Si
if smsg != nil {
out = append(out, smsg)
} else {
log.Warnf("could not recover signature for bls message %s", msg.Cid())
log.Debugf("could not recover signature for bls message %s", msg.Cid())
}
}
}


@ -268,7 +268,7 @@ func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange
log.Warn("chain head sub exit loop")
return
}
if len(out) > 0 {
if len(out) > 5 {
log.Warnf("head change sub is slow, has %d buffered entries", len(out))
}
select {


@ -95,7 +95,10 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha
}
took := build.Clock.Since(start)
log.Infow("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took)
log.Debugw("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took)
if took > 3*time.Second {
log.Warnw("Slow msg fetch", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took)
}
if delay := build.Clock.Now().Unix() - int64(blk.Header.Timestamp); delay > 5 {
log.Warnf("Received block with large delay %d from miner %s", delay, blk.Header.Miner)
}


@ -278,7 +278,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
for _, blk := range fts.TipSet().Blocks() {
miners = append(miners, blk.Miner.String())
}
log.Infow("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids())
log.Debugw("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids())
return false
}

File diff suppressed because it is too large.


@ -0,0 +1,537 @@
# Groups
* [](#)
* [Enabled](#Enabled)
* [Fetch](#Fetch)
* [Info](#Info)
* [Paths](#Paths)
* [Remove](#Remove)
* [Session](#Session)
* [Version](#Version)
* [Add](#Add)
* [AddPiece](#AddPiece)
* [Finalize](#Finalize)
* [FinalizeSector](#FinalizeSector)
* [Move](#Move)
* [MoveStorage](#MoveStorage)
* [Process](#Process)
* [ProcessSession](#ProcessSession)
* [Read](#Read)
* [ReadPiece](#ReadPiece)
* [Release](#Release)
* [ReleaseUnsealed](#ReleaseUnsealed)
* [Seal](#Seal)
* [SealCommit1](#SealCommit1)
* [SealCommit2](#SealCommit2)
* [SealPreCommit1](#SealPreCommit1)
* [SealPreCommit2](#SealPreCommit2)
* [Set](#Set)
* [SetEnabled](#SetEnabled)
* [Storage](#Storage)
* [StorageAddLocal](#StorageAddLocal)
* [Task](#Task)
* [TaskTypes](#TaskTypes)
* [Unseal](#Unseal)
* [UnsealPiece](#UnsealPiece)
* [Wait](#Wait)
* [WaitQuiet](#WaitQuiet)
##
### Enabled
There are not yet any comments for this method.
Perms: admin
Inputs: `null`
Response: `true`
### Fetch
Perms: admin
Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
},
1,
"sealing",
"move"
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
### Info
There are not yet any comments for this method.
Perms: admin
Inputs: `null`
Response:
```json
{
"Hostname": "string value",
"Resources": {
"MemPhysical": 42,
"MemSwap": 42,
"MemReserved": 42,
"CPUs": 42,
"GPUs": null
}
}
```
### Paths
There are not yet any comments for this method.
Perms: admin
Inputs: `null`
Response: `null`
### Remove
Storage / Other
Perms: admin
Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
}
]
```
Response: `{}`
### Session
Like ProcessSession, but returns an error when the worker is disabled
Perms: admin
Inputs: `null`
Response: `"07070707-0707-0707-0707-070707070707"`
### Version
TODO: Info() (name, ...) ?
Perms: admin
Inputs: `null`
Response: `4352`
## Add
### AddPiece
Perms: admin
Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
},
null,
1024,
{}
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
## Finalize
### FinalizeSector
Perms: admin
Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
},
null
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
## Move
### MoveStorage
Perms: admin
Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
},
1
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
## Process
### ProcessSession
Returns a random UUID for the worker session, generated when the worker
process starts
Perms: admin
Inputs: `null`
Response: `"07070707-0707-0707-0707-070707070707"`
## Read
### ReadPiece
Perms: admin
Inputs:
```json
[
{},
{
"Miner": 1000,
"Number": 9
},
1040384,
1024
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
## Release
### ReleaseUnsealed
Perms: admin
Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
},
null
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
## Seal
### SealCommit1
Perms: admin
Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
},
null,
null,
null,
{
"Unsealed": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"Sealed": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
}
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
### SealCommit2
Perms: admin
Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
},
null
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
### SealPreCommit1
Perms: admin
Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
},
null,
null
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
### SealPreCommit2
Perms: admin
Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
},
null
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
## Set
### SetEnabled
SetEnabled marks the worker as enabled/disabled. Note that this setting
may take a few seconds to propagate to the task scheduler
Perms: admin
Inputs:
```json
[
true
]
```
Response: `{}`
## Storage
### StorageAddLocal
There are not yet any comments for this method.
Perms: admin
Inputs:
```json
[
"string value"
]
```
Response: `{}`
## Task
### TaskTypes
TaskType -> Weight
Perms: admin
Inputs: `null`
Response:
```json
{
"seal/v0/precommit/2": {}
}
```
## Unseal
### UnsealPiece
Perms: admin
Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
},
1040384,
1024,
null,
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
## Wait
### WaitQuiet
WaitQuiet blocks until there are no tasks running
Perms: admin
Inputs: `null`
Response: `{}`


@ -2,6 +2,7 @@ package stores
import (
"context"
"errors"
"net/url"
gopath "path"
"sort"
@ -35,7 +36,7 @@ type StorageInfo struct {
type HealthReport struct {
Stat fsutil.FsStat
Err error
Err string
}
type SectorStorageInfo struct {
@ -175,7 +176,9 @@ func (i *Index) StorageReportHealth(ctx context.Context, id ID, report HealthRep
}
ent.fsi = report.Stat
ent.heartbeatErr = report.Err
if report.Err != "" {
ent.heartbeatErr = errors.New(report.Err)
}
ent.lastHeartbeat = time.Now()
return nil
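Changing HealthReport.Err from error to string matters because the report crosses an RPC boundary: a typical error value (such as one from errors.New) has no exported fields, so it marshals to an empty JSON object and the message is lost. A small illustration of the difference (the motivation is an assumption; the code is not from this commit):

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type withError struct{ Err error }
type withString struct{ Err string }

func main() {
	e := errors.New("disk unreachable")

	a, _ := json.Marshal(withError{Err: e})
	fmt.Println(string(a)) // {"Err":{}} - the message does not survive marshalling

	b, _ := json.Marshal(withString{Err: e.Error()})
	fmt.Println(string(b)) // {"Err":"disk unreachable"}

	// On the receiving side the non-empty string is turned back into an error,
	// mirroring the report.Err != "" check in StorageReportHealth above.
	var r withString
	_ = json.Unmarshal(b, &r)
	if r.Err != "" {
		fmt.Println(errors.New(r.Err))
	}
}
```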


@ -308,11 +308,12 @@ func (st *Local) reportStorage(ctx context.Context) {
toReport := map[ID]HealthReport{}
for id, p := range st.paths {
stat, err := p.stat(st.localStorage)
toReport[id] = HealthReport{
Stat: stat,
Err: err,
r := HealthReport{Stat: stat}
if err != nil {
r.Err = err.Error()
}
toReport[id] = r
}
st.localLk.RUnlock()


@ -150,6 +150,8 @@ func (m *Miner) mine(ctx context.Context) {
ctx, span := trace.StartSpan(ctx, "/mine")
defer span.End()
go m.doWinPoStWarmup(ctx)
var lastBase MiningBase
minerLoop:
for {

miner/warmup.go (new file, 82 lines)

@ -0,0 +1,82 @@
package miner
import (
"context"
"crypto/rand"
"math"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/lotus/chain/types"
)
func (m *Miner) winPoStWarmup(ctx context.Context) error {
deadlines, err := m.api.StateMinerDeadlines(ctx, m.address, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting deadlines: %w", err)
}
var sector abi.SectorNumber = math.MaxUint64
for dlIdx := range deadlines {
partitions, err := m.api.StateMinerPartitions(ctx, m.address, uint64(dlIdx), types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err)
}
for _, partition := range partitions {
b, err := partition.ActiveSectors.First()
if err == bitfield.ErrNoBitsSet {
continue
}
if err != nil {
return err
}
sector = abi.SectorNumber(b)
}
}
if sector == math.MaxUint64 {
log.Info("skipping winning PoSt warmup, no sectors")
return nil
}
log.Infow("starting winning PoSt warmup", "sector", sector)
start := time.Now()
var r abi.PoStRandomness = make([]byte, abi.RandomnessLength)
_, _ = rand.Read(r)
si, err := m.api.StateSectorGetInfo(ctx, m.address, sector, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting sector info: %w", err)
}
_, err = m.epp.ComputeProof(ctx, []proof2.SectorInfo{
{
SealProof: si.SealProof,
SectorNumber: sector,
SealedCID: si.SealedCID,
},
}, r)
if err != nil {
return xerrors.Errorf("failed to compute proof: %w", err)
}
log.Infow("winning PoSt warmup successful", "took", time.Now().Sub(start))
return nil
}
func (m *Miner) doWinPoStWarmup(ctx context.Context) {
err := m.winPoStWarmup(ctx)
if err != nil {
log.Errorw("winning PoSt warmup failed", "error", err)
}
}


@ -118,7 +118,7 @@ func (hs *Service) HandleStream(s inet.Stream) {
hs.h.ConnManager().TagPeer(s.Conn().RemotePeer(), "fcpeer", 10)
// don't bother informing about genesis
log.Infof("Got new tipset through Hello: %s from %s", ts.Cids(), s.Conn().RemotePeer())
log.Debugf("Got new tipset through Hello: %s from %s", ts.Cids(), s.Conn().RemotePeer())
hs.syncer.InformNewHead(s.Conn().RemotePeer(), ts)
}
@ -162,7 +162,6 @@ func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error {
err := cborutil.ReadCborRPC(s, lmsg)
if err != nil {
log.Debugw("reading latency message", "error", err)
// TODO: should we just return right here?
}
t3 := build.Clock.Now()
@ -178,7 +177,9 @@ func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error {
t2 := time.Unix(0, lmsg.TSent)
offset := t0.Sub(t1) + t3.Sub(t2)
offset /= 2
log.Debugw("time offset", "offset", offset.Seconds(), "peerid", pid.String())
if offset > 5*time.Second || offset < -5*time.Second {
log.Infow("time offset", "offset", offset.Seconds(), "peerid", pid.String())
}
}
}
}()
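The offset computed in SayHello is the usual round-trip estimate: pair local and remote timestamps from each leg and halve the sum, so a symmetric network delay cancels and only the clock skew remains. A worked example with assumed concrete timestamps (the roles of t0-t3 are inferred, not stated in the hunk):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	base := time.Unix(1_605_172_927, 0)

	// Assumed roles: t0/t3 on the local clock (hello sent, latency reply
	// received); t1/t2 on the remote clock (hello received, reply sent).
	// Here the remote clock runs 4s ahead and each network leg takes 1s.
	t0 := base                      // local: hello sent
	t1 := base.Add(5 * time.Second) // remote: hello received (1s transit + 4s skew)
	t2 := t1.Add(1 * time.Second)   // remote: latency reply sent
	t3 := base.Add(3 * time.Second) // local: reply received

	// Same arithmetic as SayHello: the symmetric transit time cancels,
	// leaving an estimate of the clock skew between the two peers.
	offset := t0.Sub(t1) + t3.Sub(t2)
	offset /= 2
	fmt.Println(offset) // -4s: below the 5s threshold, so nothing would be logged at Info
}
```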