Merge remote-tracking branch 'origin/master' into feat/merge-master

Signed-off-by: Jakub Sztandera <kubuxu@protocol.ai>

commit 491f080a37
@@ -91,15 +91,16 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) {
 		return nil, err
 	}
 
-	if parent.Height() > rheight {
-		return nil, xerrors.Errorf("cache is inconsistent")
-	}
-
 	rheight -= ci.skipLength
 
-	skipTarget, err := ci.walkBack(parent, rheight)
-	if err != nil {
-		return nil, err
+	var skipTarget *types.TipSet
+	if parent.Height() < rheight {
+		skipTarget = parent
+	} else {
+		skipTarget, err = ci.walkBack(parent, rheight)
+		if err != nil {
+			return nil, xerrors.Errorf("fillCache walkback: %w", err)
+		}
 	}
 
 	lbe := &lbEntry{
@@ -113,8 +114,9 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) {
 	return lbe, nil
 }
 
+// floors to nearest skipLength multiple
 func (ci *ChainIndex) roundHeight(h abi.ChainEpoch) abi.ChainEpoch {
-	return abi.ChainEpoch(h/ci.skipLength) * ci.skipLength
+	return (h / ci.skipLength) * ci.skipLength
 }
 
 func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) {
@@ -146,6 +148,8 @@ func (ci *ChainIndex) walkBack(from *types.TipSet, to abi.ChainEpoch) (*types.Ti
 	}
 
 	if to > pts.Height() {
+		// in case pts is lower than the epoch we're looking for (null blocks)
+		// return a tipset above that height
 		return ts, nil
 	}
 	if to == pts.Height() {
chain/store/index_test.go (new file, 80 lines)
@@ -0,0 +1,80 @@
package store_test

import (
	"bytes"
	"context"
	"testing"

	"github.com/filecoin-project/lotus/chain/gen"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types/mock"
	"github.com/filecoin-project/specs-actors/actors/abi"
	datastore "github.com/ipfs/go-datastore"
	syncds "github.com/ipfs/go-datastore/sync"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	"github.com/stretchr/testify/assert"
)

func TestIndexSeeks(t *testing.T) {
	cg, err := gen.NewGenerator()
	if err != nil {
		t.Fatal(err)
	}

	gencar, err := cg.GenesisCar()
	if err != nil {
		t.Fatal(err)
	}

	gen := cg.Genesis()

	ctx := context.TODO()

	nbs := blockstore.NewBlockstore(syncds.MutexWrap(datastore.NewMapDatastore()))
	cs := store.NewChainStore(nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil)

	_, err = cs.Import(bytes.NewReader(gencar))
	if err != nil {
		t.Fatal(err)
	}

	cur := mock.TipSet(gen)
	if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil {
		t.Fatal(err)
	}
	cs.SetGenesis(gen)

	// Put 113 blocks from genesis
	for i := 0; i < 113; i++ {
		nextts := mock.TipSet(mock.MkBlock(cur, 1, 1))

		if err := cs.PutTipSet(ctx, nextts); err != nil {
			t.Fatal(err)
		}
		cur = nextts
	}

	// Put 50 null epochs + 1 block
	skip := mock.MkBlock(cur, 1, 1)
	skip.Height += 50

	skipts := mock.TipSet(skip)

	if err := cs.PutTipSet(ctx, skipts); err != nil {
		t.Fatal(err)
	}

	ts, err := cs.GetTipsetByHeight(ctx, skip.Height-10, skipts, false)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, abi.ChainEpoch(164), ts.Height())

	for i := 0; i <= 113; i++ {
		ts3, err := cs.GetTipsetByHeight(ctx, abi.ChainEpoch(i), skipts, false)
		if err != nil {
			t.Fatal(err)
		}
		assert.Equal(t, abi.ChainEpoch(i), ts3.Height())
	}
}
@@ -18,7 +18,6 @@ import (
 	"github.com/filecoin-project/specs-actors/actors/util/adt"
 
 	"github.com/filecoin-project/lotus/api"
-	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/state"
 	"github.com/filecoin-project/lotus/chain/vm"
 	"github.com/filecoin-project/lotus/metrics"
@@ -964,10 +963,6 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t
 		return ts, nil
 	}
 
-	if ts.Height()-h > build.ForkLengthThreshold {
-		log.Warnf("expensive call to GetTipsetByHeight, seeking %d levels", ts.Height()-h)
-	}
-
 	lbts, err := cs.cindex.GetTipsetByHeight(ctx, ts, h)
 	if err != nil {
 		return nil, err
@@ -60,8 +60,6 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha
 		src := msg.GetFrom()
 
 		go func() {
-			log.Infof("New block over pubsub: %s", blk.Cid())
-
 			start := time.Now()
 			log.Debug("about to fetch messages for block from pubsub")
 			bmsgs, err := s.Bsync.FetchMessagesByCids(context.TODO(), blk.BlsMessages)
@@ -145,8 +143,7 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
 	// track validation time
 	begin := time.Now()
 	defer func() {
-		end := time.Now()
-		log.Infof("block validation time: %s", end.Sub(begin))
+		log.Debugf("block validation time: %s", time.Since(begin))
 	}()
 
 	stats.Record(ctx, metrics.BlockReceived.M(1))
@@ -429,6 +429,8 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
 		return xerrors.Errorf("collectChain failed: %w", err)
 	}
 
+	// At this point we have accepted and synced to the new `maybeHead`
+	// (`StageSyncComplete`).
 	if err := syncer.store.PutTipSet(ctx, maybeHead); err != nil {
 		span.AddAttributes(trace.StringAttribute("put_error", err.Error()))
 		span.SetStatus(trace.Status{
@@ -528,8 +530,17 @@ func blockSanityChecks(h *types.BlockHeader) error {
 
 // ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec
 func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) error {
+	validationStart := time.Now()
+	defer func() {
+		dur := time.Since(validationStart)
+		durMilli := dur.Seconds() * float64(1000)
+		stats.Record(ctx, metrics.BlockValidationDurationMilliseconds.M(durMilli))
+		log.Infow("block validation", "took", dur, "height", b.Header.Height)
+	}()
+
 	ctx, span := trace.StartSpan(ctx, "validateBlock")
 	defer span.End()
 
 	if build.InsecurePoStValidation {
 		log.Warn("insecure test validation is enabled, if you see this outside of a test, it is a severe bug!")
 	}
@@ -35,6 +35,7 @@ type SyncManager struct {
 
 	syncStates []*SyncerState
 
+	// Normally this handler is set to `(*Syncer).Sync()`.
 	doSync func(context.Context, *types.TipSet) error
 
 	stop chan struct{}
@@ -280,7 +281,7 @@ func (sm *SyncManager) syncScheduler() {
 }
 
 func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) {
-	log.Info("scheduling incoming tipset sync: ", ts.Cids())
+	log.Debug("scheduling incoming tipset sync: ", ts.Cids())
 	if sm.getBootstrapState() == BSStateSelected {
 		sm.setBootstrapState(BSStateScheduled)
 		sm.syncTargets <- ts
@@ -49,6 +49,11 @@ func MkBlock(parents *types.TipSet, weightInc uint64, ticketNonce uint64) *types
 		panic(err)
 	}
 
+	pstateRoot := c
+	if parents != nil {
+		pstateRoot = parents.Blocks()[0].ParentStateRoot
+	}
+
 	var pcids []cid.Cid
 	var height abi.ChainEpoch
 	weight := types.NewInt(weightInc)
@@ -72,7 +77,7 @@ func MkBlock(parents *types.TipSet, weightInc uint64, ticketNonce uint64) *types
 		ParentWeight: weight,
 		Messages: c,
 		Height: height,
-		ParentStateRoot: c,
+		ParentStateRoot: pstateRoot,
 		BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")},
 	}
 }
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
-	"fmt"
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/specs-actors/actors/abi"
@@ -138,7 +137,7 @@ func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.Act
 	defer func() {
 		if r := recover(); r != nil {
 			if ar, ok := r.(aerrors.ActorError); ok {
-				log.Errorf("VM.Call failure: %+v", ar)
+				log.Warnf("VM.Call failure: %+v", ar)
 				aerr = ar
 				return
 			}
@@ -308,7 +307,6 @@ func (rt *Runtime) Context() context.Context {
 }
 
 func (rt *Runtime) Abortf(code exitcode.ExitCode, msg string, args ...interface{}) {
-	log.Error("Abortf: ", fmt.Sprintf(msg, args...))
 	panic(aerrors.NewfSkip(2, code, msg, args...))
 }
 
cli/chain.go (37 changed lines)
@@ -12,7 +12,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/docker/go-units"
 	"github.com/filecoin-project/go-address"
 	cborutil "github.com/filecoin-project/go-cbor-util"
 	"github.com/filecoin-project/specs-actors/actors/abi"
@@ -230,7 +229,7 @@ var chainStatObjCmd = &cli.Command{
 		}
 
 		fmt.Printf("Links: %d\n", stats.Links)
-		fmt.Printf("Size: %s (%d)\n", units.BytesSize(float64(stats.Size)), stats.Size)
+		fmt.Printf("Size: %s (%d)\n", types.SizeStr(types.NewInt(stats.Size)), stats.Size)
 		return nil
 	},
 }
@@ -842,6 +841,10 @@ var slashConsensusFault = &cli.Command{
 			Name:  "miner",
 			Usage: "Miner address",
 		},
+		&cli.StringFlag{
+			Name:  "extra",
+			Usage: "Extra block cid",
+		},
 	},
 	Action: func(cctx *cli.Context) error {
 		api, closer, err := GetFullNodeAPI(cctx)
@@ -886,10 +889,34 @@ var slashConsensusFault = &cli.Command{
 			return err
 		}
 
-		params, err := actors.SerializeParams(&miner.ReportConsensusFaultParams{
+		params := miner.ReportConsensusFaultParams{
 			BlockHeader1: bh1,
 			BlockHeader2: bh2,
-		})
+		}
 
+		if cctx.String("extra") != "" {
+			cExtra, err := cid.Parse(cctx.String("extra"))
+			if err != nil {
+				return xerrors.Errorf("parsing cid extra: %w", err)
+			}
+
+			bExtra, err := api.ChainGetBlock(ctx, cExtra)
+			if err != nil {
+				return xerrors.Errorf("getting block extra: %w", err)
+			}
+
+			be, err := cborutil.Dump(bExtra)
+			if err != nil {
+				return err
+			}
+
+			params.BlockHeaderExtra = be
+		}
+
+		enc, err := actors.SerializeParams(&params)
+		if err != nil {
+			return err
+		}
+
 		if cctx.String("miner") == "" {
 			return xerrors.Errorf("--miner flag is required")
@@ -907,7 +934,7 @@ var slashConsensusFault = &cli.Command{
 			GasPrice: types.NewInt(1),
 			GasLimit: 10000000,
 			Method:   builtin.MethodsMiner.ReportConsensusFault,
-			Params:   params,
+			Params:   enc,
 		}
 
 		smsg, err := api.MpoolPushMessage(ctx, msg)
@@ -143,7 +143,7 @@ var clientCommPCmd = &cli.Command{
 		}
 
 		fmt.Println("CID: ", encoder.Encode(ret.Root))
-		fmt.Println("Piece size: ", ret.Size)
+		fmt.Println("Piece size: ", types.SizeStr(types.NewInt(uint64(ret.Size))))
 		return nil
 	},
 }
@@ -203,7 +203,7 @@ var clientLocalCmd = &cli.Command{
 		}
 
 		for _, v := range list {
-			fmt.Printf("%s %s %d %s\n", encoder.Encode(v.Key), v.FilePath, v.Size, v.Status)
+			fmt.Printf("%s %s %s %s\n", encoder.Encode(v.Key), v.FilePath, types.SizeStr(types.NewInt(v.Size)), v.Status)
 		}
 		return nil
 	},
@@ -371,7 +371,7 @@ var clientFindCmd = &cli.Command{
 			fmt.Printf("ERR %s@%s: %s\n", offer.Miner, offer.MinerPeerID, offer.Err)
 			continue
 		}
-		fmt.Printf("RETRIEVAL %s@%s-%sfil-%db\n", offer.Miner, offer.MinerPeerID, types.FIL(offer.MinPrice), offer.Size)
+		fmt.Printf("RETRIEVAL %s@%s-%sfil-%s\n", offer.Miner, offer.MinerPeerID, types.FIL(offer.MinPrice), types.SizeStr(types.NewInt(offer.Size)))
 	}
 
 	return nil
@@ -520,7 +520,7 @@ var clientQueryAskCmd = &cli.Command{
 
 		fmt.Printf("Ask: %s\n", maddr)
 		fmt.Printf("Price per GiB: %s\n", types.FIL(ask.Ask.Price))
-		fmt.Printf("Max Piece size: %d\n", ask.Ask.MaxPieceSize)
+		fmt.Printf("Max Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.Ask.MaxPieceSize))))
 
 		size := cctx.Int64("size")
 		if size == 0 {
@@ -597,7 +597,7 @@ var clientListDeals = &cli.Command{
 			slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch)
 		}
 
-		fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%d\t%s\n", d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, storagemarket.DealStates[d.LocalDeal.State], onChain, slashed, d.LocalDeal.PieceCID, d.LocalDeal.Size, d.LocalDeal.PricePerEpoch, d.LocalDeal.Duration, d.LocalDeal.Message)
+		fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n", d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, storagemarket.DealStates[d.LocalDeal.State], onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), d.LocalDeal.PricePerEpoch, d.LocalDeal.Duration, d.LocalDeal.Message)
 	}
 	return w.Flush()
 },
@@ -11,7 +11,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/docker/go-units"
 	"github.com/ipfs/go-cid"
 	"github.com/libp2p/go-libp2p-core/peer"
 	"github.com/multiformats/go-multihash"
@@ -153,7 +152,7 @@ var stateMinerInfo = &cli.Command{
 		fmt.Printf("Owner:\t%s\n", mi.Owner)
 		fmt.Printf("Worker:\t%s\n", mi.Worker)
 		fmt.Printf("PeerID:\t%s\n", mi.PeerId)
-		fmt.Printf("SectorSize:\t%s (%d)\n", units.BytesSize(float64(mi.SectorSize)), mi.SectorSize)
+		fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize)
 
 		return nil
 	},
@@ -707,7 +706,7 @@ var stateSectorSizeCmd = &cli.Command{
 			return err
 		}
 
-		fmt.Printf("%d\n", mi.SectorSize)
+		fmt.Printf("%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize)
 		return nil
 	},
 }
@@ -19,6 +19,7 @@ import (
 	"github.com/mitchellh/go-homedir"
 	"github.com/multiformats/go-multiaddr"
 	"github.com/urfave/cli/v2"
+	"go.opencensus.io/plugin/runmetrics"
 	"go.opencensus.io/stats"
 	"go.opencensus.io/stats/view"
 	"go.opencensus.io/tag"
@@ -114,6 +115,13 @@ var DaemonCmd = &cli.Command{
 		},
 	},
 	Action: func(cctx *cli.Context) error {
+		err := runmetrics.Enable(runmetrics.RunMetricOptions{
+			EnableCPU:    true,
+			EnableMemory: true,
+		})
+		if err != nil {
+			return xerrors.Errorf("enabling runtime metrics: %w", err)
+		}
 		if prof := cctx.String("pprof"); prof != "" {
 			profile, err := os.Create(prof)
 			if err != nil {
@@ -58,8 +58,8 @@
 },
 {
 "title": "Use Lotus with systemd",
-"slug": "en+install-system-services",
-"github": "en/install-system-services.md",
+"slug": "en+install-systemd-services",
+"github": "en/install-systemd-services.md",
 "value": null
 },
 {
@@ -71,6 +71,12 @@
 ]
 },
 {
+"title": "Architecture",
+"slug": "en+arch",
+"github": "en/architecture.md",
+"value": null,
+"posts": []
+}, {
 "title": "Storage Mining",
 "slug": "en+mining",
 "github": "en/mining.md",
documentation/en/architecture.md (new file, 394 lines)
@@ -0,0 +1,394 @@
# Lotus

Lotus is an implementation of the [Filecoin Distributed Storage Network](https://filecoin.io/).
A Lotus node syncs blockchains that follow the
Filecoin protocol, validating the blocks and state transitions.
The specification for the Filecoin protocol can be found [here](https://filecoin-project.github.io/specs/).

For information on how to set up and operate a Lotus node,
please follow the instructions [here](https://lotu.sh/en+getting-started).

# Components

At a high level, a Lotus node comprises the following components:

FIXME: No mention of block production here, cross-reference with schomatis's miner doc
- The Syncer, which manages the process of syncing the blockchain
- The State Manager, which can compute the state at any given point in the chain
- The Virtual Machine (VM), which executes messages
- The Repository, where all data is stored
- P2P stuff (FIXME missing libp2p listed under other PL dependencies)? allows hello, blocksync, retrieval, storage
- API / CLI (FIXME missing, in scratchpad)
- Other Filecoin dependencies (specs actors, proofs, storage, etc., FIXME missing)
- Is the Builder worth its own component?
- Other PL dependencies (IPFS, libp2p, IPLD? FIXME, missing)
- External libraries used by Lotus and other deps (FIXME, missing)

# Preliminaries

We discuss some key Filecoin concepts here, aiming to explain them by contrasting them with analogous concepts
in other well-known blockchains like Ethereum. We only provide brief descriptions here; elaboration
can be found in the [spec](https://filecoin-project.github.io/specs/).

### Tipsets

Unlike in Ethereum, a block can have multiple parents in Filecoin. We thus refer to the parent set of a block,
instead of a single parent.
A [tipset](https://filecoin-project.github.io/specs/#systems__filecoin_blockchain__struct__tipset)
is any set of blocks that share the same parent set.

There is no concept of "block difficulty" in Filecoin. Instead,
the weight of a tipset is simply the number of blocks in the chain that ends in that tipset. Note that a longer chain
can have less weight than a shorter chain with more blocks per tipset.

We also allow for "null" tipsets, which include zero blocks. This allows miners to "skip" a round, and build on top
of an imaginary empty tipset if they want to.

We call the heaviest tipset in a chain the "head" of the chain.

### Actors and Messages

An [Actor](https://filecoin-project.github.io/specs/#systems__filecoin_vm__actor)
is analogous to a smart contract in Ethereum. Filecoin does not allow users to define their own
actors, but comes with several [builtin actors](https://github.com/filecoin-project/specs-actors),
which can be thought of as pre-compiled contracts.

A [Message](https://filecoin-project.github.io/specs/#systems__filecoin_vm__message)
is analogous to a transaction in Ethereum.

# Sync

Sync refers to the process by which a Lotus node synchronizes to the heaviest chain being advertised by its peers.
At a high level, Lotus syncs in a manner similar to most other blockchains; a Lotus node listens to the various
chains its peers claim to be at, picks the heaviest one, requests the blocks in the chosen chain,
and validates each block in that chain, running all state transitions along the way.

The majority of the sync functionality happens in the [`Syncer`](https://github.com/filecoin-project/lotus/blob/master/chain/sync.go),
internally managed by a [`SyncManager`](https://github.com/filecoin-project/lotus/blob/master/chain/sync_manager.go).

We now discuss the various stages of the sync process.

## Sync setup

When a Lotus node connects to a new peer, we exchange the head of our chain
with the new peer through [the `hello` protocol](https://github.com/filecoin-project/lotus/blob/master/node/hello/hello.go).
If the peer's head is heavier than ours, we try to sync to it. Note
that we do NOT update our chain head at this stage.
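
To make the exchange concrete, the information carried by a `hello` message looks roughly like the following sketch (the struct and field names are approximate, based on `node/hello/hello.go` at the time of writing):

```Go
// Sketch of the hello payload exchanged on connection (approximate shape).
type HelloMessage struct {
	HeaviestTipSet       []cid.Cid      // CIDs of the blocks in our current head
	HeaviestTipSetHeight abi.ChainEpoch // height of that head
	HeaviestTipSetWeight types.BigInt   // weight, used to decide whether to sync to the peer
	GenesisHash          cid.Cid        // genesis block CID; peers on a different genesis are ignored
}
```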

## Fetching and Persisting Block Headers

Note: The API refers to these stages as `StageHeaders` and `StagePersistHeaders`.

We proceed in the sync process by requesting block headers from the peer,
moving back from their head, until we reach a tipset that we have in common
(such a common tipset must exist, though it may simply be the genesis block).
The functionality can be found in `Syncer::collectHeaders()`.

If the common tipset is our head, we treat the sync as a "fast-forward", else we must
drop part of our chain to connect to the peer's head (referred to as "forking").

FIXME: This next para might be best replaced with a link to the validation doc
Some of the possible causes of failure in this stage include:

- The chain is linked to a block that we have previously marked as bad,
and stored in a [`BadBlockCache`](https://github.com/filecoin-project/lotus/blob/master/chain/badtscache.go).
- The beacon entries in a block are inconsistent (FIXME: more details about what is validated here wouldn't be bad).
- Switching to this new chain would involve a chain reorganization beyond the allowed threshold (SPECK-CHECK).

## Fetching and Validating Blocks

Note: The API refers to this stage as `StageMessages`.

Having acquired the headers and found a common tipset, we then move forward, requesting the full blocks, including the messages.

For each block, we first confirm the syntactic validity of the block (SPECK-CHECK),
which includes the syntactic validity of messages included
in the block.
We then apply the messages, running all the state transitions, and compare the state root we calculate with the provided state root.

FIXME: This next para might be best replaced with a link to the validation doc
Some of the possible causes of failure in this stage include:

- a block is syntactically invalid (including potentially containing syntactically invalid messages)
- the computed state root after applying the block doesn't match the block's state root
- FIXME: Check what's covered by syntactic validity, and add anything important that isn't (like proof validity, future checks, etc.)

The core functionality can be found in `Syncer::ValidateTipset()`, with `Syncer::checkBlockMessages()` performing
syntactic validation of messages.

## Setting the head

Note: The API refers to this stage as `StageSyncComplete`.

If all validations pass we will now set that head as our heaviest tipset in
[`ChainStore`](https://github.com/filecoin-project/lotus/blob/master/chain/store/store.go).
We already have the full state, since we calculated
it during the sync process.

FIXME (aayush) I don't fully understand the next 2 paragraphs, but it seems important. Confirm and polish.
Relevant issue in IPFS: https://github.com/ipfs/ipfs-docs/issues/264

It is important to note at this point that, similar to the IPFS architecture of addressing by content and not by location/address (FIXME: check and link to IPFS docs), the "actual" chain stored in the node repo is *relative* to which CID we look for. We always have stored a series of Filecoin blocks pointing to other blocks, each a potential chain in itself by following its parent's reference, and its parent's parent, and so on up to the genesis block. (FIXME: We need a diagram here, one of the Filecoin blog entries might have something similar to what we are describing here.) It only depends on *where* (location) we start to look. The *only* address/location reference we hold of the chain, a relative reference, is the `heaviest` pointer. This is reflected by the fact that we don't store it in the `Blockstore` by a fixed, *absolute*, CID that reflects its contents, as this will change each time we sync to a new head (FIXME: link to the immutability IPFS doc that I need to write).

FIXME: Create a further reading appendix, move this next para to it, along with other
extraneous content
This is one of the few items we store in `Datastore` by key, location, allowing its contents to change on every sync. This is reflected in the `(*ChainStore) writeHead()` function (called by `takeHeaviestTipSet()` above) where we reference the pointer by the explicit `chainHeadKey` address (the string `"head"`, not a hash embedded in a CID), and similarly in `(*ChainStore).Load()` when we start the node and create the `ChainStore`. Compare this to a Filecoin block or message, which are immutable and stored in the `Blockstore` by CID: once created they never change.

## Keeping up with the chain

A Lotus node also listens for new blocks broadcast by its peers over the `gossipsub` channel (see FIXME for more).
If we have validated such a block's parent tipset, and adding it to our tipset at its height would lead to a heavier
head, then we validate and add this block. The validation described is identical to that invoked during the sync
process (indeed, it's the same codepath).

# State

In Filecoin, the chain state at any given point is a collection of data stored under a root CID
encapsulated in the [`StateTree`](https://github.com/filecoin-project/lotus/blob/master/chain/state/statetree.go),
and accessed through the
[`StateManager`](https://github.com/filecoin-project/lotus/blob/master/chain/stmgr/stmgr.go).
The state at the chain's head is thus easily tracked and updated in a state root CID.
(FIXME: Talk about CIDs somewhere, we might want to explain some of the modify/flush/update-root mechanism here.)

## Calculating a Tipset State

Recall that a tipset is a set of blocks that have identical parents (that is, that are built on top of the same tipset).
The genesis tipset comprises the genesis block(s), and has some state corresponding to it.

The methods `TipSetState()` and `computeTipSetState()` in
[`StateManager`](https://github.com/filecoin-project/lotus/blob/master/chain/stmgr/stmgr.go)
are responsible for computing
the state that results from applying a tipset. This involves applying all the messages included
in the tipset, and performing implicit operations like awarding block rewards.

Any valid block built on top of a tipset `ts` should have its Parent State Root equal to the result of
calculating the tipset state of `ts`. Note that this means that all blocks in a tipset must have the same Parent
State Root (which is to be expected, since they have the same parent tipset).
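
As a rough illustration of that invariant (not the actual Lotus code; the helper signature below is approximate), a validator could check it along these lines:

```Go
// Sketch: every block built on top of tipset `ts` must carry the state root
// obtained by executing `ts`.
stateRoot, _, err := sm.TipSetState(ctx, ts) // approximate StateManager helper
if err != nil {
	return err
}
for _, blk := range childTs.Blocks() {
	if blk.ParentStateRoot != stateRoot {
		return xerrors.Errorf("block %s has an unexpected parent state root", blk.Cid())
	}
}
```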

### Preparing to apply a tipset

When `StateManager::computeTipsetState()` is called with a tipset, `ts`,
it retrieves the parent state root of the blocks in `ts`. It also creates a list of `BlockMessages`, which wraps the BLS
and SecP messages in a block along with the miner that produced the block.

Control then flows to `StateManager::ApplyBlocks()`, which builds a VM to apply the messages given to it. The VM
is initialized with the parent state root of the blocks in `ts`. We apply the blocks in `ts` in order (see FIXME for
ordering of blocks in a tipset).

### Applying a block

For each block, we prepare to apply the ordered messages (first BLS, then SecP). Before applying a message, we check if
we have already applied a message with that CID within the scope of this method. If so, we simply skip that message;
this is how duplicate messages included in the same tipset are skipped (with only the miner of the "first" block to
include the message getting the reward). For the actual process of message application, see FIXME (need an
internal link here); for now we
simply assume that the outcome of the VM applying a message is either an error, or a
[`MessageReceipt`](https://github.com/filecoin-project/lotus/blob/master/chain/types/message_receipt.go)
and some
other information.
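
A minimal sketch of that duplicate-skipping logic (the real code lives in `StateManager::ApplyBlocks()`; the names below are illustrative only):

```Go
applied := map[cid.Cid]bool{} // message CIDs already executed within this tipset
for _, bm := range blockMessages {
	for _, m := range bm.Messages {
		c := m.Cid()
		if applied[c] {
			continue // duplicate: only the first inclusion is executed and rewarded
		}
		applied[c] = true
		// ... run the message through the VM, accumulate rewards and penalties ...
	}
}
```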

We treat an error from the VM as a showstopper; there is no recovery, and no meaningful state can be computed for `ts`.
Given a successful receipt, we add the rewards and penalties to what the miner has earned so far. Once all the messages
included in a block have been applied (or skipped if they're a duplicate), we use an implicit message to call
the Reward Actor. This awards the miner their reward for having won a block, and also awards / penalizes them based
on the message rewards and penalties we tracked.

We then proceed to apply the next block in `ts`, using the same VM. This means that the state changes that result
from applying a message are visible when applying all subsequent messages, even if they are included in a different block.

### Finishing up

Having applied all the blocks, we send one more implicit message, to the Cron Actor, which handles operations that
must be performed at the end of every epoch (see FIXME for more). The resulting state after calling the Cron Actor
is the computed state of the tipset.

# Virtual Machine

The Virtual Machine (VM) is responsible for executing messages.
The [Lotus Virtual Machine](https://github.com/filecoin-project/lotus/blob/master/chain/vm/vm.go)
invokes the appropriate methods in the builtin actors, and provides
a [`Runtime`](https://github.com/filecoin-project/specs-actors/blob/master/actors/runtime/runtime.go)
interface to the [builtin actors](https://github.com/filecoin-project/specs-actors)
that exposes their state, allows them to take certain actions, and meters
their gas usage. The VM also performs balance transfers, creates new account actors as needed, and tracks the gas reward,
penalty, return value, and exit code.

## Applying a Message

The primary entrypoint of the VM is the `ApplyMessage()` method. This method should not return an error
unless something goes unrecoverably wrong.

The first thing this method does is assess if the message provided meets any of the penalty criteria.
If so, a penalty is issued, and the method returns. Next, the entire gas cost of the message is transferred to
a temporary gas holder account. It is from this gas holder that gas will be deducted; if it runs out of gas, the message
fails. Any unused gas in this holder will be refunded to the message's sender at the end of message execution.

The VM then increments the sender's nonce, takes a snapshot of the state, and invokes `VM::send()`.

The `send()` method creates a [`Runtime`](https://github.com/filecoin-project/lotus/blob/master/chain/vm/runtime.go)
for the subsequent message execution.
It then transfers the message's value to the recipient, creating a new account actor if needed.
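
Putting the steps above together, the control flow of `ApplyMessage()` can be summarized in the following annotated outline (a simplification of the description above, not the actual implementation):

```Go
// Outline of VM.ApplyMessage(), heavily simplified.
// 1. Check the penalty criteria (e.g. malformed message, insufficient funds);
//    if any are met, record the penalty and return early.
// 2. Move the message's full gas cost into a temporary gas holder account.
// 3. Increment the sender's nonce and snapshot the state.
// 4. Call vm.send(), which builds a Runtime, transfers the value to the
//    recipient (creating an account actor if needed) and invokes the method.
// 5. Charge gas for the returned response, refund unused gas from the holder
//    to the sender, and set aside the gas reward for the miner.
// 6. Wrap the receipt, penalty and reward into an ApplyRet and return it.
```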

### Method Invocation

We use reflection to translate a Filecoin message for the VM to an actual Go function, relying on the VM's
[`invoker`](https://github.com/filecoin-project/lotus/blob/master/chain/vm/invoker.go) structure.
Each actor has its own set of codes defined in `specs-actors/actors/builtin/methods.go`.
The `invoker` structure maps the builtin actors' CIDs
to a list of `invokeFunc` (one per exported method), which each take the `Runtime` (for state manipulation)
and the serialized input parameters.
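
For reference, the shapes involved look roughly like this (simplified from `chain/vm/invoker.go`; treat the exact types and signatures as approximate):

```Go
// Approximate shapes; see chain/vm/invoker.go for the real definitions.
type invokeFunc func(rt runtime.Runtime, params []byte) ([]byte, aerrors.ActorError)
type nativeCode []invokeFunc // indexed by method number

type invoker struct {
	builtInCode map[cid.Cid]nativeCode // actor code CID -> exported methods
}

// Dispatch is then conceptually:
//   ret, aerr := inv.builtInCode[actorCodeCID][methodNum](rt, paramBytes)
```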

FIXME (aayush) Polish this next para.

The basic layout (without reflection details) of `(*invoker).transform()` is as follows. From each actor registered in `NewInvoker()` we take its `Exports()` methods, converting them to `invokeFunc`s. The actual method is wrapped in another function that takes care of decoding the serialized parameters and the runtime; this function is passed to `shimCall()`, which encapsulates the actor code being run inside a `defer` function so it can `recover()` from panics (we fail in the actors code with panics to unwind the stack). The return values are then (CBOR) marshaled and returned to the VM.

### Returning from the VM

Once method invocation is complete (including any subcalls), we return to `ApplyMessage()`, which receives
the serialized response and the [`ActorError`](https://github.com/filecoin-project/lotus/blob/master/chain/actors/aerrors/error.go).
The sender will be charged the appropriate amount of gas for the returned response, which gets put into the
[`MessageReceipt`](https://github.com/filecoin-project/lotus/blob/master/chain/types/message_receipt.go).

The method then refunds any unused gas to the sender, sets up the gas reward for the miner, and
wraps all of this into an `ApplyRet`, which is returned.

# Building a Lotus node

When we launch a Lotus node with the command `./lotus daemon`
(see [here](https://github.com/filecoin-project/lotus/blob/master/cmd/lotus/daemon.go) for more),
the node is created through [dependency injection](https://godoc.org/go.uber.org/fx).
This relies on reflection, which makes some of the references hard to follow.
The node sets up all of the subsystems it needs to run, such as the repository, the network connections, the chain sync
service, etc.
This setup is orchestrated through calls to the `node.Override` function.
The structure of each call indicates the type of component it will set up
(many defined in [`node/modules/dtypes/`](https://github.com/filecoin-project/lotus/tree/master/node/modules/dtypes)),
and the function that will provide it.
The dependency is implicit in the argument of the provider function.

As an example, consider the `modules.ChainStore()` function that provides the
[`ChainStore`](https://github.com/filecoin-project/lotus/blob/master/chain/store/store.go) structure.
It takes as one of its parameters the [`ChainBlockstore`](https://github.com/filecoin-project/lotus/blob/master/node/modules/dtypes/storage.go)
type, which becomes one of its dependencies.
For the node to be built successfully the `ChainBlockstore` will need to be provided before `ChainStore`, a requirement
that is made explicit in another `Override()` call that sets the provider of that type as the `ChainBlockstore()` function.
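
A condensed, illustrative version of that wiring (adapted from the `Override()` calls in `node/builder.go`; the exact option helpers may differ):

```Go
// Sketch of two related providers; the injector resolves the order from the parameters.
Override(new(dtypes.ChainBlockstore), modules.ChainBlockstore), // provides the chain block store
Override(new(*store.ChainStore), modules.ChainStore),           // its constructor takes a ChainBlockstore
```

Because `modules.ChainStore` declares a `dtypes.ChainBlockstore` parameter, fx knows it must construct the block store before the chain store.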

## The Repository

The repo is the directory where all of a node's information is stored. The node is entirely defined by its repo, which
makes it easy to port to another location. This one-to-one relationship means we can speak
of the node as the repo it is associated with, instead of the daemon process that runs from that repo.

Only one daemon can be running with an associated repo at a time.
A process signals that it is running a node associated with a particular repo by creating and acquiring
a `repo.lock`.

```sh
lsof ~/.lotus/repo.lock
# COMMAND PID
# lotus 52356
```
Trying to launch a second daemon hooked to the same repo leads to a `repo is already locked (lotus daemon already running)`
error.

The `node.Repo()` function (`node/builder.go`) contains most of the dependencies (specified as `Override()` calls)
needed to properly set up the node's repo. We list the most salient ones here.

### Datastore

`Datastore` and `ChainBlockstore`: Data related to the node state is saved in the repo's `Datastore`,
an IPFS interface defined [here](github.com/ipfs/go-datastore/datastore.go).
Lotus creates this interface from a [Badger DB](https://github.com/dgraph-io/badger) in
[`FsRepo`](https://github.com/filecoin-project/lotus/blob/master/node/repo/fsrepo.go).
Every piece of data is fundamentally a key-value pair in the `datastore` directory of the repo.
There are several abstractions laid on top of it that appear through the code depending on *how* we access it,
but it is important to remember that we're always accessing it from the same place.

FIXME: Maybe mention the `Batching` interface as the developer will stumble upon it before reaching the `Datastore` one.

#### Blocks

FIXME: IPFS blocks vs Filecoin blocks ideally happens before this / here

The [`Blockstore` interface](`github.com/ipfs/go-ipfs-blockstore/blockstore.go`) structures the key-value pair
into the CID format for the key and the [`Block` interface](`github.com/ipfs/go-block-format/blocks.go`) for the value.
The `Block` value is just a raw string of bytes addressed by its hash, which is included in the CID key.

`ChainBlockstore` creates a `Blockstore` in the repo under the `/blocks` namespace.
Every key stored there will have the `blocks` prefix so that it does not collide with other stores that use the same repo.
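
To make that layering concrete, here is a toy example of storing and retrieving an IPFS block through a `Blockstore` backed by an in-memory datastore, mirroring the stores used in `chain/store/index_test.go` above (API details such as context arguments vary between library versions):

```Go
// Toy sketch: a Blockstore turns a plain key-value datastore into CID -> Block storage.
ds := syncds.MutexWrap(datastore.NewMapDatastore())
bs := blockstore.NewBlockstore(ds)

blk := blocks.NewBlock([]byte("some raw bytes")) // the CID is derived from the hash of the data
if err := bs.Put(blk); err != nil {              // stored under that content-derived CID
	return err
}
got, err := bs.Get(blk.Cid()) // retrieved by the same CID; got.RawData() is the original bytes
```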

FIXME: Link to IPFS documentation about DAG, CID, and related, especially we need a diagram that shows how we wrap each datastore inside the next layer (datastore, batching, block store, gc, etc).

#### Metadata

`modules.Datastore()` creates a `dtypes.MetadataDS`, which is an alias for the basic `Datastore` interface.
Metadata is stored here under the `/metadata` prefix.
(FIXME: Explain *what* is metadata in contrast with the block store, namely we store the pointer to the heaviest chain, we might just link to that unwritten section here later.)

FIXME: Explain the key store related calls (maybe remove, per Schomatis)

### LockedRepo

`LockedRepo()`: This method doesn't create or initialize any new structures, but rather registers an
`OnStop` [hook](https://godoc.org/go.uber.org/fx/internal/lifecycle#Hook)
that will close the locked repository associated with it on shutdown.

### Repo types / Node types

FIXME: This section needs to be clarified / corrected...I don't fully understand the config differences (what do they have in common, if anything?)

At the end of the `Repo()` function we see two mutually exclusive configuration calls based on the `RepoType` (`node/repo/fsrepo.go`).
```Go
ApplyIf(isType(repo.FullNode), ConfigFullNode(c)),
ApplyIf(isType(repo.StorageMiner), ConfigStorageMiner(c)),
```
As we said, the repo fully identifies the node, so a repo type is also a *node* type, in this case a full node or a storage miner. (FIXME: What is the difference between the two, does *full* imply miner?) In this case the `daemon` command will create a `FullNode`; this is specified in the command logic itself in `main.DaemonCmd()`, and the `FsRepo` created (and passed to `node.Repo()`) will be initiated with that type (see `(*FsRepo).Init(t RepoType)`).

## Online

FIXME: Much of this might need to be subsumed into the p2p section

The `node.Online()` configuration function (`node/builder.go`) initializes components that involve connecting to,
or interacting with, the Filecoin network. These connections are managed through the libp2p stack (FIXME link to this section when it exists).
We discuss some of the components found in the full node type (that is, included in the `ApplyIf(isType(repo.FullNode),` call).

#### Chainstore

`modules.ChainStore()` creates the [`store.ChainStore`](https://github.com/filecoin-project/lotus/blob/master/chain/store/store.go)
that wraps the stores
previously instantiated in `Repo()`. It is the main point of entry for the node to all chain-related data
(FIXME: this is incorrect, we sometimes access its underlying block store directly, and probably shouldn't).
It also holds the crucial `heaviest` pointer, which indicates the current head of the chain.

#### ChainExchange and ChainBlockservice
`ChainExchange()` and `ChainBlockservice()` establish a BitSwap connection (FIXME libp2p link)
to exchange chain information in the form of `blocks.Block`s stored in the repo. (See sync section for more details; the Filecoin blocks and messages are backed by these raw IPFS blocks that together form the different structures that define the state of the current/heaviest chain.)

#### Incoming handlers
`HandleIncomingBlocks()` and `HandleIncomingMessages()` start the services in charge of processing new Filecoin blocks
and messages from the network (see `<undefined>` for more information about the topics the node is subscribed to, FIXME: should that be part of the libp2p section or should we expand on gossipsub separately?).

#### Hello
`RunHello()`: starts the services to both send (`(*Service).SayHello()`) and receive (`(*Service).HandleStream()`, `node/hello/hello.go`)
`hello` messages. When nodes establish a new connection with each other, they exchange these messages
to share chain-related information (namely their genesis block and their heaviest tipset).

#### Syncer
`NewSyncer()` creates the `Syncer` structure and starts the services related to the chain sync process (FIXME link).

### Ordering the dependencies

We can establish the dependency relations by looking at the parameters that each function needs and by understanding
the architecture of the node and how the different components relate to each other (the chief purpose of this document).

As an example, the sync mechanism depends on the node being able to exchange different IPFS blocks with the network,
so as to be able to request the "missing pieces" needed to construct the chain. This dependency is reflected by `NewSyncer()`
having a `blocksync.BlockSync` parameter, which in turn depends on `ChainBlockservice()` and `ChainExchange()`.
The chain exchange service further depends on the chain store to save and retrieve chain data, which is reflected
in `ChainExchange()` having `ChainGCBlockstore` as a parameter (which is just a wrapper around `ChainBlockstore` capable
of garbage collection).

This block store is the same store underlying the chain store, which is an indirect dependency of `NewSyncer()` (through the `StateManager`).
(FIXME: This last line is flaky, we need to resolve the hierarchy better, we sometimes refer to the chain store and sometimes to its underlying block store. We need a diagram to visualize all the different components just mentioned otherwise it is too hard to follow. We probably even need to skip some of the connections mentioned.)
documentation/en/dev/WIP-arch-complementary-notes.md (new file, 153 lines)
@@ -0,0 +1,153 @@
# Genesis block

Seems a good way to start exploring the VM state through the instantiation of its different actors like the storage power.

Explain where we load the genesis block and the CAR entries, and where we set the root of the state. Follow the daemon command option: `chain.LoadGenesis()` saves all the blocks of the CAR file into the store provided by `ChainBlockstore` (this should already be explained in the previous section). The CAR root (MT root?) of those blocks is decoded into the `BlockHeader` that will be the Filecoin (genesis) block of the chain, but most of the information was stored in the raw data (non-Filecoin, what's the correct term?) blocks forwarded directly to the chain; the block header just has a pointer to it.

`SetGenesis` block with name 0. `(ChainStore).SetGenesis()` stores it there.

`MakeInitialStateTree` (`chain/gen/genesis/genesis.go`), used to construct the genesis block (`MakeGenesisBlock()`), constructs the state tree (`NewStateTree`), which is just a "pointer" (root node in the HAMT) to the different actors. It will be continuously used in `(*StateTree).SetActor()` to set a `types.Actor` structure under a certain `Address` (in the HAMT). (How does the `stateSnaps` work? It has no comments.)
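
As a hedged illustration of what `SetActor()` does (the field values below are made up; the real genesis setup lives in `chain/gen/genesis/`):

```Go
// Sketch: registering an actor's state under an address in the state tree.
err := stateTree.SetActor(someAddress, &types.Actor{
	Code:    builtin.AccountActorCodeID, // which builtin actor implements it
	Head:    stateHeadCid,               // CID of the actor's own state object
	Nonce:   0,
	Balance: types.NewInt(0),
})
```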

From this point we can follow different setup functions like:

* `SetupInitActor()`: see the `AddressMap`.

* `SetupStoragePowerActor`: initial (zero) power state of the chain, most important attributes.

* Account actors in the `template.Accounts`: `SetActor`.

Which other actor type could be helpful at this point?

# Basic concepts

What should be clear at this point either from this document or the spec.

## Addresses

## Accounts

# Sync Topics PubSub

Gossip sub spec and some introduction.

# Look at the constructor of a miner

Follow the `lotus-storage-miner` command to see how a miner is created, from the command to the message to the storage power logic.

# Directory structure so far, main structures seen, their relation

List the main directories we should be looking at (e.g., `chain/`) and the most important structures (e.g., `StateTree`, `Runtime`, etc.)

# Tests

Run a few messages and observe state changes. What is the easiest test that also lets us "interact" with it (modify something and observe the difference)?

### Filecoin blocks vs IPFS blocks
|
||||||
|
|
||||||
|
The term *block* has different meanings depending on the context, many times both meanings coexist at once in the code and it is important to distinguish them. (FIXME: link to IPFS blocks and related doc throughout this explanation). In terms of the lower IPFS layer, in charge of storing and retrieving data, both present at the repo or accessible through the network (e.g., through the BitSwap protocol discussed later), a block is a string of raw bytes identified by its hash, embedded and fully qualified in a CID identifier. IPFS blocks are the "building blocks" of almost any other piece of (chain) data described in the Filecoin protocol.
|
||||||
|
|
||||||
|
In contrast, in the higher Filecoin (application) layer, a block is roughly (FIXME: link to spec definition, if we have any) a set of zero or more messages grouped together by a single miner which is itself grouped with other blocks (from other miners) in the same round to form a tipset. The Filecoin blockchain is a series of "chained" tipsets, each referencing its parent by its header's *CID*, that is, its header as seen as a single IPFS block, this is where both layers interact.
|
||||||
|
|
||||||
|
Using now the full Go package qualifiers to avoid any ambiguity, the Filecoin block, `github.com/filecoin-project/lotus/chain/types.FullBlock`, is defined as,
|
||||||
|
|
||||||
|
```Go
|
||||||
|
package types
|
||||||
|
|
||||||
|
import "github.com/ipfs/go-cid"
|
||||||
|
|
||||||
|
type FullBlock struct {
|
||||||
|
Header *BlockHeader
|
||||||
|
BlsMessages []*Message
|
||||||
|
SecpkMessages []*SignedMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fb *FullBlock) Cid() cid.Cid {
|
||||||
|
return fb.Header.Cid()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
It has, besides the Filecoin messages, a header with protocol related information (e.g., its `Height`) which is (like virtually any other piece of data in the Filecoin protocol) stored, retrieved and shared as an IPFS block with its corresponding CID,
|
||||||
|
|
||||||
|
```Go
|
||||||
|
func (b *BlockHeader) Cid() cid.Cid {
|
||||||
|
sb, err := b.ToStorageBlock()
|
||||||
|
|
||||||
|
return sb.Cid()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BlockHeader) ToStorageBlock() (block.Block, error) {
|
||||||
|
data, err := b.Serialize()
|
||||||
|
|
||||||
|
return github.com/ipfs/go-block-format.block.NewBlockWithCid(data)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
These edited extracts from the `BlockHeader` show how it's treated as an IPFS block, `github.com/ipfs/go-block-format.block.BasicBlock`, to be both stored and referenced by its block storage CID.

This duality permeates the code (and the Filecoin spec, for that matter), but it is usually clear from the context which kind of block we are referring to. Normally the unqualified *block* is reserved for the Filecoin block; we won't usually refer to the IPFS one directly but only implicitly, through the concept of its CID. With enough understanding of both stacks' architecture the two definitions can coexist without much confusion, as we abstract away the IPFS layer and just use the CID as an identifier that we know to be unique: two different sequences of *raw* bytes never map to the same CID.

(FIXME: We used to give this presentation when talking about `gossipsub` topics and incoming blocks, and had to deal with, besides the block ambiguity, a similar confusion around the *message* term, used in libp2p to name anything that comes through the network; this leads to the extremely confusing hierarchy of a libp2p message containing a Filecoin block, identified by an IPFS block CID, which in turn contains Filecoin messages.)

FIXME: Move the following tipset definition to sync or wherever it is most needed, to avoid making this more confusing.

Blocks from the same round are collected into a block set (`chain/store/fts.go`):

```Go
type FullTipSet struct {
	Blocks []*types.FullBlock
	tipset *types.TipSet
	cids   []cid.Cid
}
```

The "tipset" denomination might be a bit misleading: it doesn't refer *only* to the tip, the block set from the last round in the chain, but to *any* set of blocks, so depending on the context a tipset may or may not be the actual tip. From its own perspective any block set is always the tip, because it assumes nothing about the blocks that may follow it.

# CLI, API

Explain how we communicate with the node, both through the CLI and programmatically (to create our own tools).

## Client/server architecture

In terms of the Filecoin network the node is a peer in a distributed hierarchy, but in terms of how we interact with it we have a client/server architecture.

The node itself was started with the `daemon` command and, by default, immediately began syncing the chain. Along with that service it also started a [JSON-RPC](https://en.wikipedia.org/wiki/JSON-RPC) server to allow a client to interact with it. (FIXME: Check if this client is local or can be remote, link to external documentation of the connection API.)

We can connect to this server through the Lotus CLI. Virtually any command other than `daemon` will run a client that connects (by default) to the address specified in the `api` file in the repo associated with the node (by default `~/.lotus`), e.g.,

```sh
cat ~/.lotus/api && echo
# /ip4/127.0.0.1/tcp/1234/http

# With `lotus daemon` running in another terminal.
nc -v -z 127.0.0.1 1234

# Start the daemon and turn off the logs to not clutter the command line.
bash -c "lotus daemon &" &&
  lotus wait-api &&
  lotus log set-level error # Or an env var in the daemon command.

nc -v -z 127.0.0.1 1234
# Connection to 127.0.0.1 1234 port [tcp/*] succeeded!

killall lotus
# FIXME: We need a lotus stop command:
# https://github.com/filecoin-project/lotus/issues/1827
```

FIXME: Link to more in-depth documentation of the CLI architecture, maybe some IPFS documentation (since they share some common logic).

## Node API

The JSON-RPC server exposes the node API, the `FullNode` interface (defined in `api/api_full.go`). When we issue a command like `lotus sync status` to query the progress of the node sync, we don't access the node's internals directly (those live in the separate daemon process); instead we call the `SyncState` function of the `FullNode` API interface through the RPC client started by our own command (see `NewFullNodeRPC` in `api/client/client.go` for more details).
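
For the programmatic path, here is a hedged sketch of a standalone client (the WebSocket endpoint and the token handling describe a typical default local setup and are assumptions, not something this document specifies); it goes through the same `NewFullNodeRPC` constructor:

```Go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	// The multiaddr in ~/.lotus/api maps to this endpoint; the bearer token
	// normally lives in ~/.lotus/token (read permission suffices here).
	header := http.Header{"Authorization": []string{"Bearer <token from ~/.lotus/token>"}}

	fullNode, closer, err := client.NewFullNodeRPC("ws://127.0.0.1:1234/rpc/v0", header)
	if err != nil {
		panic(err)
	}
	defer closer()

	// The same FullNode call the CLI triggers with `lotus sync status`.
	state, err := fullNode.SyncState(context.Background())
	if err != nil {
		panic(err)
	}
	for _, s := range state.ActiveSyncs {
		fmt.Println(s.Stage, s.Height)
	}
}
```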

FIXME: Link to (and create) documentation about API fulfillment.

Because we rely heavily on reflection for this part of the code, the call chain is not easily visible by just following references through the IDE's symbolic analysis. If we start from the `lotus sync` command definition (in `cli/sync.go`), we eventually end up at the `SyncState` interface method, and when we look for its implementation we find two functions:

* `(*SyncAPI).SyncState()` (in `node/impl/full/sync.go`): this is the actual implementation of the API function, what the node (here acting as the RPC server) will execute when it receives the RPC request issued from the CLI acting as the client.

* `(*FullNodeStruct).SyncState()`: this is an "empty placeholder" structure that gets connected later to the JSON-RPC client logic (see `NewMergeClient` in `lib/jsonrpc/client.go`, which is called by `NewFullNodeRPC`; FIXME: check if this is accurate). The CLI (acting as the JSON-RPC client) actually executes this function, which connects to the server and sends the JSON request that triggers the call of `(*SyncAPI).SyncState()` in the node implementation. A sketch of this placeholder follows the list.
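
Roughly, the placeholder looks like the following (an abridged sketch written from memory of the `api` struct definitions, not the verbatim source; only the `SyncState` entry is shown):

```Go
type FullNodeStruct struct {
	Internal struct {
		// One function-typed field per FullNode method. NewMergeClient fills
		// each one in with a stub that performs the actual JSON-RPC request.
		SyncState func(ctx context.Context) (*api.SyncState, error) `perm:"read"`
		// ... one entry per API method ...
	}
}

// The method the CLI ends up calling; it just delegates to the injected stub.
func (c *FullNodeStruct) SyncState(ctx context.Context) (*api.SyncState, error) {
	return c.Internal.SyncState(ctx)
}
```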

This means that when we are tracking the logic of a CLI command we will eventually reach this bifurcation and need to study the code of the server-side implementation under `node/impl` (mostly in the `common/` and `full/` directories). Once we understand this architecture, going directly to that part of the code abstracts away the JSON-RPC client/server logic, and we can think of the CLI as running the node's logic directly.

FIXME: Explain that "*the* node" is actually an API structure like `impl.FullNodeAPI` with the different API subcomponents like `full.SyncAPI`. We won't see a *single* node structure; each API (full node, miner, etc.) gathers the subcomponents it needs to service its calls.
|
@ -28,6 +28,8 @@ lotus net peers | wc -l
|
|||||||
|
|
||||||
In order to connect to the network, you need to be connected to at least 1 peer. If you’re seeing 0 peers, read our [troubleshooting notes](https://docs.lotu.sh/en+setup-troubleshooting).
|
In order to connect to the network, you need to be connected to at least 1 peer. If you’re seeing 0 peers, read our [troubleshooting notes](https://docs.lotu.sh/en+setup-troubleshooting).
|
||||||
|
|
||||||
|
Make sure that you have a reasonable "open files limit" set on your machine, such as 10000. If you're seeing a lower value, such as 256 (default on macOS), read our [troubleshooting notes](https://docs.lotu.sh/en+setup-troubleshooting) on how to update it prior to starting the Lotus daemon.
|
||||||
|
|
||||||
## Chain sync
|
## Chain sync
|
||||||
|
|
||||||
While the daemon is running, the next requirement is to sync the chain. Run the command below to view the chain sync progress. To see current chain height, visit the [network stats page](https://stats.testnet.filecoin.io/).
|
While the daemon is running, the next requirement is to sync the chain. Run the command below to view the chain sync progress. To see current chain height, visit the [network stats page](https://stats.testnet.filecoin.io/).
|
||||||
|

@ -29,4 +29,18 @@ ERROR hello hello/hello.go:81 other peer has different genesis!

- repo is already locked
```

- You already have another lotus daemon running.

## Config: Open files limit

On most systems you can check the open files limit with:

```sh
ulimit -n
```

You can also modify this number by using the `ulimit` command. It gives you the ability to control the resources available for the shell or process started by it. If the number is below 10000, you can change it with the following command prior to starting the Lotus daemon:

```sh
ulimit -n 10000
```

@ -22,16 +22,17 @@ var (

// Measures
var (
	LotusInfo                = stats.Int64("info", "Arbitrary counter to tag lotus info to", stats.UnitDimensionless)
	ChainNodeHeight          = stats.Int64("chain/node_height", "Current Height of the node", stats.UnitDimensionless)
	ChainNodeWorkerHeight    = stats.Int64("chain/node_worker_height", "Current Height of workers on the node", stats.UnitDimensionless)
	MessageReceived          = stats.Int64("message/received", "Counter for total received messages", stats.UnitDimensionless)
	MessageValidationFailure = stats.Int64("message/failure", "Counter for message validation failures", stats.UnitDimensionless)
	MessageValidationSuccess = stats.Int64("message/success", "Counter for message validation successes", stats.UnitDimensionless)
	BlockReceived            = stats.Int64("block/received", "Counter for total received blocks", stats.UnitDimensionless)
	BlockValidationFailure   = stats.Int64("block/failure", "Counter for block validation failures", stats.UnitDimensionless)
	BlockValidationSuccess   = stats.Int64("block/success", "Counter for block validation successes", stats.UnitDimensionless)
	BlockValidationDurationMilliseconds = stats.Float64("block/validation_ms", "Duration for Block Validation in ms", stats.UnitMilliseconds)
	PeerCount                = stats.Int64("peer/count", "Current number of FIL peers", stats.UnitDimensionless)
)

var (

@ -63,6 +64,10 @@ var (

		Measure:     BlockValidationSuccess,
		Aggregation: view.Count(),
	}
	BlockValidationDurationView = &view.View{
		Measure:     BlockValidationDurationMilliseconds,
		Aggregation: view.Sum(),
	}
	MessageReceivedView = &view.View{
		Measure:     MessageReceived,
		Aggregation: view.Count(),

@ -90,6 +95,7 @@ var DefaultViews = append([]*view.View{

	BlockReceivedView,
	BlockValidationFailureView,
	BlockValidationSuccessView,
	BlockValidationDurationView,
	MessageReceivedView,
	MessageValidationFailureView,
	MessageValidationSuccessView,