Merge remote-tracking branch 'origin/master' into feat/async-restartable-workers
commit 03cf6cca40
.gitignore (vendored) | 1 +

@@ -10,6 +10,7 @@
 /lotus-fountain
 /lotus-stats
 /lotus-bench
+/lotus-gateway
 /bench.json
 /lotuspond/front/node_modules
 /lotuspond/front/build
Makefile | 6 ++++++

@@ -92,6 +92,12 @@ lotus-shed: $(BUILD_DEPS)
 .PHONY: lotus-shed
 BINS+=lotus-shed
 
+lotus-gateway: $(BUILD_DEPS)
+	rm -f lotus-gateway
+	go build $(GOFLAGS) -o lotus-gateway ./cmd/lotus-gateway
+.PHONY: lotus-gateway
+BINS+=lotus-gateway
+
 build: lotus lotus-miner lotus-worker
 	@[[ $$(type -P "lotus") ]] && echo "Caution: you have \
 an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true
@@ -452,8 +452,8 @@ type FullNode interface {
 
 	PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error)
 	PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error)
-	PaychAvailableFunds(ch address.Address) (*ChannelAvailableFunds, error)
-	PaychAvailableFundsByFromTo(from, to address.Address) (*ChannelAvailableFunds, error)
+	PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error)
+	PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error)
 	PaychList(context.Context) ([]address.Address, error)
 	PaychStatus(context.Context, address.Address) (*PaychStatus, error)
 	PaychSettle(context.Context, address.Address) (cid.Cid, error)
@@ -219,8 +219,8 @@ type FullNodeStruct struct {
 
 	PaychGet func(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) `perm:"sign"`
 	PaychGetWaitReady func(context.Context, cid.Cid) (address.Address, error) `perm:"sign"`
-	PaychAvailableFunds func(address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
-	PaychAvailableFundsByFromTo func(address.Address, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
+	PaychAvailableFunds func(context.Context, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
+	PaychAvailableFundsByFromTo func(context.Context, address.Address, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
 	PaychList func(context.Context) ([]address.Address, error) `perm:"read"`
 	PaychStatus func(context.Context, address.Address) (*api.PaychStatus, error) `perm:"read"`
 	PaychSettle func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"`
@@ -955,12 +955,12 @@ func (c *FullNodeStruct) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid
 	return c.Internal.PaychGetWaitReady(ctx, sentinel)
 }
 
-func (c *FullNodeStruct) PaychAvailableFunds(ch address.Address) (*api.ChannelAvailableFunds, error) {
-	return c.Internal.PaychAvailableFunds(ch)
+func (c *FullNodeStruct) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) {
+	return c.Internal.PaychAvailableFunds(ctx, ch)
 }
 
-func (c *FullNodeStruct) PaychAvailableFundsByFromTo(from, to address.Address) (*api.ChannelAvailableFunds, error) {
-	return c.Internal.PaychAvailableFundsByFromTo(from, to)
+func (c *FullNodeStruct) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) {
+	return c.Internal.PaychAvailableFundsByFromTo(ctx, from, to)
 }
 
 func (c *FullNodeStruct) PaychList(ctx context.Context) ([]address.Address, error) {
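Note: the two availability queries now take a context.Context like the rest of the FullNode API, so cancellation and timeouts propagate through the RPC layer. A minimal caller-side sketch (`api` and `ch` are hypothetical stand-ins for a connected FullNode client and a channel address):

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Cancelling ctx now aborts the in-flight availability query.
	avail, err := api.PaychAvailableFunds(ctx, ch)
	if err != nil {
		return err
	}
	_ = avail // *api.ChannelAvailableFunds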
@@ -27,7 +27,6 @@ type heightEvents struct {
 }
 
 func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
-
 	ctx, span := trace.StartSpan(e.ctx, "events.HeightHeadChange")
 	defer span.End()
 	span.AddAttributes(trace.Int64Attribute("endHeight", int64(app[0].Height())))
@@ -150,7 +149,6 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
 //
 // ts passed to handlers is the tipset at the specified, or above, if lower tipsets were null
 func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error {
-
 	e.lk.Lock() // Tricky locking, check your locks if you modify this function!
 
 	best, err := e.tsc.best()
@@ -3,6 +3,7 @@ package state
 import (
 	"bytes"
 
+	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/specs-actors/actors/util/adt"
 	typegen "github.com/whyrusleeping/cbor-gen"
 )
@@ -69,7 +70,7 @@ func DiffAdtArray(preArr, curArr *adt.Array, out AdtArrayDiff) error {
 // Modify should be called when a value is modified in the map
 // Remove should be called when a value is removed from the map
 type AdtMapDiff interface {
-	AsKey(key string) (adt.Keyer, error)
+	AsKey(key string) (abi.Keyer, error)
 	Add(key string, val *typegen.Deferred) error
 	Modify(key string, from, to *typegen.Deferred) error
 	Remove(key string, val *typegen.Deferred) error
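Note: this hunk and the ones that follow are a mechanical migration of the Keyer helpers from specs-actors' adt package to go-state-types' abi package. The key round trip the interface relies on is unchanged; a sketch, assuming the abi import shown above:

	k := abi.UIntKey(42)          // abi.Keyer wrapping a uint64
	s := k.Key()                  // varint-encoded string used as the raw HAMT key
	n, err := abi.ParseUIntKey(s) // recovers 42 from the raw key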
@@ -11,6 +11,7 @@ import (
 	cbornode "github.com/ipfs/go-ipld-cbor"
 	typegen "github.com/whyrusleeping/cbor-gen"
 
+	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/specs-actors/actors/runtime"
 	"github.com/filecoin-project/specs-actors/actors/util/adt"
 
@@ -78,21 +79,21 @@ func TestDiffAdtMap(t *testing.T) {
 	mapA := adt.MakeEmptyMap(ctxstoreA)
 	mapB := adt.MakeEmptyMap(ctxstoreB)
 
-	require.NoError(t, mapA.Put(adt.UIntKey(0), runtime.CBORBytes([]byte{0}))) // delete
+	require.NoError(t, mapA.Put(abi.UIntKey(0), runtime.CBORBytes([]byte{0}))) // delete
 
-	require.NoError(t, mapA.Put(adt.UIntKey(1), runtime.CBORBytes([]byte{0}))) // modify
-	require.NoError(t, mapB.Put(adt.UIntKey(1), runtime.CBORBytes([]byte{1})))
+	require.NoError(t, mapA.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{0}))) // modify
+	require.NoError(t, mapB.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{1})))
 
-	require.NoError(t, mapA.Put(adt.UIntKey(2), runtime.CBORBytes([]byte{1}))) // delete
+	require.NoError(t, mapA.Put(abi.UIntKey(2), runtime.CBORBytes([]byte{1}))) // delete
 
-	require.NoError(t, mapA.Put(adt.UIntKey(3), runtime.CBORBytes([]byte{0}))) // noop
-	require.NoError(t, mapB.Put(adt.UIntKey(3), runtime.CBORBytes([]byte{0})))
+	require.NoError(t, mapA.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0}))) // noop
+	require.NoError(t, mapB.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0})))
 
-	require.NoError(t, mapA.Put(adt.UIntKey(4), runtime.CBORBytes([]byte{0}))) // modify
-	require.NoError(t, mapB.Put(adt.UIntKey(4), runtime.CBORBytes([]byte{6})))
+	require.NoError(t, mapA.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{0}))) // modify
+	require.NoError(t, mapB.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{6})))
 
-	require.NoError(t, mapB.Put(adt.UIntKey(5), runtime.CBORBytes{8})) // add
-	require.NoError(t, mapB.Put(adt.UIntKey(6), runtime.CBORBytes{9})) // add
+	require.NoError(t, mapB.Put(abi.UIntKey(5), runtime.CBORBytes{8})) // add
+	require.NoError(t, mapB.Put(abi.UIntKey(6), runtime.CBORBytes{9})) // add
 
 	changes := new(TestDiffMap)
 
@@ -134,12 +135,12 @@ type TestDiffMap struct {
 
 var _ AdtMapDiff = &TestDiffMap{}
 
-func (t *TestDiffMap) AsKey(key string) (adt.Keyer, error) {
-	k, err := adt.ParseUIntKey(key)
+func (t *TestDiffMap) AsKey(key string) (abi.Keyer, error) {
+	k, err := abi.ParseUIntKey(key)
 	if err != nil {
 		return nil, err
 	}
-	return adt.UIntKey(k), nil
+	return abi.UIntKey(k), nil
 }
 
 func (t *TestDiffMap) Add(key string, val *typegen.Deferred) error {
@@ -148,7 +149,7 @@ func (t *TestDiffMap) Add(key string, val *typegen.Deferred) error {
 	if err != nil {
 		return err
 	}
-	k, err := adt.ParseUIntKey(key)
+	k, err := abi.ParseUIntKey(key)
 	if err != nil {
 		return err
 	}
@@ -172,7 +173,7 @@ func (t *TestDiffMap) Modify(key string, from, to *typegen.Deferred) error {
 		return err
 	}
 
-	k, err := adt.ParseUIntKey(key)
+	k, err := abi.ParseUIntKey(key)
 	if err != nil {
 		return err
 	}
@@ -198,7 +199,7 @@ func (t *TestDiffMap) Remove(key string, val *typegen.Deferred) error {
 	if err != nil {
 		return err
 	}
-	k, err := adt.ParseUIntKey(key)
+	k, err := abi.ParseUIntKey(key)
 	if err != nil {
 		return err
 	}
@@ -537,8 +537,8 @@ type MinerPreCommitChanges struct {
 	Removed []miner.SectorPreCommitOnChainInfo
 }
 
-func (m *MinerPreCommitChanges) AsKey(key string) (adt.Keyer, error) {
-	sector, err := adt.ParseUIntKey(key)
+func (m *MinerPreCommitChanges) AsKey(key string) (abi.Keyer, error) {
+	sector, err := abi.ParseUIntKey(key)
 	if err != nil {
 		return nil, err
 	}
@@ -662,12 +662,12 @@ type AddressChange struct {
 
 type DiffInitActorStateFunc func(ctx context.Context, oldState *init_.State, newState *init_.State) (changed bool, user UserData, err error)
 
-func (i *InitActorAddressChanges) AsKey(key string) (adt.Keyer, error) {
+func (i *InitActorAddressChanges) AsKey(key string) (abi.Keyer, error) {
 	addr, err := address.NewFromBytes([]byte(key))
 	if err != nil {
 		return nil, err
 	}
-	return adt.AddrKey(addr), nil
+	return abi.AddrKey(addr), nil
 }
 
 func (i *InitActorAddressChanges) Add(key string, val *typegen.Deferred) error {
@@ -6,6 +6,7 @@ import (
 	"fmt"
 
 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
 
 	"github.com/filecoin-project/specs-actors/actors/builtin"
 	"github.com/filecoin-project/specs-actors/actors/util/adt"
@@ -50,7 +51,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
 			fmt.Printf("init set %s t0%d\n", e, counter)
 
 			value := cbg.CborInt(counter)
-			if err := amap.Put(adt.AddrKey(e), &value); err != nil {
+			if err := amap.Put(abi.AddrKey(e), &value); err != nil {
 				return 0, nil, nil, err
 			}
 			counter = counter + 1
@@ -77,7 +78,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
 		fmt.Printf("init set %s t0%d\n", ainfo.Owner, counter)
 
 		value := cbg.CborInt(counter)
-		if err := amap.Put(adt.AddrKey(ainfo.Owner), &value); err != nil {
+		if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil {
 			return 0, nil, nil, err
 		}
 		counter = counter + 1
@@ -95,7 +96,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
 			return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
 		}
 		value := cbg.CborInt(80)
-		if err := amap.Put(adt.AddrKey(ainfo.Owner), &value); err != nil {
+		if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil {
 			return 0, nil, nil, err
 		}
 	} else if rootVerifier.Type == genesis.TMultisig {
@@ -110,7 +111,7 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
 			fmt.Printf("init set %s t0%d\n", e, counter)
 
 			value := cbg.CborInt(counter)
-			if err := amap.Put(adt.AddrKey(e), &value); err != nil {
+			if err := amap.Put(abi.AddrKey(e), &value); err != nil {
 				return 0, nil, nil, err
 			}
 			counter = counter + 1
@@ -32,6 +32,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/vm"
+	"github.com/filecoin-project/lotus/journal"
 	"github.com/filecoin-project/lotus/lib/sigs"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 
@@ -51,6 +52,7 @@ var RepublishInterval = time.Duration(10*build.BlockDelaySecs+build.PropagationD
 
 var minimumBaseFee = types.NewInt(uint64(build.MinimumBaseFee))
 var baseFeeLowerBoundFactor = types.NewInt(10)
+var baseFeeLowerBoundFactorConservative = types.NewInt(100)
 
 var MaxActorPendingMessages = 1000
 
@@ -83,9 +85,25 @@ const (
 	localUpdates = "update"
 )
 
-// this is *temporary* mutilation until we have implemented uncapped miner penalties -- it will go
-// away in the next fork.
-var strictBaseFeeValidation = false
+// Journal event types.
+const (
+	evtTypeMpoolAdd = iota
+	evtTypeMpoolRemove
+	evtTypeMpoolRepub
+)
+
+// MessagePoolEvt is the journal entry for message pool events.
+type MessagePoolEvt struct {
+	Action   string
+	Messages []MessagePoolEvtMessage
+	Error    error `json:",omitempty"`
+}
+
+type MessagePoolEvtMessage struct {
+	types.Message
+
+	CID cid.Cid
+}
 
 func init() {
 	// if the republish interval is too short compared to the pubsub timecache, adjust it
@@ -140,6 +158,8 @@ type MessagePool struct {
 	netName dtypes.NetworkName
 
 	sigValCache *lru.TwoQueueCache
+
+	evtTypes [3]journal.EventType
 }
 
 type msgSet struct {
@@ -316,6 +336,11 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*Messa
 		api:     api,
 		netName: netName,
 		cfg:     cfg,
+		evtTypes: [...]journal.EventType{
+			evtTypeMpoolAdd:    journal.J.RegisterEventType("mpool", "add"),
+			evtTypeMpoolRemove: journal.J.RegisterEventType("mpool", "remove"),
+			evtTypeMpoolRepub:  journal.J.RegisterEventType("mpool", "repub"),
+		},
 	}
 
 	// enable initial prunes
@@ -367,10 +392,12 @@ func (mp *MessagePool) runLoop() {
 			if err := mp.republishPendingMessages(); err != nil {
 				log.Errorf("error while republishing messages: %s", err)
 			}
+
 		case <-mp.pruneTrigger:
 			if err := mp.pruneExcessMessages(); err != nil {
 				log.Errorf("failed to prune excess messages from mempool: %s", err)
 			}
+
 		case <-mp.closer:
 			mp.repubTk.Stop()
 			return
@@ -414,9 +441,19 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
 	// Note that for local messages, we always add them so that they can be accepted and republished
 	// automatically.
 	publish := local
-	if strictBaseFeeValidation && len(curTs.Blocks()) > 0 {
-		baseFee := curTs.Blocks()[0].ParentBaseFee
-		baseFeeLowerBound := getBaseFeeLowerBound(baseFee)
+
+	var baseFee big.Int
+	if len(curTs.Blocks()) > 0 {
+		baseFee = curTs.Blocks()[0].ParentBaseFee
+	} else {
+		var err error
+		baseFee, err = mp.api.ChainComputeBaseFee(context.TODO(), curTs)
+		if err != nil {
+			return false, xerrors.Errorf("computing basefee: %w", err)
+		}
+	}
+
+	baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactorConservative)
 	if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
 		if local {
 			log.Warnf("local message will not be immediately published because GasFeeCap doesn't meet the lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s)",
@@ -427,7 +464,6 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
 				m.Message.GasFeeCap, baseFeeLowerBound, ErrSoftValidationFailure)
 		}
 	}
-	}
 
 	return publish, nil
 }
@@ -700,6 +736,14 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict bool) error {
 		Type:    api.MpoolAdd,
 		Message: m,
 	}, localUpdates)
+
+	journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolAdd], func() interface{} {
+		return MessagePoolEvt{
+			Action:   "add",
+			Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: m.Cid()}},
+		}
+	})
+
 	return nil
 }
 
@@ -862,6 +906,12 @@ func (mp *MessagePool) remove(from address.Address, nonce uint64, applied bool)
 			Message: m,
 		}, localUpdates)
 
+		journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolRemove], func() interface{} {
+			return MessagePoolEvt{
+				Action:   "remove",
+				Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: m.Cid()}}}
+		})
+
 		mp.currentSize--
 	}
 
@@ -1300,8 +1350,8 @@ func (mp *MessagePool) Clear(local bool) {
 	}
 }
 
-func getBaseFeeLowerBound(baseFee types.BigInt) types.BigInt {
-	baseFeeLowerBound := types.BigDiv(baseFee, baseFeeLowerBoundFactor)
+func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt {
+	baseFeeLowerBound := types.BigDiv(baseFee, factor)
 	if baseFeeLowerBound.LessThan(minimumBaseFee) {
 		baseFeeLowerBound = minimumBaseFee
 	}
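Note: getBaseFeeLowerBound now takes its divisor as a parameter. verifyMsgBeforeAdd divides by the conservative factor of 100, producing a lower (more permissive) bound for accepting messages, while pruning and republishing below keep the original factor of 10; minimumBaseFee still applies as a floor in both cases. A worked sketch with a base fee of 1000:

	getBaseFeeLowerBound(types.NewInt(1000), baseFeeLowerBoundFactor)             // 1000/10  = 100
	getBaseFeeLowerBound(types.NewInt(1000), baseFeeLowerBoundFactorConservative) // 1000/100 = 10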
@@ -46,7 +46,7 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
 	if err != nil {
 		return xerrors.Errorf("computing basefee: %w", err)
 	}
-	baseFeeLowerBound := getBaseFeeLowerBound(baseFee)
+	baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)
 
 	pending, _ := mp.getPendingMessages(ts, ts)
 
@@ -11,6 +11,7 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
 	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/journal"
 	"github.com/ipfs/go-cid"
 )
 
@@ -27,7 +28,7 @@ func (mp *MessagePool) republishPendingMessages() error {
 		mp.curTsLk.Unlock()
 		return xerrors.Errorf("computing basefee: %w", err)
 	}
-	baseFeeLowerBound := getBaseFeeLowerBound(baseFee)
+	baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)
 
 	pending := make(map[address.Address]map[uint64]*types.SignedMessage)
 	mp.lk.Lock()
@@ -146,6 +147,19 @@ loop:
 		}
 	}
 
+	if len(msgs) > 0 {
+		journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolRepub], func() interface{} {
+			msgs := make([]MessagePoolEvtMessage, 0, len(msgs))
+			for _, m := range msgs {
+				msgs = append(msgs, MessagePoolEvtMessage{Message: m.Message, CID: m.Cid()})
+			}
+			return MessagePoolEvt{
+				Action:   "repub",
+				Messages: msgs,
+			}
+		})
+	}
+
 	// track most recently republished messages
 	republished := make(map[cid.Cid]struct{})
 	for _, m := range msgs[:count] {
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
 	init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
 
@@ -209,7 +210,7 @@ func (st *StateTree) GetActor(addr address.Address) (*types.Actor, error) {
 	}
 
 	var act types.Actor
-	if found, err := st.root.Get(adt.AddrKey(addr), &act); err != nil {
+	if found, err := st.root.Get(abi.AddrKey(addr), &act); err != nil {
 		return nil, xerrors.Errorf("hamt find failed: %w", err)
 	} else if !found {
 		return nil, types.ErrActorNotFound
@@ -254,11 +255,11 @@ func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
 
 	for addr, sto := range st.snaps.layers[0].actors {
 		if sto.Delete {
-			if err := st.root.Delete(adt.AddrKey(addr)); err != nil {
+			if err := st.root.Delete(abi.AddrKey(addr)); err != nil {
 				return cid.Undef, err
 			}
 		} else {
-			if err := st.root.Put(adt.AddrKey(addr), &sto.Act); err != nil {
+			if err := st.root.Put(abi.AddrKey(addr), &sto.Act); err != nil {
 				return cid.Undef, err
 			}
 		}
@@ -74,8 +74,8 @@ func (ta *testActor) Exports() []interface{} {
 
 func (ta *testActor) Constructor(rt runtime.Runtime, params *abi.EmptyValue) *abi.EmptyValue {
 	rt.ValidateImmediateCallerAcceptAny()
-	rt.State().Create(&testActorState{11})
-	fmt.Println("NEW ACTOR ADDRESS IS: ", rt.Message().Receiver())
+	rt.StateCreate(&testActorState{11})
+	fmt.Println("NEW ACTOR ADDRESS IS: ", rt.Receiver())
 
 	return abi.Empty
 }
@@ -83,7 +83,7 @@ func (ta *testActor) Constructor(rt runtime.Runtime, params *abi.EmptyValue) *ab
 func (ta *testActor) TestMethod(rt runtime.Runtime, params *abi.EmptyValue) *abi.EmptyValue {
 	rt.ValidateImmediateCallerAcceptAny()
 	var st testActorState
-	rt.State().Readonly(&st)
+	rt.StateReadonly(&st)
 
 	if rt.CurrEpoch() > testForkHeight {
 		if st.HasUpgraded != 55 {
@@ -120,7 +120,7 @@ func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr addres
 	}
 
 	var claim power.Claim
-	if _, err := cm.Get(adt.AddrKey(maddr), &claim); err != nil {
+	if _, err := cm.Get(abi.AddrKey(maddr), &claim); err != nil {
 		return power.Claim{}, power.Claim{}, err
 	}
 
@@ -312,7 +312,7 @@ func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, ma
 		return false, err
 	}
 
-	ok, err := claims.Get(power.AddrKey(maddr), nil)
+	ok, err := claims.Get(abi.AddrKey(maddr), nil)
 	if err != nil {
 		return false, err
 	}
@@ -72,6 +72,20 @@ func init() {
 // ReorgNotifee represents a callback that gets called upon reorgs.
 type ReorgNotifee func(rev, app []*types.TipSet) error
 
+// Journal event types.
+const (
+	evtTypeHeadChange = iota
+)
+
+type HeadChangeEvt struct {
+	From        types.TipSetKey
+	FromHeight  abi.ChainEpoch
+	To          types.TipSetKey
+	ToHeight    abi.ChainEpoch
+	RevertCount int
+	ApplyCount  int
+}
+
 // ChainStore is the main point of access to chain data.
 //
 // Raw chain data is stored in the Blockstore, with relevant markers (genesis,
@@ -103,6 +117,8 @@ type ChainStore struct {
 	tsCache *lru.ARCCache
 
 	vmcalls vm.SyscallBuilder
+
+	evtTypes [1]journal.EventType
 }
 
 func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder) *ChainStore {
@@ -118,6 +134,10 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallB
 		vmcalls: vmcalls,
 	}
 
+	cs.evtTypes = [1]journal.EventType{
+		evtTypeHeadChange: journal.J.RegisterEventType("sync", "head_change"),
+	}
+
 	ci := NewChainIndex(cs.LoadTipSet)
 
 	cs.cindex = ci
@@ -344,12 +364,15 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
 				continue
 			}
 
-			journal.Add("sync", map[string]interface{}{
-				"op":    "headChange",
-				"from":  r.old.Key(),
-				"to":    r.new.Key(),
-				"rev":   len(revert),
-				"apply": len(apply),
+			journal.J.RecordEvent(cs.evtTypes[evtTypeHeadChange], func() interface{} {
+				return HeadChangeEvt{
+					From:        r.old.Key(),
+					FromHeight:  r.old.Height(),
+					To:          r.new.Key(),
+					ToHeight:    r.new.Height(),
+					RevertCount: len(revert),
+					ApplyCount:  len(apply),
+				}
 			})
 
 			// reverse the apply array
@@ -58,7 +58,14 @@ import (
 // the theoretical max height based on systime are quickly rejected
 const MaxHeightDrift = 5
 
-var defaultMessageFetchWindowSize = 200
+var (
+	// LocalIncoming is the _local_ pubsub (unrelated to libp2p pubsub) topic
+	// where the Syncer publishes candidate chain heads to be synced.
+	LocalIncoming = "incoming"
+
+	log = logging.Logger("chain")
+	defaultMessageFetchWindowSize = 200
+)
 
 func init() {
 	if s := os.Getenv("LOTUS_BSYNC_MSG_WINDOW"); s != "" {
@@ -71,10 +78,6 @@ func init() {
 	}
 }
 
-var log = logging.Logger("chain")
-
-var LocalIncoming = "incoming"
-
 // Syncer is in charge of running the chain synchronization logic. As such, it
 // is tasked with these functions, amongst others:
 //
@@ -119,7 +122,7 @@ type Syncer struct {
 
 	self peer.ID
 
-	syncmgr *SyncManager
+	syncmgr SyncManager
 
 	connmgr connmgr.ConnManager
 
@@ -140,8 +143,10 @@ type Syncer struct {
 	ds dtypes.MetadataDS
 }
 
+type SyncManagerCtor func(syncFn SyncFunc) SyncManager
+
 // NewSyncer creates a new Syncer object.
-func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) {
+func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, syncMgrCtor SyncManagerCtor, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) {
 	gen, err := sm.ChainStore().GetGenesis()
 	if err != nil {
 		return nil, xerrors.Errorf("getting genesis block: %w", err)
@@ -181,7 +186,7 @@ func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.C
 		log.Warn("*********************************************************************************************")
 	}
 
-	s.syncmgr = NewSyncManager(s.Sync)
+	s.syncmgr = syncMgrCtor(s.Sync)
 	return s, nil
 }
 
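Note: NewSyncer no longer constructs its sync manager internally; it takes a SyncManagerCtor so callers (and tests) can inject an alternative SyncManager implementation. A sketch of the wiring under that assumption, with the arguments as named in the signature above:

	// Default wiring: pass the package constructor straight through.
	syncer, err := chain.NewSyncer(ds, sm, exchange, chain.NewSyncManager, connmgr, self, beacon, verifier)

	// A test double could instead supply:
	//	func(syncFn chain.SyncFunc) chain.SyncManager { return &stubSyncManager{} }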
@@ -648,7 +653,7 @@ func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, b
 	}
 
 	var claim power.Claim
-	exist, err := cm.Get(adt.AddrKey(maddr), &claim)
+	exist, err := cm.Get(abi.AddrKey(maddr), &claim)
 	if err != nil {
 		return err
 	}
@@ -1665,11 +1670,7 @@ func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []b
 }
 
 func (syncer *Syncer) State() []SyncerState {
-	var out []SyncerState
-	for _, ss := range syncer.syncmgr.syncStates {
-		out = append(out, ss.Snapshot())
-	}
-	return out
+	return syncer.syncmgr.State()
 }
 
 // MarkBad manually adds a block to the "bad blocks" cache.
@@ -20,7 +20,28 @@ const (
 
 type SyncFunc func(context.Context, *types.TipSet) error
 
-type SyncManager struct {
+// SyncManager manages the chain synchronization process, both at bootstrap time
+// and during ongoing operation.
+//
+// It receives candidate chain heads in the form of tipsets from peers,
+// and schedules them onto sync workers, deduplicating processing for
+// already-active syncs.
+type SyncManager interface {
+	// Start starts the SyncManager.
+	Start()
+
+	// Stop stops the SyncManager.
+	Stop()
+
+	// SetPeerHead informs the SyncManager that the supplied peer reported the
+	// supplied tipset.
+	SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet)
+
+	// State retrieves the state of the sync workers.
+	State() []SyncerState
+}
+
+type syncManager struct {
 	lk        sync.Mutex
 	peerHeads map[peer.ID]*types.TipSet
 
@@ -48,6 +69,8 @@ type SyncManager struct {
 	workerChan chan *types.TipSet
 }
 
+var _ SyncManager = (*syncManager)(nil)
+
 type syncResult struct {
 	ts      *types.TipSet
 	success bool
@@ -55,8 +78,8 @@ type syncResult struct {
 
 const syncWorkerCount = 3
 
-func NewSyncManager(sync SyncFunc) *SyncManager {
-	return &SyncManager{
+func NewSyncManager(sync SyncFunc) SyncManager {
+	return &syncManager{
 		bspThresh: 1,
 		peerHeads: make(map[peer.ID]*types.TipSet),
 		syncTargets: make(chan *types.TipSet),
@@ -69,18 +92,18 @@ func NewSyncManager(sync SyncFunc) *SyncManager {
 	}
 }
 
-func (sm *SyncManager) Start() {
+func (sm *syncManager) Start() {
 	go sm.syncScheduler()
 	for i := 0; i < syncWorkerCount; i++ {
 		go sm.syncWorker(i)
 	}
 }
 
-func (sm *SyncManager) Stop() {
+func (sm *syncManager) Stop() {
 	close(sm.stop)
 }
 
-func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
+func (sm *syncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
 	sm.lk.Lock()
 	defer sm.lk.Unlock()
 	sm.peerHeads[p] = ts
@@ -105,6 +128,14 @@ func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.Tip
 	sm.incomingTipSets <- ts
 }
 
+func (sm *syncManager) State() []SyncerState {
+	ret := make([]SyncerState, 0, len(sm.syncStates))
+	for _, s := range sm.syncStates {
+		ret = append(ret, s.Snapshot())
+	}
+	return ret
+}
+
 type syncBucketSet struct {
 	buckets []*syncTargetBucket
 }
||||||
@ -234,7 +265,7 @@ func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet {
|
|||||||
return best
|
return best
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) {
|
func (sm *syncManager) selectSyncTarget() (*types.TipSet, error) {
|
||||||
var buckets syncBucketSet
|
var buckets syncBucketSet
|
||||||
|
|
||||||
var peerHeads []*types.TipSet
|
var peerHeads []*types.TipSet
|
||||||
@ -258,7 +289,7 @@ func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) {
|
|||||||
return buckets.Heaviest(), nil
|
return buckets.Heaviest(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SyncManager) syncScheduler() {
|
func (sm *syncManager) syncScheduler() {
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
@ -280,7 +311,7 @@ func (sm *SyncManager) syncScheduler() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) {
|
func (sm *syncManager) scheduleIncoming(ts *types.TipSet) {
|
||||||
log.Debug("scheduling incoming tipset sync: ", ts.Cids())
|
log.Debug("scheduling incoming tipset sync: ", ts.Cids())
|
||||||
if sm.getBootstrapState() == BSStateSelected {
|
if sm.getBootstrapState() == BSStateSelected {
|
||||||
sm.setBootstrapState(BSStateScheduled)
|
sm.setBootstrapState(BSStateScheduled)
|
||||||
@ -328,10 +359,11 @@ func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
|
func (sm *syncManager) scheduleProcessResult(res *syncResult) {
|
||||||
if res.success && sm.getBootstrapState() != BSStateComplete {
|
if res.success && sm.getBootstrapState() != BSStateComplete {
|
||||||
sm.setBootstrapState(BSStateComplete)
|
sm.setBootstrapState(BSStateComplete)
|
||||||
}
|
}
|
||||||
|
|
||||||
delete(sm.activeSyncs, res.ts.Key())
|
delete(sm.activeSyncs, res.ts.Key())
|
||||||
relbucket := sm.activeSyncTips.PopRelated(res.ts)
|
relbucket := sm.activeSyncTips.PopRelated(res.ts)
|
||||||
if relbucket != nil {
|
if relbucket != nil {
|
||||||
@ -360,7 +392,7 @@ func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SyncManager) scheduleWorkSent() {
|
func (sm *syncManager) scheduleWorkSent() {
|
||||||
hts := sm.nextSyncTarget.heaviestTipSet()
|
hts := sm.nextSyncTarget.heaviestTipSet()
|
||||||
sm.activeSyncs[hts.Key()] = hts
|
sm.activeSyncs[hts.Key()] = hts
|
||||||
|
|
||||||
@ -372,7 +404,7 @@ func (sm *SyncManager) scheduleWorkSent() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SyncManager) syncWorker(id int) {
|
func (sm *syncManager) syncWorker(id int) {
|
||||||
ss := &SyncerState{}
|
ss := &SyncerState{}
|
||||||
sm.syncStates[id] = ss
|
sm.syncStates[id] = ss
|
||||||
for {
|
for {
|
||||||
@ -397,7 +429,7 @@ func (sm *SyncManager) syncWorker(id int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SyncManager) syncedPeerCount() int {
|
func (sm *syncManager) syncedPeerCount() int {
|
||||||
var count int
|
var count int
|
||||||
for _, ts := range sm.peerHeads {
|
for _, ts := range sm.peerHeads {
|
||||||
if ts.Height() > 0 {
|
if ts.Height() > 0 {
|
||||||
@ -407,19 +439,19 @@ func (sm *SyncManager) syncedPeerCount() int {
|
|||||||
return count
|
return count
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SyncManager) getBootstrapState() int {
|
func (sm *syncManager) getBootstrapState() int {
|
||||||
sm.bssLk.Lock()
|
sm.bssLk.Lock()
|
||||||
defer sm.bssLk.Unlock()
|
defer sm.bssLk.Unlock()
|
||||||
return sm.bootstrapState
|
return sm.bootstrapState
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SyncManager) setBootstrapState(v int) {
|
func (sm *syncManager) setBootstrapState(v int) {
|
||||||
sm.bssLk.Lock()
|
sm.bssLk.Lock()
|
||||||
defer sm.bssLk.Unlock()
|
defer sm.bssLk.Unlock()
|
||||||
sm.bootstrapState = v
|
sm.bootstrapState = v
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SyncManager) IsBootstrapped() bool {
|
func (sm *syncManager) IsBootstrapped() bool {
|
||||||
sm.bssLk.Lock()
|
sm.bssLk.Lock()
|
||||||
defer sm.bssLk.Unlock()
|
defer sm.bssLk.Unlock()
|
||||||
return sm.bootstrapState == BSStateComplete
|
return sm.bootstrapState == BSStateComplete
|
||||||
|
@@ -17,7 +17,7 @@ type syncOp struct {
 	done func()
 }
 
-func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, *SyncManager, chan *syncOp)) {
+func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, *syncManager, chan *syncOp)) {
 	syncTargets := make(chan *syncOp)
 	sm := NewSyncManager(func(ctx context.Context, ts *types.TipSet) error {
 		ch := make(chan struct{})
@@ -27,7 +27,7 @@ func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T,
 		}
 		<-ch
 		return nil
-	})
+	}).(*syncManager)
 	sm.bspThresh = thresh
 
 	sm.Start()
@@ -77,12 +77,12 @@ func TestSyncManager(t *testing.T) {
 	c3 := mock.TipSet(mock.MkBlock(b, 3, 5))
 	d := mock.TipSet(mock.MkBlock(c1, 4, 5))
 
-	runSyncMgrTest(t, "testBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+	runSyncMgrTest(t, "testBootstrap", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
 		sm.SetPeerHead(ctx, "peer1", c1)
 		assertGetSyncOp(t, stc, c1)
 	})
 
-	runSyncMgrTest(t, "testBootstrap", 2, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+	runSyncMgrTest(t, "testBootstrap", 2, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
 		sm.SetPeerHead(ctx, "peer1", c1)
 		assertNoOp(t, stc)
 
@@ -90,7 +90,7 @@ func TestSyncManager(t *testing.T) {
 		assertGetSyncOp(t, stc, c1)
 	})
 
-	runSyncMgrTest(t, "testSyncAfterBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+	runSyncMgrTest(t, "testSyncAfterBootstrap", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
 		sm.SetPeerHead(ctx, "peer1", b)
 		assertGetSyncOp(t, stc, b)
 
@@ -101,7 +101,7 @@ func TestSyncManager(t *testing.T) {
 		assertGetSyncOp(t, stc, c2)
 	})
 
-	runSyncMgrTest(t, "testCoalescing", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+	runSyncMgrTest(t, "testCoalescing", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
 		sm.SetPeerHead(ctx, "peer1", a)
 		assertGetSyncOp(t, stc, a)
 
@@ -122,7 +122,7 @@ func TestSyncManager(t *testing.T) {
 		assertGetSyncOp(t, stc, d)
 	})
 
-	runSyncMgrTest(t, "testSyncIncomingTipset", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
+	runSyncMgrTest(t, "testSyncIncomingTipset", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
 		sm.SetPeerHead(ctx, "peer1", a)
 		assertGetSyncOp(t, stc, a)
 
@@ -64,7 +64,7 @@ func (inv *Invoker) Invoke(codeCid cid.Cid, rt runtime.Runtime, method abi.Metho
 
 	code, ok := inv.builtInCode[codeCid]
 	if !ok {
-		log.Errorf("no code for actor %s (Addr: %s)", codeCid, rt.Message().Receiver())
+		log.Errorf("no code for actor %s (Addr: %s)", codeCid, rt.Receiver())
 		return nil, aerrors.Newf(exitcode.SysErrorIllegalActor, "no code for actor %s(%d)(%s)", codeCid, method, hex.EncodeToString(params))
 	}
 	if method >= abi.MethodNum(len(code)) || code[method] == nil {
@@ -8,7 +8,9 @@ import (
 	gruntime "runtime"
 	"time"
 
+	"github.com/filecoin-project/go-state-types/cbor"
 	"github.com/filecoin-project/go-state-types/network"
+	rtt "github.com/filecoin-project/go-state-types/rt"
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
@@ -16,11 +18,9 @@ import (
 	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/filecoin-project/go-state-types/exitcode"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
-	"github.com/filecoin-project/specs-actors/actors/runtime"
-	vmr "github.com/filecoin-project/specs-actors/actors/runtime"
+	rt0 "github.com/filecoin-project/specs-actors/actors/runtime"
 	"github.com/ipfs/go-cid"
-	cbor "github.com/ipfs/go-ipld-cbor"
-	cbg "github.com/whyrusleeping/cbor-gen"
+	ipldcbor "github.com/ipfs/go-ipld-cbor"
 	"go.opencensus.io/trace"
 	"golang.org/x/xerrors"
 
@@ -31,20 +31,20 @@ import (
 )
 
 type Runtime struct {
+	types.Message
+	rt0.Syscalls
+
 	ctx context.Context
 
 	vm        *VM
 	state     *state.StateTree
-	vmsg      vmr.Message
 	height    abi.ChainEpoch
-	cst       cbor.IpldStore
+	cst       ipldcbor.IpldStore
 	pricelist Pricelist
 
 	gasAvailable int64
 	gasUsed      int64
 
-	sys runtime.Syscalls
-
 	// address that started invoke chain
 	origin      address.Address
 	originNonce uint64
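Note: Runtime now embeds types.Message and rt0.Syscalls in place of the removed vmsg and sys fields, so their methods are promoted onto Runtime itself; that is what turns rt.Message().Caller() into rt.Caller() in the hunks below. A sketch of the promotion, assuming types.Message implements the specs-actors Message interface (Caller/Receiver/ValueReceived):

	rt := &Runtime{Message: types.Message{From: from, To: to}}
	_ = rt.Caller()   // promoted; previously rt.Message().Caller()
	_ = rt.Receiver() // promoted; previously rt.Message().Receiver()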
@@ -85,11 +85,11 @@ type notFoundErr interface {
 	IsNotFound() bool
 }
 
-func (rt *Runtime) Get(c cid.Cid, o vmr.CBORUnmarshaler) bool {
+func (rt *Runtime) StoreGet(c cid.Cid, o cbor.Unmarshaler) bool {
 	if err := rt.cst.Get(context.TODO(), c, o); err != nil {
 		var nfe notFoundErr
 		if xerrors.As(err, &nfe) && nfe.IsNotFound() {
-			if xerrors.As(err, new(cbor.SerializationError)) {
+			if xerrors.As(err, new(ipldcbor.SerializationError)) {
 				panic(aerrors.Newf(exitcode.ErrSerialization, "failed to unmarshal cbor object %s", err))
 			}
 			return false
@@ -100,10 +100,10 @@ func (rt *Runtime) Get(c cid.Cid, o vmr.CBORUnmarshaler) bool {
 	return true
 }
 
-func (rt *Runtime) Put(x vmr.CBORMarshaler) cid.Cid {
+func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid {
 	c, err := rt.cst.Put(context.TODO(), x)
 	if err != nil {
-		if xerrors.As(err, new(cbor.SerializationError)) {
+		if xerrors.As(err, new(ipldcbor.SerializationError)) {
 			panic(aerrors.Newf(exitcode.ErrSerialization, "failed to marshal cbor object %s", err))
 		}
 		panic(aerrors.Fatalf("failed to put cbor object: %s", err))
@@ -111,7 +111,7 @@ func (rt *Runtime) Put(x vmr.CBORMarshaler) cid.Cid {
 	return c
 }
 
-var _ vmr.Runtime = (*Runtime)(nil)
+var _ rt0.Runtime = (*Runtime)(nil)
 
 func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) {
 	defer func() {
@@ -139,7 +139,7 @@ func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.Act
 		return ret, nil
 	case *abi.EmptyValue:
 		return nil, nil
-	case cbg.CBORMarshaler:
+	case cbor.Marshaler:
 		buf := new(bytes.Buffer)
 		if err := ret.MarshalCBOR(buf); err != nil {
 			return nil, aerrors.Absorb(err, 2, "failed to marshal response to cbor")
@@ -152,17 +152,13 @@ func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.Act
 	}
 }
 
-func (rt *Runtime) Message() vmr.Message {
-	return rt.vmsg
-}
-
 func (rt *Runtime) ValidateImmediateCallerAcceptAny() {
 	rt.abortIfAlreadyValidated()
 	return
 }
 
 func (rt *Runtime) CurrentBalance() abi.TokenAmount {
-	b, err := rt.GetBalance(rt.Message().Receiver())
+	b, err := rt.GetBalance(rt.Receiver())
 	if err != nil {
 		rt.Abortf(err.RetCode(), "get current balance: %v", err)
 	}
@@ -198,10 +194,6 @@ func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparati
 	return res
 }
 
-func (rt *Runtime) Store() vmr.Store {
-	return rt
-}
-
 func (rt *Runtime) NewActorAddress() address.Address {
 	var b bytes.Buffer
 	oa, _ := ResolveToKeyAddr(rt.vm.cstate, rt.vm.cst, rt.origin)
@@ -258,7 +250,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
 // May only be called by the actor itself.
 func (rt *Runtime) DeleteActor(beneficiary address.Address) {
 	rt.chargeGas(rt.Pricelist().OnDeleteActor())
-	act, err := rt.state.GetActor(rt.Message().Receiver())
+	act, err := rt.state.GetActor(rt.Receiver())
 	if err != nil {
 		if xerrors.Is(err, types.ErrActorNotFound) {
 			rt.Abortf(exitcode.SysErrorIllegalActor, "failed to load actor in delete actor: %s", err)
@@ -267,36 +259,32 @@ func (rt *Runtime) DeleteActor(beneficiary address.Address) {
 	}
 	if !act.Balance.IsZero() {
 		// Transfer the executing actor's balance to the beneficiary
-		if err := rt.vm.transfer(rt.Message().Receiver(), beneficiary, act.Balance); err != nil {
+		if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil {
 			panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err))
 		}
 	}
 
 	// Delete the executing actor
-	if err := rt.state.DeleteActor(rt.Message().Receiver()); err != nil {
+	if err := rt.state.DeleteActor(rt.Receiver()); err != nil {
 		panic(aerrors.Fatalf("failed to delete actor: %s", err))
 	}
 	_ = rt.chargeGasSafe(gasOnActorExec)
 }
 
-func (rt *Runtime) Syscalls() vmr.Syscalls {
-	return rt.sys
-}
-
-func (rt *Runtime) StartSpan(name string) vmr.TraceSpan {
+func (rt *Runtime) StartSpan(name string) func() {
 	panic("implement me")
 }
 
 func (rt *Runtime) ValidateImmediateCallerIs(as ...address.Address) {
 	rt.abortIfAlreadyValidated()
-	imm := rt.Message().Caller()
+	imm := rt.Caller()
 
 	for _, a := range as {
 		if imm == a {
 			return
 		}
 	}
-	rt.Abortf(exitcode.SysErrForbidden, "caller %s is not one of %s", rt.Message().Caller(), as)
+	rt.Abortf(exitcode.SysErrForbidden, "caller %s is not one of %s", rt.Caller(), as)
 }
 
 func (rt *Runtime) Context() context.Context {
@ -314,7 +302,7 @@ func (rt *Runtime) AbortStateMsg(msg string) {
|
|||||||
|
|
||||||
func (rt *Runtime) ValidateImmediateCallerType(ts ...cid.Cid) {
|
func (rt *Runtime) ValidateImmediateCallerType(ts ...cid.Cid) {
|
||||||
rt.abortIfAlreadyValidated()
|
rt.abortIfAlreadyValidated()
|
||||||
callerCid, ok := rt.GetActorCodeCID(rt.Message().Caller())
|
callerCid, ok := rt.GetActorCodeCID(rt.Caller())
|
||||||
if !ok {
|
if !ok {
|
||||||
panic(aerrors.Fatalf("failed to lookup code cid for caller"))
|
panic(aerrors.Fatalf("failed to lookup code cid for caller"))
|
||||||
}
|
}
|
||||||
@ -334,11 +322,11 @@ type dumbWrapperType struct {
|
|||||||
val []byte
|
val []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dwt *dumbWrapperType) Into(um vmr.CBORUnmarshaler) error {
|
func (dwt *dumbWrapperType) Into(um cbor.Unmarshaler) error {
|
||||||
return um.UnmarshalCBOR(bytes.NewReader(dwt.val))
|
return um.UnmarshalCBOR(bytes.NewReader(dwt.val))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m vmr.CBORMarshaler, value abi.TokenAmount) (vmr.SendReturn, exitcode.ExitCode) {
|
func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m cbor.Marshaler, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode {
|
||||||
if !rt.allowInternal {
|
if !rt.allowInternal {
|
||||||
rt.Abortf(exitcode.SysErrorIllegalActor, "runtime.Send() is currently disallowed")
|
rt.Abortf(exitcode.SysErrorIllegalActor, "runtime.Send() is currently disallowed")
|
||||||
}
|
}
|
||||||
@ -351,16 +339,20 @@ func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m vmr.CBORMars
|
|||||||
params = buf.Bytes()
|
params = buf.Bytes()
|
||||||
}
|
}
|
||||||
|
|
||||||
ret, err := rt.internalSend(rt.Message().Receiver(), to, method, value, params)
|
ret, err := rt.internalSend(rt.Receiver(), to, method, value, params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err.IsFatal() {
|
if err.IsFatal() {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
log.Warnf("vmctx send failed: to: %s, method: %d: ret: %d, err: %s", to, method, ret, err)
|
log.Warnf("vmctx send failed: to: %s, method: %d: ret: %d, err: %s", to, method, ret, err)
|
||||||
return &dumbWrapperType{nil}, err.RetCode()
|
return err.RetCode()
|
||||||
}
|
}
|
||||||
_ = rt.chargeGasSafe(gasOnActorExec)
|
_ = rt.chargeGasSafe(gasOnActorExec)
|
||||||
return &dumbWrapperType{ret}, 0
|
|
||||||
|
if err := out.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
|
||||||
|
rt.Abortf(exitcode.ErrSerialization, "failed to unmarshal return value: %s", err)
|
||||||
|
}
|
||||||
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum, value types.BigInt, params []byte) ([]byte, aerrors.ActorError) {
|
func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum, value types.BigInt, params []byte) ([]byte, aerrors.ActorError) {
|
||||||
@ -404,49 +396,41 @@ func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum,
|
|||||||
return ret, errSend
|
return ret, errSend
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rt *Runtime) State() vmr.StateHandle {
|
func (rt *Runtime) StateCreate(obj cbor.Marshaler) {
|
||||||
return &shimStateHandle{rt: rt}
|
c := rt.StorePut(obj)
|
||||||
}
|
err := rt.stateCommit(EmptyObjectCid, c)
|
||||||
|
|
||||||
type shimStateHandle struct {
|
|
||||||
rt *Runtime
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ssh *shimStateHandle) Create(obj vmr.CBORMarshaler) {
|
|
||||||
c := ssh.rt.Put(obj)
|
|
||||||
err := ssh.rt.stateCommit(EmptyObjectCid, c)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("failed to commit state after creating object: %w", err))
|
panic(fmt.Errorf("failed to commit state after creating object: %w", err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ssh *shimStateHandle) Readonly(obj vmr.CBORUnmarshaler) {
|
func (rt *Runtime) StateReadonly(obj cbor.Unmarshaler) {
|
||||||
act, err := ssh.rt.state.GetActor(ssh.rt.Message().Receiver())
|
act, err := rt.state.GetActor(rt.Receiver())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ssh.rt.Abortf(exitcode.SysErrorIllegalArgument, "failed to get actor for Readonly state: %s", err)
|
rt.Abortf(exitcode.SysErrorIllegalArgument, "failed to get actor for Readonly state: %s", err)
|
||||||
}
|
}
|
||||||
ssh.rt.Get(act.Head, obj)
|
rt.StoreGet(act.Head, obj)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ssh *shimStateHandle) Transaction(obj vmr.CBORer, f func()) {
|
func (rt *Runtime) StateTransaction(obj cbor.Er, f func()) {
|
||||||
if obj == nil {
|
if obj == nil {
|
||||||
ssh.rt.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil to Transaction()")
|
rt.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil to Transaction()")
|
||||||
}
|
}
|
||||||
|
|
||||||
act, err := ssh.rt.state.GetActor(ssh.rt.Message().Receiver())
|
act, err := rt.state.GetActor(rt.Receiver())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ssh.rt.Abortf(exitcode.SysErrorIllegalActor, "failed to get actor for Transaction: %s", err)
|
rt.Abortf(exitcode.SysErrorIllegalActor, "failed to get actor for Transaction: %s", err)
|
||||||
}
|
}
|
||||||
baseState := act.Head
|
baseState := act.Head
|
||||||
ssh.rt.Get(baseState, obj)
|
rt.StoreGet(baseState, obj)
|
||||||
|
|
||||||
ssh.rt.allowInternal = false
|
rt.allowInternal = false
|
||||||
f()
|
f()
|
||||||
ssh.rt.allowInternal = true
|
rt.allowInternal = true
|
||||||
|
|
||||||
c := ssh.rt.Put(obj)
|
c := rt.StorePut(obj)
|
||||||
|
|
||||||
err = ssh.rt.stateCommit(baseState, c)
|
err = rt.stateCommit(baseState, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("failed to commit state after transaction: %w", err))
|
panic(fmt.Errorf("failed to commit state after transaction: %w", err))
|
||||||
}
|
}
|
||||||
@ -466,7 +450,7 @@ func (rt *Runtime) GetBalance(a address.Address) (types.BigInt, aerrors.ActorErr
|
|||||||
|
|
||||||
func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError {
|
func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError {
|
||||||
// TODO: we can make this more efficient in the future...
|
// TODO: we can make this more efficient in the future...
|
||||||
act, err := rt.state.GetActor(rt.Message().Receiver())
|
act, err := rt.state.GetActor(rt.Receiver())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return aerrors.Escalate(err, "failed to get actor to commit state")
|
return aerrors.Escalate(err, "failed to get actor to commit state")
|
||||||
}
|
}
|
||||||
@ -477,7 +461,7 @@ func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError {
|
|||||||
|
|
||||||
act.Head = newh
|
act.Head = newh
|
||||||
|
|
||||||
if err := rt.state.SetActor(rt.Message().Receiver(), act); err != nil {
|
if err := rt.state.SetActor(rt.Receiver(), act); err != nil {
|
||||||
return aerrors.Fatalf("failed to set actor in commit state: %s", err)
|
return aerrors.Fatalf("failed to set actor in commit state: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -572,15 +556,15 @@ func (rt *Runtime) abortIfAlreadyValidated() {
|
|||||||
rt.callerValidated = true
|
rt.callerValidated = true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rt *Runtime) Log(level vmr.LogLevel, msg string, args ...interface{}) {
|
func (rt *Runtime) Log(level rtt.LogLevel, msg string, args ...interface{}) {
|
||||||
switch level {
|
switch level {
|
||||||
case vmr.DEBUG:
|
case rtt.DEBUG:
|
||||||
actorLog.Debugf(msg, args...)
|
actorLog.Debugf(msg, args...)
|
||||||
case vmr.INFO:
|
case rtt.INFO:
|
||||||
actorLog.Infof(msg, args...)
|
actorLog.Infof(msg, args...)
|
||||||
case vmr.WARN:
|
case rtt.WARN:
|
||||||
actorLog.Warnf(msg, args...)
|
actorLog.Warnf(msg, args...)
|
||||||
case vmr.ERROR:
|
case rtt.ERROR:
|
||||||
actorLog.Errorf(msg, args...)
|
actorLog.Errorf(msg, args...)
|
||||||
}
|
}
|
||||||
}
|
}
|
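The Send refactor above inverts the return-value flow: instead of handing back a wrapper that the caller unmarshals via Into, the caller now passes its output object in as the final cbor.Er argument and gets back only an exit code. A minimal sketch of the new call shape from an actor's point of view; SomeReturn, toAddr, methodNum, params and value are illustrative names, not from this commit:

```go
// Inside a hypothetical actor method; rt is the runtime handed to the actor.
var out SomeReturn // any CBOR-decodable value satisfying cbor.Er
code := rt.Send(
	toAddr,    // destination actor address
	methodNum, // abi.MethodNum to invoke on it
	params,    // cbor.Marshaler carrying the encoded call parameters
	value,     // abi.TokenAmount transferred with the call
	&out,      // decoded in-place when the send succeeds
)
if code != exitcode.Ok {
	rt.Abortf(code, "nested send failed")
}
```

The puppet-actor hunk later in this diff shows the same pattern with runtime.CBORBytes as the output type.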
@@ -42,6 +42,6 @@ func TestRuntimePutErrors(t *testing.T) {
 		cst: cbor.NewCborStore(nil),
 	}

-	rt.Put(&NotAVeryGoodMarshaler{})
+	rt.StorePut(&NotAVeryGoodMarshaler{})
 	t.Error("expected panic")
 }
@@ -116,7 +116,7 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, origin addres
 		Blocks: &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks},
 		Atlas:  vm.cst.Atlas,
 	}
-	rt.sys = pricedSyscalls{
+	rt.Syscalls = pricedSyscalls{
 		under:     vm.Syscalls(ctx, vm.cstate, rt.cst),
 		chargeGas: rt.chargeGasFunc(1),
 		pl:        rt.pricelist,
@@ -128,7 +128,7 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, origin addres
 		rt.Abortf(exitcode.SysErrInvalidReceiver, "resolve msg.From address failed")
 	}
 	vmm.From = resF
-	rt.vmsg = &vmm
+	rt.Message = vmm

 	return rt
 }
@@ -700,9 +700,9 @@ func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params
 	defer span.End()
 	if span.IsRecordingEvents() {
 		span.AddAttributes(
-			trace.StringAttribute("to", rt.Message().Receiver().String()),
+			trace.StringAttribute("to", rt.Receiver().String()),
 			trace.Int64Attribute("method", int64(method)),
-			trace.StringAttribute("value", rt.Message().ValueReceived().String()),
+			trace.StringAttribute("value", rt.ValueReceived().String()),
 		)
 	}

@@ -659,7 +659,7 @@ func handleHamtEpoch(ctx context.Context, api api.FullNode, r cid.Cid) error {
 	}

 	return mp.ForEach(nil, func(key string) error {
-		ik, err := adt.ParseIntKey(key)
+		ik, err := abi.ParseIntKey(key)
 		if err != nil {
 			return err
 		}
@@ -29,6 +29,7 @@ var paychCmd = &cli.Command{
 		paychVoucherCmd,
 		paychSettleCmd,
 		paychStatusCmd,
+		paychStatusByFromToCmd,
 		paychCloseCmd,
 	},
 }
@@ -103,6 +104,7 @@ var paychStatusByFromToCmd = &cli.Command{
 		if cctx.Args().Len() != 2 {
 			return ShowHelp(cctx, fmt.Errorf("must pass two arguments: <from address> <to address>"))
 		}
+		ctx := ReqContext(cctx)

 		from, err := address.NewFromString(cctx.Args().Get(0))
 		if err != nil {
@@ -120,7 +122,7 @@ var paychStatusByFromToCmd = &cli.Command{
 		}
 		defer closer()

-		avail, err := api.PaychAvailableFundsByFromTo(from, to)
+		avail, err := api.PaychAvailableFundsByFromTo(ctx, from, to)
 		if err != nil {
 			return err
 		}
@@ -138,6 +140,7 @@ var paychStatusCmd = &cli.Command{
 		if cctx.Args().Len() != 1 {
 			return ShowHelp(cctx, fmt.Errorf("must pass an argument: <channel address>"))
 		}
+		ctx := ReqContext(cctx)

 		ch, err := address.NewFromString(cctx.Args().Get(0))
 		if err != nil {
@@ -150,7 +153,7 @@ var paychStatusCmd = &cli.Command{
 		}
 		defer closer()

-		avail, err := api.PaychAvailableFunds(ch)
+		avail, err := api.PaychAvailableFunds(ctx, ch)
 		if err != nil {
 			return err
 		}
cli/state.go
@@ -54,6 +54,7 @@ var stateCmd = &cli.Command{
 		stateListActorsCmd,
 		stateListMinersCmd,
 		stateCircSupplyCmd,
+		stateSectorCmd,
 		stateGetActorCmd,
 		stateLookupIDCmd,
 		stateReplaySetCmd,
@@ -119,6 +120,13 @@ var stateMinerInfo = &cli.Command{
 		}
 		fmt.Println()

+		cd, err := api.StateMinerProvingDeadline(ctx, addr, ts.Key())
+		if err != nil {
+			return xerrors.Errorf("getting miner info: %w", err)
+		}
+
+		fmt.Printf("Proving Period Start:\t%s\n", EpochTime(cd.CurrentEpoch, cd.PeriodStart))
+
 		return nil
 	},
 }
@@ -1554,6 +1562,77 @@ var stateCircSupplyCmd = &cli.Command{
 	},
 }

+var stateSectorCmd = &cli.Command{
+	Name:      "sector",
+	Usage:     "Get miner sector info",
+	ArgsUsage: "[miner address] [sector number]",
+	Action: func(cctx *cli.Context) error {
+		api, closer, err := GetFullNodeAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		ctx := ReqContext(cctx)
+
+		if cctx.Args().Len() != 2 {
+			return xerrors.Errorf("expected 2 params")
+		}
+
+		ts, err := LoadTipSet(ctx, cctx, api)
+		if err != nil {
+			return err
+		}
+
+		if ts == nil {
+			ts, err = api.ChainHead(ctx)
+			if err != nil {
+				return err
+			}
+		}
+
+		maddr, err := address.NewFromString(cctx.Args().Get(0))
+		if err != nil {
+			return err
+		}
+
+		sid, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64)
+		if err != nil {
+			return err
+		}
+
+		si, err := api.StateSectorGetInfo(ctx, maddr, abi.SectorNumber(sid), ts.Key())
+		if err != nil {
+			return err
+		}
+
+		fmt.Println("SectorNumber: ", si.SectorNumber)
+		fmt.Println("SealProof: ", si.SealProof)
+		fmt.Println("SealedCID: ", si.SealedCID)
+		fmt.Println("DealIDs: ", si.DealIDs)
+		fmt.Println()
+		fmt.Println("Activation: ", EpochTime(ts.Height(), si.Activation))
+		fmt.Println("Expiration: ", EpochTime(ts.Height(), si.Expiration))
+		fmt.Println()
+		fmt.Println("DealWeight: ", si.DealWeight)
+		fmt.Println("VerifiedDealWeight: ", si.VerifiedDealWeight)
+		fmt.Println("InitialPledge: ", types.FIL(si.InitialPledge))
+		fmt.Println("ExpectedDayReward: ", types.FIL(si.ExpectedDayReward))
+		fmt.Println("ExpectedStoragePledge: ", types.FIL(si.ExpectedStoragePledge))
+		fmt.Println()
+
+		sp, err := api.StateSectorPartition(ctx, maddr, abi.SectorNumber(sid), ts.Key())
+		if err != nil {
+			return err
+		}
+
+		fmt.Println("Deadline: ", sp.Deadline)
+		fmt.Println("Partition: ", sp.Partition)
+
+		return nil
+	},
+}
+
 var stateMarketCmd = &cli.Command{
 	Name:  "market",
 	Usage: "Inspect the storage market actor",
cli/util.go
@@ -2,10 +2,16 @@ package cli

 import (
 	"context"
+	"fmt"
+	"time"
+
+	"github.com/ipfs/go-cid"
+
+	"github.com/filecoin-project/go-state-types/abi"
+
 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/ipfs/go-cid"
 )

 func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.TipSet, error) {
@@ -26,3 +32,16 @@ func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.T

 	return types.NewTipSet(headers)
 }
+
+func EpochTime(curr, e abi.ChainEpoch) string {
+	switch {
+	case curr > e:
+		return fmt.Sprintf("%d (%s ago)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e)))
+	case curr == e:
+		return fmt.Sprintf("%d (now)", e)
+	case curr < e:
+		return fmt.Sprintf("%d (in %s)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr)))
+	}
+
+	panic("math broke")
+}
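EpochTime converts an epoch difference into wall-clock time by scaling with build.BlockDelaySecs. A quick, hypothetical sanity check of that arithmetic (assuming a 30-second block delay; the constant varies by network build), not part of this commit:

```go
package cli

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

// ExampleEpochTime is a hypothetical check that would live in a _test.go
// file alongside util.go. With build.BlockDelaySecs == 30, a current epoch
// 120 blocks past e is 3600s, which time.Duration prints as "1h0m0s".
func ExampleEpochTime() {
	fmt.Println(EpochTime(abi.ChainEpoch(1120), abi.ChainEpoch(1000)))
	// Output: 1000 (1h0m0s ago)
}
```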
@@ -221,7 +221,7 @@ func (p *Processor) processMiners(ctx context.Context, minerTips map[types.TipSe
 		var claim power.Claim
 		// get miner claim from power actors claim map and store if found, else the miner had no claim at
 		// this tipset
-		found, err := minersClaims.Get(adt.AddrKey(act.addr), &claim)
+		found, err := minersClaims.Get(abi.AddrKey(act.addr), &claim)
 		if err != nil {
 			return nil, err
 		}
@@ -136,7 +136,8 @@ create unique index if not exists block_cid_uindex
 	on blocks (cid,height);

 create materialized view if not exists state_heights
-    as select distinct height, parentstateroot from blocks;
+    as select min(b.height) height, b.parentstateroot
+    from blocks b group by b.parentstateroot;

 create index if not exists state_heights_height_index
 	on state_heights (height);
cmd/lotus-gateway/api.go (new file)
@@ -0,0 +1,90 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/ipfs/go-cid"
+
+	"go.opencensus.io/trace"
+)
+
+const LookbackCap = time.Hour
+
+var (
+	ErrLookbackTooLong = fmt.Errorf("lookbacks of more than %s are disallowed", LookbackCap)
+)
+
+type GatewayAPI struct {
+	api api.FullNode
+}
+
+func (a *GatewayAPI) getTipsetTimestamp(ctx context.Context, tsk types.TipSetKey) (time.Time, error) {
+	if tsk.IsEmpty() {
+		return time.Now(), nil
+	}
+
+	ts, err := a.api.ChainGetTipSet(ctx, tsk)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	return time.Unix(int64(ts.Blocks()[0].Timestamp), 0), nil
+}
+
+func (a *GatewayAPI) checkTipset(ctx context.Context, ts types.TipSetKey) error {
+	when, err := a.getTipsetTimestamp(ctx, ts)
+	if err != nil {
+		return err
+	}
+
+	if time.Since(when) > time.Hour {
+		return ErrLookbackTooLong
+	}
+
+	return nil
+}
+
+func (a *GatewayAPI) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) {
+	ctx, span := trace.StartSpan(ctx, "StateGetActor")
+	defer span.End()
+
+	if err := a.checkTipset(ctx, ts); err != nil {
+		return nil, fmt.Errorf("bad tipset: %w", err)
+	}
+
+	return a.api.StateGetActor(ctx, actor, ts)
+}
+
+func (a *GatewayAPI) ChainHead(ctx context.Context) (*types.TipSet, error) {
+	ctx, span := trace.StartSpan(ctx, "ChainHead")
+	defer span.End()
+	// TODO: cache and invalidate cache when timestamp is up (or have internal ChainNotify)
+
+	return a.api.ChainHead(ctx)
+}
+
+func (a *GatewayAPI) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
+	ctx, span := trace.StartSpan(ctx, "ChainGetTipSet")
+	defer span.End()
+
+	if err := a.checkTipset(ctx, tsk); err != nil {
+		return nil, fmt.Errorf("bad tipset: %w", err)
+	}
+
+	// TODO: since we're limiting lookbacks, should just cache this (could really even cache the json response bytes)
+	return a.api.ChainGetTipSet(ctx, tsk)
+}
+
+func (a *GatewayAPI) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) {
+	ctx, span := trace.StartSpan(ctx, "MpoolPush")
+	defer span.End()
+
+	// TODO: additional anti-spam checks
+
+	return a.api.MpoolPush(ctx, sm)
+}
cmd/lotus-gateway/main.go (new file)
@@ -0,0 +1,112 @@
+package main
+
+import (
+	"context"
+	"net"
+	"net/http"
+	"os"
+
+	"github.com/filecoin-project/go-jsonrpc"
+	"github.com/filecoin-project/lotus/build"
+	lcli "github.com/filecoin-project/lotus/cli"
+	"github.com/filecoin-project/lotus/lib/lotuslog"
+	logging "github.com/ipfs/go-log"
+
+	"github.com/gorilla/mux"
+	"github.com/urfave/cli/v2"
+)
+
+var log = logging.Logger("gateway")
+
+func main() {
+	lotuslog.SetupLogLevels()
+
+	local := []*cli.Command{
+		runCmd,
+	}
+
+	app := &cli.App{
+		Name:    "lotus-gateway",
+		Usage:   "Public API server for lotus",
+		Version: build.UserVersion(),
+		Flags: []cli.Flag{
+			&cli.StringFlag{
+				Name:    "repo",
+				EnvVars: []string{"LOTUS_PATH"},
+				Value:   "~/.lotus", // TODO: Consider XDG_DATA_HOME
+			},
+		},
+
+		Commands: local,
+	}
+	app.Setup()
+
+	if err := app.Run(os.Args); err != nil {
+		log.Warnf("%+v", err)
+		return
+	}
+}
+
+var runCmd = &cli.Command{
+	Name:  "run",
+	Usage: "Start api server",
+	Flags: []cli.Flag{
+		&cli.StringFlag{
+			Name:  "listen",
+			Usage: "host address and port the api server will listen on",
+			Value: "0.0.0.0:2346",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		log.Info("Starting lotus gateway")
+
+		ctx := lcli.ReqContext(cctx)
+		ctx, cancel := context.WithCancel(ctx)
+		defer cancel()
+
+		api, closer, err := lcli.GetFullNodeAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		address := cctx.String("listen")
+		mux := mux.NewRouter()
+
+		log.Info("Setting up API endpoint at " + address)
+
+		rpcServer := jsonrpc.NewServer()
+		rpcServer.Register("Filecoin", &GatewayAPI{api: api})
+
+		mux.Handle("/rpc/v0", rpcServer)
+		mux.PathPrefix("/").Handler(http.DefaultServeMux)
+
+		/*ah := &auth.Handler{
+			Verify: nodeApi.AuthVerify,
+			Next:   mux.ServeHTTP,
+		}*/
+
+		srv := &http.Server{
+			Handler: mux,
+			BaseContext: func(listener net.Listener) context.Context {
+				return ctx
+			},
+		}
+
+		go func() {
+			<-ctx.Done()
+			log.Warn("Shutting down...")
+			if err := srv.Shutdown(context.TODO()); err != nil {
+				log.Errorf("shutting down RPC server failed: %s", err)
+			}
+			log.Warn("Graceful shutdown successful")
+		}()
+
+		nl, err := net.Listen("tcp", address)
+		if err != nil {
+			return err
+		}
+
+		return srv.Serve(nl)
+	},
+}
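With the two files above, the gateway is a thin JSON-RPC proxy: whatever GatewayAPI registers on the Filecoin namespace at /rpc/v0 is forwarded to the underlying full node, subject to the lookback cap. A minimal client sketch using only the standard library; the address matches runCmd's default listen port, and nothing here is part of this commit:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// JSON-RPC 2.0 call to Filecoin.ChainHead, one of the four methods the
	// gateway exposes. MpoolPush, ChainGetTipSet and StateGetActor use the
	// same envelope with their arguments in the "params" array.
	req := []byte(`{"jsonrpc":"2.0","method":"Filecoin.ChainHead","params":[],"id":1}`)

	resp, err := http.Post("http://127.0.0.1:2346/rpc/v0", "application/json", bytes.NewReader(req))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // raw tipset JSON proxied from the full node
}
```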
@@ -7,6 +7,7 @@ import (
 	"golang.org/x/xerrors"

 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
 	"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
 	"github.com/filecoin-project/specs-actors/actors/util/adt"
@@ -336,7 +337,7 @@ var verifRegCheckVerifierCmd = &cli.Command{
 		}

 		var dcap verifreg.DataCap
-		if found, err := vh.Get(adt.AddrKey(vaddr), &dcap); err != nil {
+		if found, err := vh.Get(abi.AddrKey(vaddr), &dcap); err != nil {
 			return err
 		} else if !found {
 			return fmt.Errorf("not found")
@@ -45,6 +45,7 @@ import (
 	lcli "github.com/filecoin-project/lotus/cli"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
 	"github.com/filecoin-project/lotus/genesis"
+	"github.com/filecoin-project/lotus/journal"
 	"github.com/filecoin-project/lotus/miner"
 	"github.com/filecoin-project/lotus/node/modules"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
@@ -462,6 +463,12 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
 		return err
 	}

+	if jrnl, err := journal.OpenFSJournal(lr, journal.DefaultDisabledEvents); err == nil {
+		journal.J = jrnl
+	} else {
+		return fmt.Errorf("failed to open filesystem journal: %w", err)
+	}
+
 	m := miner.NewMiner(api, epp, a, slashfilter.New(mds))
 	{
 		if err := m.Start(ctx); err != nil {
@@ -5,16 +5,13 @@ import (
 	"fmt"
 	"os"
 	"text/tabwriter"
-	"time"

 	"github.com/fatih/color"
 	"github.com/urfave/cli/v2"
 	"golang.org/x/xerrors"

-	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/specs-actors/actors/builtin/miner"

-	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
 )
@@ -203,8 +200,8 @@ var provingInfoCmd = &cli.Command{
 		fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch)

 		fmt.Printf("Proving Period Boundary: %d\n", cd.PeriodStart%miner.WPoStProvingPeriod)
-		fmt.Printf("Proving Period Start: %s\n", epochTime(cd.CurrentEpoch, cd.PeriodStart))
-		fmt.Printf("Next Period Start: %s\n\n", epochTime(cd.CurrentEpoch, cd.PeriodStart+miner.WPoStProvingPeriod))
+		fmt.Printf("Proving Period Start: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.PeriodStart))
+		fmt.Printf("Next Period Start: %s\n\n", lcli.EpochTime(cd.CurrentEpoch, cd.PeriodStart+miner.WPoStProvingPeriod))

 		fmt.Printf("Faults: %d (%.2f%%)\n", faults, faultPerc)
 		fmt.Printf("Recovering: %d\n", recovering)
@@ -224,27 +221,14 @@ var provingInfoCmd = &cli.Command{
 			fmt.Printf("Deadline Sectors: %d\n", curDeadlineSectors)
 		}

-		fmt.Printf("Deadline Open: %s\n", epochTime(cd.CurrentEpoch, cd.Open))
-		fmt.Printf("Deadline Close: %s\n", epochTime(cd.CurrentEpoch, cd.Close))
-		fmt.Printf("Deadline Challenge: %s\n", epochTime(cd.CurrentEpoch, cd.Challenge))
-		fmt.Printf("Deadline FaultCutoff: %s\n", epochTime(cd.CurrentEpoch, cd.FaultCutoff))
+		fmt.Printf("Deadline Open: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Open))
+		fmt.Printf("Deadline Close: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Close))
+		fmt.Printf("Deadline Challenge: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Challenge))
+		fmt.Printf("Deadline FaultCutoff: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.FaultCutoff))
 		return nil
 	},
 }

-func epochTime(curr, e abi.ChainEpoch) string {
-	switch {
-	case curr > e:
-		return fmt.Sprintf("%d (%s ago)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e)))
-	case curr == e:
-		return fmt.Sprintf("%d (now)", e)
-	case curr < e:
-		return fmt.Sprintf("%d (in %s)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr)))
-	}
-
-	panic("math broke")
-}
-
 var provingDeadlinesCmd = &cli.Command{
 	Name:  "deadlines",
 	Usage: "View the current proving period deadlines information",
@@ -100,18 +100,14 @@ type SendReturn struct {
 // passed parameters.
 func (a Actor) Send(rt runtime.Runtime, args *SendArgs) *SendReturn {
 	rt.ValidateImmediateCallerAcceptAny()
-	ret, code := rt.Send(
+	var out runtime.CBORBytes
+	code := rt.Send(
 		args.To,
 		args.Method,
 		runtime.CBORBytes(args.Params),
 		args.Value,
+		&out,
 	)
-	var out runtime.CBORBytes
-	if ret != nil {
-		if err := ret.Into(&out); err != nil {
-			rt.Abortf(exitcode.ErrIllegalState, "failed to unmarshal send return: %v", err)
-		}
-	}
 	return &SendReturn{
 		Return: out,
 		Code:   code,
@@ -217,14 +213,14 @@ func (a Actor) MutateState(rt runtime.Runtime, args *MutateStateArgs) *abi.Empty
 	var st State
 	switch args.Branch {
 	case MutateInTransaction:
-		rt.State().Transaction(&st, func() {
+		rt.StateTransaction(&st, func() {
 			st.Value = args.Value
 		})
 	case MutateReadonly:
-		rt.State().Readonly(&st)
+		rt.StateReadonly(&st)
 		st.Value = args.Value
 	case MutateAfterTransaction:
-		rt.State().Transaction(&st, func() {
+		rt.StateTransaction(&st, func() {
 			st.Value = args.Value + "-in"
 		})
 		st.Value = args.Value
@@ -46,7 +46,7 @@ func TestMutateStateInTransaction(t *testing.T) {
 	var a Actor

 	rt.ExpectValidateCallerAny()
-	rt.Create(&State{})
+	rt.StateCreate(&State{})

 	val := "__mutstat test"
 	rt.Call(a.MutateState, &MutateStateArgs{
@@ -72,7 +72,7 @@ func TestMutateStateAfterTransaction(t *testing.T) {
 	var a Actor

 	rt.ExpectValidateCallerAny()
-	rt.Create(&State{})
+	rt.StateCreate(&State{})

 	val := "__mutstat test"
 	rt.Call(a.MutateState, &MutateStateArgs{
@@ -99,7 +99,7 @@ func TestMutateStateReadonly(t *testing.T) {
 	var a Actor

 	rt.ExpectValidateCallerAny()
-	rt.Create(&State{})
+	rt.StateCreate(&State{})

 	val := "__mutstat test"
 	rt.Call(a.MutateState, &MutateStateArgs{
@@ -2375,7 +2375,12 @@ There are not yet any comments for this method.

 Perms: sign

-Inputs: `null`
+Inputs:
+```json
+[
+  "t01234"
+]
+```

 Response:
 ```json
@@ -2399,6 +2404,7 @@ Perms: sign
 Inputs:
 ```json
 [
+  "t01234",
   "t01234"
 ]
 ```
extern/sector-storage/manager.go
@@ -216,46 +216,65 @@ func schedFetch(wf waitFunc, sector abi.SectorID, ft storiface.SectorFileType, p
 	}
 }

-func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error {
+func (m *Manager) readPiece(sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error {
+	return func(ctx context.Context, w Worker) error {
+		r, err := m.waitResult(ctx)(w.ReadPiece(ctx, sink, sector, offset, size))
+		if err != nil {
+			return err
+		}
+		*rok = r.(bool)
+		return nil
+	}
+}
+
+func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) {
+
+	// acquire a lock purely for reading unsealed sectors
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

-	if err := m.index.StorageLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil {
-		return xerrors.Errorf("acquiring sector lock: %w", err)
+	if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTNone); err != nil {
+		returnErr = xerrors.Errorf("acquiring read sector lock: %w", err)
+		return
 	}

 	// passing 0 spt because we only need it when allowFetch is true
 	best, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false)
 	if err != nil {
-		return xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err)
+		returnErr = xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err)
+		return
 	}

-	var readOk bool
-	readPiece := func(ctx context.Context, w Worker) error {
-		r, err := m.waitResult(ctx)(w.ReadPiece(ctx, sink, sector, offset, size))
-		if err != nil {
-			return err
-		}
-		readOk = r.(bool)
-		return nil
-	}
-
-	var selector WorkerSelector
-	if len(best) == 0 { // new
-		selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing)
-	} else {
+	foundUnsealed = len(best) > 0
+	if foundUnsealed { // append to existing
 		// There is unsealed sector, see if we can read from it

 		selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false)

-		err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(m.waitResult, sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), readPiece)
+		err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(m.waitResult, sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove),
+			m.readPiece(sink, sector, offset, size, &readOk))
 		if err != nil {
-			return xerrors.Errorf("reading piece from sealed sector: %w", err)
+			returnErr = xerrors.Errorf("reading piece from sealed sector: %w", err)
 		}
+	} else {
+		selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing)
+	}
+	return
+}
+
+func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error {
+	foundUnsealed, readOk, selector, err := m.tryReadUnsealedPiece(ctx, sink, sector, offset, size)
+	if err != nil {
+		return err
+	}
 	if readOk {
 		return nil
 	}
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	if err := m.index.StorageLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil {
+		return xerrors.Errorf("acquiring unseal sector lock: %w", err)
 	}

 	unsealFetch := func(ctx context.Context, worker Worker) error {
@@ -263,7 +282,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
 			return xerrors.Errorf("copy sealed/cache sector data: %w", err)
 		}

-		if len(best) > 0 {
+		if foundUnsealed {
 			if _, err := m.waitResult(ctx)(worker.Fetch(ctx, sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove)); err != nil {
 				return xerrors.Errorf("copy unsealed sector data: %w", err)
 			}
@@ -284,7 +303,8 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect

 	selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false)

-	err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(m.waitResult, sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), readPiece)
+	err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(m.waitResult, sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove),
+		m.readPiece(sink, sector, offset, size, &readOk))
 	if err != nil {
 		return xerrors.Errorf("reading piece from sealed sector: %w", err)
 	}
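The readPiece refactor above pulls the scheduled closure out of ReadPiece so the same task can be built in both call sites; the result still travels through a captured *bool rather than a return value, because the scheduler only sees a func(ctx, Worker) error. A stripped-down illustration of that shape (names are illustrative, not lotus APIs):

```go
package main

import (
	"context"
	"fmt"
)

// makeTask mirrors how Manager.readPiece reports success: the closure writes
// through the pointer it captured, and the caller inspects the flag after the
// scheduler has run the task.
func makeTask(ok *bool) func(ctx context.Context) error {
	return func(ctx context.Context) error {
		*ok = true // in manager.go this is the bool result of w.ReadPiece
		return nil
	}
}

func main() {
	var readOk bool
	task := makeTask(&readOk)
	if err := task(context.Background()); err != nil {
		panic(err)
	}
	fmt.Println("readOk:", readOk) // readOk: true
}
```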
extern/storage-sealing/fsm.go
@@ -189,6 +189,12 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
 		state.Log = append(state.Log, l)
 	}

+	if m.notifee != nil {
+		defer func(before SectorInfo) {
+			m.notifee(before, *state)
+		}(*state) // take safe-ish copy of the before state (except for nested pointers)
+	}
+
 	p := fsmPlanners[state.State]
 	if p == nil {
 		return nil, 0, xerrors.Errorf("planner for state %s not found", state.State)
extern/storage-sealing/fsm_test.go
@@ -27,6 +27,7 @@ type test struct {
 }

 func TestHappyPath(t *testing.T) {
+	var notif []struct{ before, after SectorInfo }
 	ma, _ := address.NewIDAddress(55151)
 	m := test{
 		s: &Sealing{
@@ -34,6 +35,9 @@ func TestHappyPath(t *testing.T) {
 			stats: SectorStats{
 				bySector: map[abi.SectorID]statSectorState{},
 			},
+			notifee: func(before, after SectorInfo) {
+				notif = append(notif, struct{ before, after SectorInfo }{before, after})
+			},
 		},
 		t:     t,
 		state: &SectorInfo{State: Packing},
@@ -68,6 +72,16 @@ func TestHappyPath(t *testing.T) {

 	m.planSingle(SectorFinalized{})
 	require.Equal(m.t, m.state.State, Proving)
+
+	expected := []SectorState{Packing, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector, Proving}
+	for i, n := range notif {
+		if n.before.State != expected[i] {
+			t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State)
+		}
+		if n.after.State != expected[i+1] {
+			t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State)
+		}
+	}
 }

 func TestSeedRevert(t *testing.T) {
extern/storage-sealing/sealing.go
@@ -61,6 +61,8 @@ type SealingAPI interface {
 	ChainReadObj(context.Context, cid.Cid) ([]byte, error)
 }

+type SectorStateNotifee func(before, after SectorInfo)
+
 type Sealing struct {
 	api    SealingAPI
 	feeCfg FeeConfig
@@ -79,6 +81,8 @@ type Sealing struct {
 	upgradeLk sync.Mutex
 	toUpgrade map[abi.SectorNumber]struct{}

+	notifee SectorStateNotifee
+
 	stats SectorStats

 	getConfig GetSealingConfigFunc
@@ -101,7 +105,7 @@ type UnsealedSectorInfo struct {
 	pieceSizes []abi.UnpaddedPieceSize
 }

-func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc) *Sealing {
+func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee) *Sealing {
 	s := &Sealing{
 		api:    api,
 		feeCfg: fc,
@@ -118,6 +122,9 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds
 		},

 		toUpgrade: map[abi.SectorNumber]struct{}{},

+		notifee: notifee,
+
 		getConfig: gc,

 		stats: SectorStats{
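Taken together with the fsm.go hunk earlier, the notifee is a hook fired via defer after plan() applies each batch of events, receiving shallow before/after copies of the sector state. A minimal consumer sketch that logs transitions; it is hypothetical (assuming SectorInfo's exported State and SectorNumber fields) and would be passed as the new final argument to sealing.New:

```go
package main

import (
	"fmt"

	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
)

// logTransitions is a hypothetical SectorStateNotifee: it prints a line
// whenever a sector's state machine moves to a new state.
func logTransitions(before, after sealing.SectorInfo) {
	if before.State != after.State {
		fmt.Printf("sector %d: %s -> %s\n", after.SectorNumber, before.State, after.State)
	}
}
```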
go.mod
@@ -32,11 +32,11 @@ require (
 	github.com/filecoin-project/go-multistore v0.0.3
 	github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
 	github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261
-	github.com/filecoin-project/go-state-types v0.0.0-20200909080127-001afaca718c
+	github.com/filecoin-project/go-state-types v0.0.0-20200911004822-964d6c679cfc
 	github.com/filecoin-project/go-statemachine v0.0.0-20200813232949-df9b130df370
 	github.com/filecoin-project/go-statestore v0.1.0
 	github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
-	github.com/filecoin-project/specs-actors v0.9.8
+	github.com/filecoin-project/specs-actors v0.9.10
 	github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796
 	github.com/filecoin-project/test-vectors/schema v0.0.1
 	github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
go.sum
@@ -91,7 +91,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
-github.com/briandowns/spinner v1.11.1 h1:OixPqDEcX3juo5AjQZAnFPbeUA0jvkp2qzB5gOZJ/L0=
 github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
 github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
 github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
@@ -241,10 +240,9 @@ github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h
 github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
 github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
 github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
-github.com/filecoin-project/go-state-types v0.0.0-20200905071437-95828685f9df h1:m2esXSuGBkuXlRyCsl1a/7/FkFam63o1OzIgzaHtOfI=
 github.com/filecoin-project/go-state-types v0.0.0-20200905071437-95828685f9df/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
-github.com/filecoin-project/go-state-types v0.0.0-20200909080127-001afaca718c h1:HHRMFpU8OrODDUja5NmGWNBAVGoSy4MRjxgZa+a0qIw=
-github.com/filecoin-project/go-state-types v0.0.0-20200909080127-001afaca718c/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
+github.com/filecoin-project/go-state-types v0.0.0-20200911004822-964d6c679cfc h1:1vr/LoqGq5m5g37Q3sNSAjfwF1uJY0zmiHcvnxY6hik=
+github.com/filecoin-project/go-state-types v0.0.0-20200911004822-964d6c679cfc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
 github.com/filecoin-project/go-statemachine v0.0.0-20200714194326-a77c3ae20989/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
 github.com/filecoin-project/go-statemachine v0.0.0-20200813232949-df9b130df370 h1:Jbburj7Ih2iaJ/o5Q9A+EAeTabME6YII7FLi9SKUf5c=
 github.com/filecoin-project/go-statemachine v0.0.0-20200813232949-df9b130df370/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
@@ -253,10 +251,9 @@ github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZO
 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg=
 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8=
 github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4=
-github.com/filecoin-project/specs-actors v0.9.7 h1:7PAZ8kdqwBdmgf/23FCkQZLCXcVu02XJrkpkhBikiA8=
 github.com/filecoin-project/specs-actors v0.9.7/go.mod h1:wM2z+kwqYgXn5Z7scV1YHLyd1Q1cy0R8HfTIWQ0BFGU=
-github.com/filecoin-project/specs-actors v0.9.8 h1:45fnx/BsseFL3CtvSoR6CszFY26TFtsh9AHwCW2vkg8=
-github.com/filecoin-project/specs-actors v0.9.8/go.mod h1:xFObDoWPySBNTNBrGXVVrutmgSZH/mMo46Q1bec/0hw=
+github.com/filecoin-project/specs-actors v0.9.10 h1:gU0TrRhgkCsBEOP42sGDE7RQuR0Cov9hJhBqq+RJmjU=
+github.com/filecoin-project/specs-actors v0.9.10/go.mod h1:czlvLQGEX0fjLLfdNHD7xLymy6L3n7aQzRWzsYGf+ys=
 github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 h1:dJsTPWpG2pcTeojO2pyn0c6l+x/3MZYCBgo/9d11JEk=
 github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
 github.com/filecoin-project/test-vectors/schema v0.0.1 h1:5fNF76nl4qolEvcIsjc0kUADlTMVHO73tW4kXXPnsus=
@@ -506,7 +503,6 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28
 github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE=
 github.com/ipfs/go-graphsync v0.1.2 h1:25Ll9kIXCE+DY0dicvfS3KMw+U5sd01b/FJbA7KAbhg=
 github.com/ipfs/go-graphsync v0.1.2/go.mod h1:sLXVXm1OxtE2XYPw62MuXCdAuNwkAdsbnfrmos5odbA=
-github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk=
 github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
 github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
 github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
136 journal/fs.go Normal file
@ -0,0 +1,136 @@
package journal

import (
    "encoding/json"
    "fmt"
    "os"
    "path/filepath"

    "golang.org/x/xerrors"

    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/node/repo"
)

const RFC3339nocolon = "2006-01-02T150405Z0700"

// fsJournal is a basic journal backed by files on a filesystem.
type fsJournal struct {
    EventTypeRegistry

    dir       string
    sizeLimit int64

    fi    *os.File
    fSize int64

    incoming chan *Event

    closing chan struct{}
    closed  chan struct{}
}

// OpenFSJournal constructs a rolling filesystem journal, with a default
// per-file size limit of 1GiB.
func OpenFSJournal(lr repo.LockedRepo, disabled DisabledEvents) (Journal, error) {
    dir := filepath.Join(lr.Path(), "journal")
    if err := os.MkdirAll(dir, 0755); err != nil {
        return nil, fmt.Errorf("failed to mk directory %s for file journal: %w", dir, err)
    }

    f := &fsJournal{
        EventTypeRegistry: NewEventTypeRegistry(disabled),
        dir:               dir,
        sizeLimit:         1 << 30,
        incoming:          make(chan *Event, 32),
        closing:           make(chan struct{}),
        closed:            make(chan struct{}),
    }

    if err := f.rollJournalFile(); err != nil {
        return nil, err
    }

    go f.runLoop()

    return f, nil
}

func (f *fsJournal) RecordEvent(evtType EventType, supplier func() interface{}) {
    defer func() {
        if r := recover(); r != nil {
            log.Warnf("recovered from panic while recording journal event; type=%s, err=%v", evtType, r)
        }
    }()

    if !evtType.Enabled() {
        return
    }

    je := &Event{
        EventType: evtType,
        Timestamp: build.Clock.Now(),
        Data:      supplier(),
    }
    select {
    case f.incoming <- je:
    case <-f.closing:
        log.Warnw("journal closed but tried to log event", "event", je)
    }
}

func (f *fsJournal) Close() error {
    close(f.closing)
    <-f.closed
    return nil
}

func (f *fsJournal) putEvent(evt *Event) error {
    b, err := json.Marshal(evt)
    if err != nil {
        return err
    }
    n, err := f.fi.Write(append(b, '\n'))
    if err != nil {
        return err
    }

    f.fSize += int64(n)

    if f.fSize >= f.sizeLimit {
        _ = f.rollJournalFile()
    }

    return nil
}

func (f *fsJournal) rollJournalFile() error {
    if f.fi != nil {
        _ = f.fi.Close()
    }

    nfi, err := os.Create(filepath.Join(f.dir, fmt.Sprintf("lotus-journal-%s.ndjson", build.Clock.Now().Format(RFC3339nocolon))))
    if err != nil {
        return xerrors.Errorf("failed to open journal file: %w", err)
    }

    f.fi = nfi
    f.fSize = 0
    return nil
}

func (f *fsJournal) runLoop() {
    defer close(f.closed)

    for {
        select {
        case je := <-f.incoming:
            if err := f.putEvent(je); err != nil {
                log.Errorw("failed to write out journal event", "event", je, "err", err)
            }
        case <-f.closing:
            _ = f.fi.Close()
            return
        }
    }
}
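Since fsJournal writes one JSON object per line into rolling lotus-journal-*.ndjson files, the output is easy to consume offline. A minimal reader sketch (not part of this change; it only assumes the file layout described above, and very large events may need a bigger scanner buffer):

package main

import (
    "bufio"
    "encoding/json"
    "fmt"
    "os"
)

func main() {
    // e.g. ~/.lotus/journal/lotus-journal-<timestamp>.ndjson
    f, err := os.Open(os.Args[1])
    if err != nil {
        panic(err)
    }
    defer f.Close()

    scanner := bufio.NewScanner(f)
    for scanner.Scan() {
        // Each line is one Event: System/Event from the embedded EventType,
        // plus Timestamp and the free-form Data payload.
        var evt map[string]interface{}
        if err := json.Unmarshal(scanner.Bytes(), &evt); err != nil {
            panic(err)
        }
        fmt.Printf("%v/%v at %v\n", evt["System"], evt["Event"], evt["Timestamp"])
    }
    if err := scanner.Err(); err != nil {
        panic(err)
    }
}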
9 journal/global.go Normal file
@ -0,0 +1,9 @@
package journal

var (
    // J is a globally accessible Journal. It starts being NilJournal, and early
    // during the Lotus initialization routine, it is reset to whichever Journal
    // is configured (by default, the filesystem journal). Components can safely
    // record in the journal by calling: journal.J.RecordEvent(...).
    J Journal = NilJournal() // nolint
)
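For illustration, this is how a component is expected to use the global journal; the "mysystem" names are hypothetical and the snippet is only a sketch:

package mysystem

import "github.com/filecoin-project/lotus/journal"

// Hypothetical component; names are illustrative only.
type Worker struct {
    evtType journal.EventType
}

func NewWorker() *Worker {
    // Register in the constructor, after journal.J has been initialized
    // (the node builder sets it early via an fx invoke, see below).
    return &Worker{
        evtType: journal.J.RegisterEventType("mysystem", "something_happened"),
    }
}

func (w *Worker) doWork() {
    // The supplier closure only runs if the event type is enabled, so
    // payload construction is skipped entirely for suppressed events.
    journal.J.RecordEvent(w.evtType, func() interface{} {
        return map[string]interface{}{"detail": 42}
    })
}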
@ -1,152 +0,0 @@
package journal

import (
    "encoding/json"
    "fmt"
    "os"
    "path/filepath"
    "time"

    logging "github.com/ipfs/go-log"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/lotus/build"
)

func InitializeSystemJournal(dir string) error {
    if err := os.MkdirAll(dir, 0755); err != nil {
        return err
    }
    j, err := OpenFSJournal(dir)
    if err != nil {
        return err
    }
    currentJournal = j
    return nil
}

func Add(sys string, val interface{}) {
    if currentJournal == nil {
        log.Warn("no journal configured")
        return
    }
    currentJournal.AddEntry(sys, val)
}

var log = logging.Logger("journal")

var currentJournal Journal

type Journal interface {
    AddEntry(system string, obj interface{})
    Close() error
}

// fsJournal is a basic journal backed by files on a filesystem
type fsJournal struct {
    fi    *os.File
    fSize int64

    journalDir string

    incoming         chan *JournalEntry
    journalSizeLimit int64

    closing chan struct{}
}

func OpenFSJournal(dir string) (Journal, error) {
    fsj := &fsJournal{
        journalDir:       dir,
        incoming:         make(chan *JournalEntry, 32),
        journalSizeLimit: 1 << 30,
        closing:          make(chan struct{}),
    }

    if err := fsj.rollJournalFile(); err != nil {
        return nil, err
    }

    go fsj.runLoop()

    return fsj, nil
}

type JournalEntry struct {
    System    string
    Timestamp time.Time
    Val       interface{}
}

func (fsj *fsJournal) putEntry(je *JournalEntry) error {
    b, err := json.Marshal(je)
    if err != nil {
        return err
    }
    n, err := fsj.fi.Write(append(b, '\n'))
    if err != nil {
        return err
    }

    fsj.fSize += int64(n)

    if fsj.fSize >= fsj.journalSizeLimit {
        return fsj.rollJournalFile()
    }

    return nil
}

const RFC3339nocolon = "2006-01-02T150405Z0700"

func (fsj *fsJournal) rollJournalFile() error {
    if fsj.fi != nil {
        err := fsj.fi.Close()
        if err != nil {
            return err
        }
    }

    nfi, err := os.Create(filepath.Join(fsj.journalDir, fmt.Sprintf("lotus-journal-%s.ndjson", build.Clock.Now().Format(RFC3339nocolon))))
    if err != nil {
        return xerrors.Errorf("failed to open journal file: %w", err)
    }

    fsj.fi = nfi
    fsj.fSize = 0
    return nil
}

func (fsj *fsJournal) runLoop() {
    for {
        select {
        case je := <-fsj.incoming:
            if err := fsj.putEntry(je); err != nil {
                log.Errorw("failed to write out journal entry", "entry", je, "err", err)
            }
        case <-fsj.closing:
            if err := fsj.fi.Close(); err != nil {
                log.Errorw("failed to close journal", "err", err)
            }
            return
        }
    }
}

func (fsj *fsJournal) AddEntry(system string, obj interface{}) {
    je := &JournalEntry{
        System:    system,
        Timestamp: build.Clock.Now(),
        Val:       obj,
    }
    select {
    case fsj.incoming <- je:
    case <-fsj.closing:
        log.Warnw("journal closed but tried to log event", "entry", je)
    }
}

func (fsj *fsJournal) Close() error {
    close(fsj.closing)
    return nil
}
16 journal/nil.go Normal file
@ -0,0 +1,16 @@
package journal

type nilJournal struct{}

// nilj is a singleton nil journal.
var nilj Journal = &nilJournal{}

func NilJournal() Journal {
    return nilj
}

func (n *nilJournal) RegisterEventType(_, _ string) EventType { return EventType{} }

func (n *nilJournal) RecordEvent(_ EventType, _ func() interface{}) {}

func (n *nilJournal) Close() error { return nil }
57 journal/registry.go Normal file
@ -0,0 +1,57 @@
package journal

import "sync"

// EventTypeRegistry is a component that constructs tracked EventType tokens,
// for usage with a Journal.
type EventTypeRegistry interface {

    // RegisterEventType introduces a new event type to a journal, and
    // returns an EventType token that components can later use to check whether
    // journalling for that type is enabled/suppressed, and to tag journal
    // entries appropriately.
    RegisterEventType(system, event string) EventType
}

// eventTypeRegistry is an embeddable mixin that takes care of tracking disabled
// event types, and returning initialized/safe EventTypes when requested.
type eventTypeRegistry struct {
    sync.Mutex

    m map[string]EventType
}

var _ EventTypeRegistry = (*eventTypeRegistry)(nil)

func NewEventTypeRegistry(disabled DisabledEvents) EventTypeRegistry {
    ret := &eventTypeRegistry{
        m: make(map[string]EventType, len(disabled)+32), // + extra capacity.
    }

    for _, et := range disabled {
        et.enabled, et.safe = false, true
        ret.m[et.System+":"+et.Event] = et
    }

    return ret
}

func (d *eventTypeRegistry) RegisterEventType(system, event string) EventType {
    d.Lock()
    defer d.Unlock()

    key := system + ":" + event
    if et, ok := d.m[key]; ok {
        return et
    }

    et := EventType{
        System:  system,
        Event:   event,
        enabled: true,
        safe:    true,
    }

    d.m[key] = et
    return et
}
49 journal/registry_test.go Normal file
@ -0,0 +1,49 @@
package journal

import (
    "testing"

    "github.com/stretchr/testify/require"
)

func TestDisabledEvents(t *testing.T) {
    req := require.New(t)

    test := func(dis DisabledEvents) func(*testing.T) {
        return func(t *testing.T) {
            registry := NewEventTypeRegistry(dis)

            reg1 := registry.RegisterEventType("system1", "disabled1")
            reg2 := registry.RegisterEventType("system2", "disabled2")

            req.False(reg1.Enabled())
            req.False(reg2.Enabled())
            req.True(reg1.safe)
            req.True(reg2.safe)

            reg3 := registry.RegisterEventType("system3", "enabled3")
            req.True(reg3.Enabled())
            req.True(reg3.safe)
        }
    }

    t.Run("direct", test(DisabledEvents{
        EventType{System: "system1", Event: "disabled1"},
        EventType{System: "system2", Event: "disabled2"},
    }))

    dis, err := ParseDisabledEvents("system1:disabled1,system2:disabled2")
    req.NoError(err)

    t.Run("parsed", test(dis))

    dis, err = ParseDisabledEvents("  system1:disabled1  ,  system2:disabled2  ")
    req.NoError(err)

    t.Run("parsed_spaces", test(dis))
}

func TestParseDisableEvents(t *testing.T) {
    _, err := ParseDisabledEvents("system1:disabled1:failed,system2:disabled2")
    require.Error(t, err)
}
102 journal/types.go Normal file
@ -0,0 +1,102 @@
package journal

import (
    "fmt"
    "strings"
    "time"

    logging "github.com/ipfs/go-log"
)

var log = logging.Logger("journal")

var (
    // DefaultDisabledEvents lists the journal events disabled by
    // default, usually because they are considered noisy.
    DefaultDisabledEvents = DisabledEvents{
        EventType{System: "mpool", Event: "add"},
        EventType{System: "mpool", Event: "remove"},
    }
)

// DisabledEvents is the set of event types whose journaling is suppressed.
type DisabledEvents []EventType

// ParseDisabledEvents parses a string of the form: "system1:event1,system2:event2[,...]"
// into a DisabledEvents object, returning an error if the string failed to parse.
//
// It sanitizes strings via strings.TrimSpace.
func ParseDisabledEvents(s string) (DisabledEvents, error) {
    s = strings.TrimSpace(s) // sanitize
    evts := strings.Split(s, ",")
    ret := make(DisabledEvents, 0, len(evts))
    for _, evt := range evts {
        evt = strings.TrimSpace(evt) // sanitize
        s := strings.Split(evt, ":")
        if len(s) != 2 {
            return nil, fmt.Errorf("invalid event type: %s", s)
        }
        ret = append(ret, EventType{System: s[0], Event: s[1]})
    }
    return ret, nil
}

// EventType represents the signature of an event.
type EventType struct {
    System string
    Event  string

    // enabled stores whether this event type is enabled.
    enabled bool

    // safe is a sentinel marker that's set to true if this EventType was
    // constructed correctly (via Journal#RegisterEventType).
    safe bool
}

func (et EventType) String() string {
    return et.System + ":" + et.Event
}

// Enabled returns whether this event type is enabled in the journaling
// subsystem. Users are advised to check this before actually attempting to
// add a journal entry, as it helps bypass object construction for events that
// would be discarded anyway.
//
// All event types are enabled by default, and specific event types can only
// be disabled at Journal construction time.
func (et EventType) Enabled() bool {
    return et.safe && et.enabled
}

// Journal represents an audit trail of system actions.
//
// Every entry is tagged with a timestamp, a system name, and an event name.
// The supplied data can be any type, as long as it is JSON serializable,
// including structs, map[string]interface{}, or primitive types.
//
// For cleanliness and type safety, we recommend to use typed events. See the
// *Evt struct types in this package for more info.
type Journal interface {
    EventTypeRegistry

    // RecordEvent records this event to the journal, if and only if the
    // EventType is enabled. If so, it calls the supplier function to obtain
    // the payload to record.
    //
    // Implementations MUST recover from panics raised by the supplier function.
    RecordEvent(evtType EventType, supplier func() interface{})

    // Close closes this journal for further writing.
    Close() error
}

// Event represents a journal entry.
//
// See godocs on Journal for more information.
type Event struct {
    EventType

    Timestamp time.Time
    Data      interface{}
}
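A short sketch of deriving DisabledEvents from a user-supplied string, mirroring the environment-variable handling the node builder adds further down (the surrounding main is illustrative only):

package main

import (
    "fmt"
    "os"

    "github.com/filecoin-project/lotus/journal"
)

func main() {
    disabled := journal.DefaultDisabledEvents
    if env, ok := os.LookupEnv("LOTUS_JOURNAL_DISABLED_EVENTS"); ok {
        // Fall back to the defaults if the string fails to parse.
        if parsed, err := journal.ParseDisabledEvents(env); err == nil {
            disabled = parsed
        }
    }
    for _, et := range disabled {
        fmt.Println("suppressed:", et.String())
    }
}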
76 markets/journal.go Normal file
@ -0,0 +1,76 @@
package markets

import (
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-fil-markets/storagemarket"

    "github.com/filecoin-project/lotus/journal"
)

type StorageClientEvt struct {
    Event string
    Deal  storagemarket.ClientDeal
}

type StorageProviderEvt struct {
    Event string
    Deal  storagemarket.MinerDeal
}

type RetrievalClientEvt struct {
    Event string
    Deal  retrievalmarket.ClientDealState
}

type RetrievalProviderEvt struct {
    Event string
    Deal  retrievalmarket.ProviderDealState
}

// StorageClientJournaler records journal events from the storage client.
func StorageClientJournaler(evtType journal.EventType) func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
    return func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
        journal.J.RecordEvent(evtType, func() interface{} {
            return StorageClientEvt{
                Event: storagemarket.ClientEvents[event],
                Deal:  deal,
            }
        })
    }
}

// StorageProviderJournaler records journal events from the storage provider.
func StorageProviderJournaler(evtType journal.EventType) func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
    return func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
        journal.J.RecordEvent(evtType, func() interface{} {
            return StorageProviderEvt{
                Event: storagemarket.ProviderEvents[event],
                Deal:  deal,
            }
        })
    }
}

// RetrievalClientJournaler records journal events from the retrieval client.
func RetrievalClientJournaler(evtType journal.EventType) func(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) {
    return func(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) {
        journal.J.RecordEvent(evtType, func() interface{} {
            return RetrievalClientEvt{
                Event: retrievalmarket.ClientEvents[event],
                Deal:  deal,
            }
        })
    }
}

// RetrievalProviderJournaler records journal events from the retrieval provider.
func RetrievalProviderJournaler(evtType journal.EventType) func(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) {
    return func(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) {
        journal.J.RecordEvent(evtType, func() interface{} {
            return RetrievalProviderEvt{
                Event: retrievalmarket.ProviderEvents[event],
                Deal:  deal,
            }
        })
    }
}
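Each journaler is wired up by registering an event type and subscribing the returned closure on the corresponding market component; the node module changes below do exactly this for all four subsystems. For the storage client, the wiring boils down to (fragment shown out of context, where c is the storagemarket client):

evtType := journal.J.RegisterEventType("markets/storage/client", "state_change")
c.SubscribeToEvents(markets.StorageClientJournaler(evtType))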
@ -80,7 +80,7 @@ func (rcn *retrievalClientNode) WaitForPaymentChannelReady(ctx context.Context,

func (rcn *retrievalClientNode) CheckAvailableFunds(ctx context.Context, paymentChannel address.Address) (retrievalmarket.ChannelAvailableFunds, error) {
-    channelAvailableFunds, err := rcn.payAPI.PaychAvailableFunds(paymentChannel)
+    channelAvailableFunds, err := rcn.payAPI.PaychAvailableFunds(ctx, paymentChannel)
    if err != nil {
        return retrievalmarket.ChannelAvailableFunds{}, err
    }
@ -33,6 +33,11 @@ import (

var log = logging.Logger("miner")

+// Journal event types.
+const (
+    evtTypeBlockMined = iota
+)
+
// returns a callback reporting whether we mined a blocks in this round
type waitFunc func(ctx context.Context, baseTime uint64) (func(bool, abi.ChainEpoch, error), abi.ChainEpoch, error)

@ -68,6 +73,9 @@ func NewMiner(api api.FullNode, epp gen.WinningPoStProver, addr address.Address,
        sf:                sf,
        minedBlockHeights: arc,
+        evtTypes: [...]journal.EventType{
+            evtTypeBlockMined: journal.J.RegisterEventType("miner", "block_mined"),
+        },
    }
}

@ -87,6 +95,8 @@ type Miner struct {
    sf                *slashfilter.SlashFilter
    minedBlockHeights *lru.ARCCache
+
+    evtTypes [1]journal.EventType
}

func (m *Miner) Address() address.Address {

@ -220,12 +230,14 @@ func (m *Miner) mine(ctx context.Context) {
    onDone(b != nil, h, nil)

    if b != nil {
-        journal.Add("blockMined", map[string]interface{}{
+        journal.J.RecordEvent(m.evtTypes[evtTypeBlockMined], func() interface{} {
+            return map[string]interface{}{
                "parents":   base.TipSet.Cids(),
                "nulls":     base.NullRounds,
                "epoch":     b.Header.Height,
                "timestamp": b.Header.Timestamp,
                "cid":       b.Header.Cid(),
+            }
        })

        btime := time.Unix(int64(b.Header.Timestamp), 0)
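Indexing a fixed-size array of EventType tokens by iota constants gives a subsystem cheap, typo-proof lookup of its registered events; the same pattern reappears in the WindowPoSt scheduler below. In miniature (a sketch with hypothetical names, not Lotus code):

package demo

import "github.com/filecoin-project/lotus/journal"

// Event type indices for this hypothetical subsystem.
const (
    evtTypeFoo = iota
    evtTypeBar
)

type subsystem struct {
    evtTypes [2]journal.EventType
}

// Register once at construction time (after journal.J is initialized),
// then index by constant wherever events are recorded.
func newSubsystem() *subsystem {
    return &subsystem{
        evtTypes: [...]journal.EventType{
            evtTypeFoo: journal.J.RegisterEventType("demo", "foo"),
            evtTypeBar: journal.J.RegisterEventType("demo", "bar"),
        },
    }
}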
@ -3,6 +3,7 @@ package node
import (
    "context"
    "errors"
+    "os"
    "time"

    logging "github.com/ipfs/go-log"

@ -45,6 +46,7 @@ import (
    "github.com/filecoin-project/lotus/extern/sector-storage/stores"
    "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
    sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
+    "github.com/filecoin-project/lotus/journal"
    "github.com/filecoin-project/lotus/lib/blockstore"
    "github.com/filecoin-project/lotus/lib/peermgr"
    _ "github.com/filecoin-project/lotus/lib/sigs/bls"

@ -68,6 +70,10 @@ import (
    "github.com/filecoin-project/lotus/storage/sectorblocks"
)

+// EnvJournalDisabledEvents is the environment variable through which disabled
+// journal events can be customized.
+const EnvJournalDisabledEvents = "LOTUS_JOURNAL_DISABLED_EVENTS"
+
//nolint:deadcode,varcheck
var log = logging.Logger("builder")

@ -92,11 +98,16 @@ var (

type invoke int

+// Invokes are called in the order they are defined.
//nolint:golint
const (
+    // InitJournal at position 0 initializes the journal global var as soon as
+    // the system starts, so that it's available for all other components.
+    InitJournalKey = invoke(iota)
+
    // libp2p

-    PstoreAddSelfKeysKey = invoke(iota)
+    PstoreAddSelfKeysKey
    StartListeningKey
    BootstrapKey

@ -124,7 +135,6 @@ const (
    HeadMetricsKey
    SettlePaymentChannelsKey
    RunPeerTaggerKey
-    JournalKey

    SetApiEndpointKey

@ -152,11 +162,25 @@ type Settings struct {

func defaults() []Option {
    return []Option{
+        // global system journal.
+        Override(new(journal.DisabledEvents), func() journal.DisabledEvents {
+            if env, ok := os.LookupEnv(EnvJournalDisabledEvents); ok {
+                if ret, err := journal.ParseDisabledEvents(env); err == nil {
+                    return ret
+                }
+            }
+            // fallback if env variable is not set, or if it failed to parse.
+            return journal.DefaultDisabledEvents
+        }),
+        Override(new(journal.Journal), modules.OpenFilesystemJournal),
+        Override(InitJournalKey, func(j journal.Journal) {
+            journal.J = j // eagerly sets the global journal through fx.Invoke.
+        }),
+
        Override(new(helpers.MetricsCtx), context.Background),
        Override(new(record.Validator), modules.RecordValidator),
        Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(false)),
        Override(new(dtypes.ShutdownChan), make(chan struct{})),
-        Override(JournalKey, modules.SetupJournal),

        // Filecoin modules

@ -243,6 +267,9 @@ func Online() Option {
        Override(new(dtypes.ChainBlockService), modules.ChainBlockService),

        // Filecoin services
+        // We don't want the SyncManagerCtor to be used as an fx constructor, but rather as a value.
+        // It will be called implicitly by the Syncer constructor.
+        Override(new(chain.SyncManagerCtor), func() chain.SyncManagerCtor { return chain.NewSyncManager }),
        Override(new(*chain.Syncer), modules.NewSyncer),
        Override(new(exchange.Client), exchange.NewClient),
        Override(new(*messagepool.MessagePool), modules.MessagePool),

@ -477,12 +504,18 @@ func Repo(r repo.Repo) Option {
}

func FullAPI(out *api.FullNode) Option {
-    return func(s *Settings) error {
+    return Options(
+        func(s *Settings) error {
+            s.nodeType = repo.FullNode
+            return nil
+        },
+        func(s *Settings) error {
            resAPI := &impl.FullNodeAPI{}
            s.invokes[ExtractApiKey] = fx.Extract(resAPI)
            *out = resAPI
            return nil
-    }
+        },
+    )
}

type StopFunc func(context.Context) error

@ -492,7 +525,6 @@ func New(ctx context.Context, opts ...Option) (StopFunc, error) {
    settings := Settings{
        modules: map[interface{}]fx.Option{},
        invokes: make([]fx.Option, _nInvokes),
-        nodeType: repo.FullNode,
    }

    // apply module options in the right order
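The journal wiring above relies on go.uber.org/fx ordering: provide DisabledEvents, provide the Journal constructor, then eagerly invoke a function that assigns the global. A standalone sketch of the same mechanics with toy types (assumptions: not Lotus code, names are illustrative):

package main

import (
    "context"
    "fmt"

    "go.uber.org/fx"
)

type Config struct{ Name string }
type Service struct{ cfg Config }

var global *Service // stands in for journal.J

func main() {
    app := fx.New(
        fx.Provide(func() Config { return Config{Name: "demo"} }),
        fx.Provide(func(c Config) *Service { return &Service{cfg: c} }),
        // Invokes run once the dependency graph resolves; this mirrors
        // InitJournalKey assigning journal.J as soon as the node is built.
        fx.Invoke(func(s *Service) { global = s }),
    )
    if err := app.Start(context.Background()); err != nil {
        panic(err)
    }
    fmt.Println("global set:", global.cfg.Name)
    _ = app.Stop(context.Background())
}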
@ -300,7 +300,7 @@ func resolveOnce(bs blockstore.Blockstore) func(ctx context.Context, ds ipld.Nod
        return nil, nil, xerrors.Errorf("parsing int64: %w", err)
    }

-    ik := adt.IntKey(i)
+    ik := abi.IntKey(i)

    names[0] = "@H:" + ik.Key()
}

@ -311,7 +311,7 @@ func resolveOnce(bs blockstore.Blockstore) func(ctx context.Context, ds ipld.Nod
        return nil, nil, xerrors.Errorf("parsing uint64: %w", err)
    }

-    ik := adt.UIntKey(i)
+    ik := abi.UIntKey(i)

    names[0] = "@H:" + ik.Key()
}
@ -509,7 +509,7 @@ func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSet
        return err
    }

-    if found, err := locked.Get(adt.AddrKey(a), &lk); err != nil {
+    if found, err := locked.Get(abi.AddrKey(a), &lk); err != nil {
        return err
    } else if !found {
        return fmt.Errorf("locked funds not found")

@ -604,7 +604,7 @@ func (a *StateAPI) StateChangedActors(ctx context.Context, old cid.Cid, new cid.
        return xerrors.Errorf("address in state tree was not valid: %w", err)
    }

-    found, err := oh.Get(adt.AddrKey(addr), &ocval)
+    found, err := oh.Get(abi.AddrKey(addr), &ocval)
    if err != nil {
        return err
    }

@ -883,9 +883,8 @@ func (a *StateAPI) MsigGetAvailableBalance(ctx context.Context, addr address.Add
        return act.Balance, nil
    }

-    minBalance := types.BigDiv(st.InitialBalance, types.NewInt(uint64(st.UnlockDuration)))
-    minBalance = types.BigMul(minBalance, types.NewInt(uint64(offset)))
-    return types.BigSub(act.Balance, minBalance), nil
+    al := st.AmountLocked(offset)
+    return types.BigSub(act.Balance, al), nil
}

func (a *StateAPI) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) {

@ -1150,7 +1149,7 @@ func (a *StateAPI) StateVerifiedClientStatus(ctx context.Context, addr address.A
    }

    var dcap verifreg.DataCap
-    if found, err := vh.Get(adt.AddrKey(aid), &dcap); err != nil {
+    if found, err := vh.Get(abi.AddrKey(aid), &dcap); err != nil {
        return nil, err
    } else if !found {
        return nil, nil
@ -39,11 +39,11 @@ func (a *PaychAPI) PaychGet(ctx context.Context, from, to address.Address, amt t
    }, nil
}

-func (a *PaychAPI) PaychAvailableFunds(ch address.Address) (*api.ChannelAvailableFunds, error) {
+func (a *PaychAPI) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) {
    return a.PaychMgr.AvailableFunds(ch)
}

-func (a *PaychAPI) PaychAvailableFundsByFromTo(from, to address.Address) (*api.ChannelAvailableFunds, error) {
+func (a *PaychAPI) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) {
    return a.PaychMgr.AvailableFundsByFromTo(from, to)
}
@ -163,8 +163,31 @@ func NetworkName(mctx helpers.MetricsCtx, lc fx.Lifecycle, cs *store.ChainStore,
    return netName, err
}

-func NewSyncer(lc fx.Lifecycle, ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, h host.Host, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*chain.Syncer, error) {
-    syncer, err := chain.NewSyncer(ds, sm, exchange, h.ConnManager(), h.ID(), beacon, verifier)
+type SyncerParams struct {
+    fx.In
+
+    Lifecycle    fx.Lifecycle
+    MetadataDS   dtypes.MetadataDS
+    StateManager *stmgr.StateManager
+    ChainXchg    exchange.Client
+    SyncMgrCtor  chain.SyncManagerCtor
+    Host         host.Host
+    Beacon       beacon.Schedule
+    Verifier     ffiwrapper.Verifier
+}
+
+func NewSyncer(params SyncerParams) (*chain.Syncer, error) {
+    var (
+        lc     = params.Lifecycle
+        ds     = params.MetadataDS
+        sm     = params.StateManager
+        ex     = params.ChainXchg
+        smCtor = params.SyncMgrCtor
+        h      = params.Host
+        b      = params.Beacon
+        v      = params.Verifier
+    )
+    syncer, err := chain.NewSyncer(ds, sm, ex, smCtor, h.ConnManager(), h.ID(), b, v)
    if err != nil {
        return nil, err
    }
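SyncerParams follows fx's parameter-object convention: embedding fx.In makes every exported field an injected dependency, which keeps constructor signatures stable as the dependency list grows. A toy sketch of the convention (types are illustrative, not Lotus code):

package main

import (
    "context"
    "fmt"

    "go.uber.org/fx"
)

type A struct{}
type B struct{}

// Params bundles constructor dependencies; fx fills in each exported field.
type Params struct {
    fx.In

    A *A
    B *B
}

type Thing struct {
    a *A
    b *B
}

func NewThing(p Params) *Thing { return &Thing{a: p.A, b: p.B} }

func main() {
    app := fx.New(
        fx.Provide(func() *A { return &A{} }),
        fx.Provide(func() *B { return &B{} }),
        fx.Provide(NewThing),
        fx.Invoke(func(t *Thing) { fmt.Println("constructed:", t != nil) }),
    )
    if err := app.Start(context.Background()); err != nil {
        panic(err)
    }
    _ = app.Stop(context.Background())
}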
@ -26,7 +26,9 @@ import (
    "github.com/ipfs/go-datastore/namespace"
    "github.com/libp2p/go-libp2p-core/host"

+    "github.com/filecoin-project/lotus/journal"
    "github.com/filecoin-project/lotus/lib/blockstore"
+    "github.com/filecoin-project/lotus/markets"
    marketevents "github.com/filecoin-project/lotus/markets/loggers"
    "github.com/filecoin-project/lotus/markets/retrievaladapter"
    "github.com/filecoin-project/lotus/node/impl/full"

@ -119,6 +121,10 @@ func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, md
    lc.Append(fx.Hook{
        OnStart: func(ctx context.Context) error {
            c.SubscribeToEvents(marketevents.StorageClientLogger)
+
+            evtType := journal.J.RegisterEventType("markets/storage/client", "state_change")
+            c.SubscribeToEvents(markets.StorageClientJournaler(evtType))
+
            return c.Start(ctx)
        },
        OnStop: func(context.Context) error {

@ -140,6 +146,10 @@ func RetrievalClient(lc fx.Lifecycle, h host.Host, mds dtypes.ClientMultiDstore,
    lc.Append(fx.Hook{
        OnStart: func(ctx context.Context) error {
            client.SubscribeToEvents(marketevents.RetrievalClientLogger)
+
+            evtType := journal.J.RegisterEventType("markets/retrieval/client", "state_change")
+            client.SubscribeToEvents(markets.RetrievalClientJournaler(evtType))
+
            return nil
        },
    })
@ -6,7 +6,6 @@ import (
    "errors"
    "io"
    "io/ioutil"
-    "path/filepath"

    "github.com/gbrlsnchs/jwt/v3"
    logging "github.com/ipfs/go-log/v2"

@ -20,7 +19,6 @@ import (
    "github.com/filecoin-project/lotus/api/apistruct"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/types"
-    "github.com/filecoin-project/lotus/journal"
    "github.com/filecoin-project/lotus/lib/addrutil"
    "github.com/filecoin-project/lotus/node/modules/dtypes"
    "github.com/filecoin-project/lotus/node/repo"

@ -107,7 +105,3 @@ func DrandBootstrap(ds dtypes.DrandSchedule) (dtypes.DrandBootstrap, error) {
    }
    return res, nil
}
-
-func SetupJournal(lr repo.LockedRepo) error {
-    return journal.InitializeSystemJournal(filepath.Join(lr.Path(), "journal"))
-}
@ -1,6 +1,8 @@
package modules

import (
+    "context"
+
    "github.com/ipfs/go-datastore"
    "github.com/ipfs/go-datastore/namespace"
    eventbus "github.com/libp2p/go-eventbus"

@ -22,10 +24,12 @@ import (
    "github.com/filecoin-project/lotus/chain/stmgr"
    "github.com/filecoin-project/lotus/chain/store"
    "github.com/filecoin-project/lotus/chain/sub"
+    "github.com/filecoin-project/lotus/journal"
    "github.com/filecoin-project/lotus/lib/peermgr"
    "github.com/filecoin-project/lotus/node/hello"
    "github.com/filecoin-project/lotus/node/modules/dtypes"
    "github.com/filecoin-project/lotus/node/modules/helpers"
+    "github.com/filecoin-project/lotus/node/repo"
)

func RunHello(mctx helpers.MetricsCtx, lc fx.Lifecycle, h host.Host, svc *hello.Service) error {

@ -150,3 +154,16 @@ func RandomSchedule(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.Sche

    return shd, nil
}
+
+func OpenFilesystemJournal(lr repo.LockedRepo, lc fx.Lifecycle, disabled journal.DisabledEvents) (journal.Journal, error) {
+    jrnl, err := journal.OpenFSJournal(lr, disabled)
+    if err != nil {
+        return nil, err
+    }
+
+    lc.Append(fx.Hook{
+        OnStop: func(_ context.Context) error { return jrnl.Close() },
+    })
+
+    return jrnl, err
+}
@ -50,6 +50,8 @@ import (
    "github.com/filecoin-project/lotus/extern/sector-storage/stores"
    sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
    "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
+    "github.com/filecoin-project/lotus/journal"
+    "github.com/filecoin-project/lotus/markets"

    lapi "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/build"

@ -143,8 +145,34 @@ func SectorIDCounter(ds dtypes.MetadataDS) sealing.SectorIDCounter {
    return &sidsc{sc}
}

-func StorageMiner(fc config.MinerFeeConfig) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, api lapi.FullNode, h host.Host, ds dtypes.MetadataDS, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, gsd dtypes.GetSealingConfigFunc) (*storage.Miner, error) {
-    return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, api lapi.FullNode, h host.Host, ds dtypes.MetadataDS, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, gsd dtypes.GetSealingConfigFunc) (*storage.Miner, error) {
+type StorageMinerParams struct {
+    fx.In
+
+    Lifecycle          fx.Lifecycle
+    MetricsCtx         helpers.MetricsCtx
+    API                lapi.FullNode
+    Host               host.Host
+    MetadataDS         dtypes.MetadataDS
+    Sealer             sectorstorage.SectorManager
+    SectorIDCounter    sealing.SectorIDCounter
+    Verifier           ffiwrapper.Verifier
+    GetSealingConfigFn dtypes.GetSealingConfigFunc
+}
+
+func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*storage.Miner, error) {
+    return func(params StorageMinerParams) (*storage.Miner, error) {
+        var (
+            ds     = params.MetadataDS
+            mctx   = params.MetricsCtx
+            lc     = params.Lifecycle
+            api    = params.API
+            sealer = params.Sealer
+            h      = params.Host
+            sc     = params.SectorIDCounter
+            verif  = params.Verifier
+            gsd    = params.GetSealingConfigFn
+        )
+
        maddr, err := minerAddrFromDS(ds)
        if err != nil {
            return nil, err

@ -188,6 +216,10 @@ func HandleRetrieval(host host.Host, lc fx.Lifecycle, m retrievalmarket.Retrieva
    lc.Append(fx.Hook{
        OnStart: func(context.Context) error {
            m.SubscribeToEvents(marketevents.RetrievalProviderLogger)
+
+            evtType := journal.J.RegisterEventType("markets/retrieval/provider", "state_change")
+            m.SubscribeToEvents(markets.RetrievalProviderJournaler(evtType))
+
            return m.Start()
        },
        OnStop: func(context.Context) error {

@ -202,6 +234,10 @@ func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h sto
    lc.Append(fx.Hook{
        OnStart: func(context.Context) error {
            h.SubscribeToEvents(marketevents.StorageProviderLogger)
+
+            evtType := journal.J.RegisterEventType("markets/storage/provider", "state_change")
+            h.SubscribeToEvents(markets.StorageProviderJournaler(evtType))
+
            return h.Start(ctx)
        },
        OnStop: func(context.Context) error {
|
@ -195,7 +195,7 @@ func (s SealingAPIAdapter) StateSectorPreCommitInfo(ctx context.Context, maddr a
|
|||||||
}
|
}
|
||||||
|
|
||||||
var pci miner.SectorPreCommitOnChainInfo
|
var pci miner.SectorPreCommitOnChainInfo
|
||||||
ok, err := precommits.Get(adt.UIntKey(uint64(sectorNumber)), &pci)
|
ok, err := precommits.Get(abi.UIntKey(uint64(sectorNumber)), &pci)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -30,6 +30,7 @@ import (
    "github.com/filecoin-project/lotus/chain/gen"
    "github.com/filecoin-project/lotus/chain/types"
    sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
+    "github.com/filecoin-project/lotus/journal"
    "github.com/filecoin-project/lotus/node/config"
    "github.com/filecoin-project/lotus/node/modules/dtypes"
)

@ -50,6 +51,17 @@ type Miner struct {

    getSealConfig dtypes.GetSealingConfigFunc
    sealing       *sealing.Sealing
+
+    sealingEvtType journal.EventType
+}
+
+// SealingStateEvt is a journal event that records a sector state transition.
+type SealingStateEvt struct {
+    SectorNumber abi.SectorNumber
+    SectorType   abi.RegisteredSealProof
+    From         sealing.SectorState
+    After        sealing.SectorState
+    Error        string
}

type storageMinerApi interface {

@ -106,6 +118,7 @@ func NewMiner(api storageMinerApi, maddr, worker address.Address, h host.Host, d
        maddr:          maddr,
        worker:         worker,
        getSealConfig:  gsd,
+        sealingEvtType: journal.J.RegisterEventType("storage", "sealing_states"),
    }

    return m, nil

@ -129,13 +142,25 @@ func (m *Miner) Run(ctx context.Context) error {
    evts := events.NewEvents(ctx, m.api)
    adaptedAPI := NewSealingAPIAdapter(m.api)
    pcp := sealing.NewBasicPreCommitPolicy(adaptedAPI, miner.MaxSectorExpirationExtension-(miner.WPoStProvingPeriod*2), md.PeriodStart%miner.WPoStProvingPeriod)
-    m.sealing = sealing.New(adaptedAPI, fc, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig))
+    m.sealing = sealing.New(adaptedAPI, fc, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig), m.handleSealingNotifications)

    go m.sealing.Run(ctx) //nolint:errcheck // logged intside the function

    return nil
}

+func (m *Miner) handleSealingNotifications(before, after sealing.SectorInfo) {
+    journal.J.RecordEvent(m.sealingEvtType, func() interface{} {
+        return SealingStateEvt{
+            SectorNumber: before.SectorNumber,
+            SectorType:   before.SectorType,
+            From:         before.State,
+            After:        after.State,
+            Error:        after.LastErr,
+        }
+    })
+}
+
func (m *Miner) Stop(ctx context.Context) error {
    return m.sealing.Stop(ctx)
}
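When such a transition is recorded, the fs journal serializes the typed payload inside the generic Event wrapper, one JSON object per line. An illustrative record (all field values hypothetical):

{"System":"storage","Event":"sealing_states","Timestamp":"2020-09-14T10:00:00Z","Data":{"SectorNumber":1,"SectorType":3,"From":"PreCommit1","After":"PreCommit2","Error":""}}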
75 storage/wdpost_journal.go Normal file
@ -0,0 +1,75 @@
package storage

import (
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/dline"
    "github.com/filecoin-project/specs-actors/actors/builtin/miner"

    "github.com/ipfs/go-cid"
)

// SchedulerState defines the possible states in which the scheduler could be,
// for the purposes of journalling.
type SchedulerState string

const (
    // SchedulerStateStarted gets recorded when a WdPoSt cycle for an
    // epoch begins.
    SchedulerStateStarted = SchedulerState("started")
    // SchedulerStateAborted gets recorded when a WdPoSt cycle for an
    // epoch is aborted, normally because of a chain reorg or advancement.
    SchedulerStateAborted = SchedulerState("aborted")
    // SchedulerStateFaulted gets recorded when a WdPoSt cycle for an
    // epoch terminates abnormally, in which case the error is also recorded.
    SchedulerStateFaulted = SchedulerState("faulted")
    // SchedulerStateSucceeded gets recorded when a WdPoSt cycle for an
    // epoch ends successfully.
    SchedulerStateSucceeded = SchedulerState("succeeded")
)

// Journal event types.
const (
    evtTypeWdPoStScheduler = iota
    evtTypeWdPoStProofs
    evtTypeWdPoStRecoveries
    evtTypeWdPoStFaults
)

// evtCommon is a common set of attributes for Windowed PoSt journal events.
type evtCommon struct {
    Deadline *dline.Info
    Height   abi.ChainEpoch
    TipSet   []cid.Cid
    Error    error `json:",omitempty"`
}

// WdPoStSchedulerEvt is the journal event that gets recorded on scheduler
// actions.
type WdPoStSchedulerEvt struct {
    evtCommon
    State SchedulerState
}

// WdPoStProofsProcessedEvt is the journal event that gets recorded when
// Windowed PoSt proofs have been processed.
type WdPoStProofsProcessedEvt struct {
    evtCommon
    Partitions []miner.PoStPartition
    MessageCID cid.Cid `json:",omitempty"`
}

// WdPoStRecoveriesProcessedEvt is the journal event that gets recorded when
// Windowed PoSt recoveries have been processed.
type WdPoStRecoveriesProcessedEvt struct {
    evtCommon
    Declarations []miner.RecoveryDeclaration
    MessageCID   cid.Cid `json:",omitempty"`
}

// WdPoStFaultsProcessedEvt is the journal event that gets recorded when
// Windowed PoSt faults have been processed.
type WdPoStFaultsProcessedEvt struct {
    evtCommon
    Declarations []miner.FaultDeclaration
    MessageCID   cid.Cid `json:",omitempty"`
}
@@ -3,7 +3,6 @@ package storage
 import (
 	"bytes"
 	"context"
-	"errors"
 	"time"
 
 	"github.com/filecoin-project/go-state-types/dline"
@@ -18,6 +17,8 @@ import (
 	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
 	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
+	"github.com/ipfs/go-cid"
+
 	"go.opencensus.io/trace"
 	"golang.org/x/xerrors"
 
@@ -25,11 +26,17 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/actors"
 	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/journal"
 )
 
-var errNoPartitions = errors.New("no partitions")
-
-func (s *WindowPoStScheduler) failPost(deadline *dline.Info) {
+func (s *WindowPoStScheduler) failPost(err error, deadline *dline.Info) {
+	journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
+		return WdPoStSchedulerEvt{
+			evtCommon: s.getEvtCommon(err),
+			State:     SchedulerStateFaulted,
+		}
+	})
+
 	log.Errorf("TODO")
 	/*s.failLk.Lock()
 	if eps > s.failed {
@@ -44,27 +51,60 @@ func (s *WindowPoStScheduler) doPost(ctx context.Context, deadline *dline.Info,
 	s.abort = abort
 	s.activeDeadline = deadline
 
+	journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
+		return WdPoStSchedulerEvt{
+			evtCommon: s.getEvtCommon(nil),
+			State:     SchedulerStateStarted,
+		}
+	})
+
 	go func() {
 		defer abort()
 
 		ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.doPost")
 		defer span.End()
 
-		proof, err := s.runPost(ctx, *deadline, ts)
-		switch err {
-		case errNoPartitions:
-			return
-		case nil:
-			if err := s.submitPost(ctx, proof); err != nil {
-				log.Errorf("submitPost failed: %+v", err)
-				s.failPost(deadline)
-				return
-			}
-		default:
+		// recordProofsEvent records a successful proofs_processed event in the
+		// journal, even if it was a noop (no partitions).
+		recordProofsEvent := func(partitions []miner.PoStPartition, mcid cid.Cid) {
+			journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStProofs], func() interface{} {
+				return &WdPoStProofsProcessedEvt{
+					evtCommon:  s.getEvtCommon(nil),
+					Partitions: partitions,
+					MessageCID: mcid,
+				}
+			})
+		}
+
+		posts, err := s.runPost(ctx, *deadline, ts)
+		if err != nil {
 			log.Errorf("runPost failed: %+v", err)
-			s.failPost(deadline)
+			s.failPost(err, deadline)
 			return
 		}
+
+		if len(posts) == 0 {
+			recordProofsEvent(nil, cid.Undef)
+			return
+		}
+
+		for i := range posts {
+			post := &posts[i]
+			sm, err := s.submitPost(ctx, post)
+			if err != nil {
+				log.Errorf("submitPost failed: %+v", err)
+				s.failPost(err, deadline)
+			} else {
+				recordProofsEvent(post.Partitions, sm.Cid())
+			}
+		}
+
+		journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
+			return WdPoStSchedulerEvt{
+				evtCommon: s.getEvtCommon(nil),
+				State:     SchedulerStateSucceeded,
+			}
+		})
 	}()
 }
 
@@ -113,25 +153,24 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B
 	return sbf, nil
 }
 
-func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []*miner.Partition) error {
+func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []*miner.Partition) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) {
 	ctx, span := trace.StartSpan(ctx, "storage.checkNextRecoveries")
 	defer span.End()
 
+	faulty := uint64(0)
 	params := &miner.DeclareFaultsRecoveredParams{
 		Recoveries: []miner.RecoveryDeclaration{},
 	}
 
-	faulty := uint64(0)
-
 	for partIdx, partition := range partitions {
 		unrecovered, err := bitfield.SubtractBitField(partition.Faults, partition.Recoveries)
 		if err != nil {
-			return xerrors.Errorf("subtracting recovered set from fault set: %w", err)
+			return nil, nil, xerrors.Errorf("subtracting recovered set from fault set: %w", err)
 		}
 
 		uc, err := unrecovered.Count()
 		if err != nil {
-			return xerrors.Errorf("counting unrecovered sectors: %w", err)
+			return nil, nil, xerrors.Errorf("counting unrecovered sectors: %w", err)
 		}
 
 		if uc == 0 {
@@ -142,13 +181,13 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
 
 		recovered, err := s.checkSectors(ctx, unrecovered)
 		if err != nil {
-			return xerrors.Errorf("checking unrecovered sectors: %w", err)
+			return nil, nil, xerrors.Errorf("checking unrecovered sectors: %w", err)
 		}
 
 		// if all sectors failed to recover, don't declare recoveries
 		recoveredCount, err := recovered.Count()
 		if err != nil {
-			return xerrors.Errorf("counting recovered sectors: %w", err)
+			return nil, nil, xerrors.Errorf("counting recovered sectors: %w", err)
 		}
 
 		if recoveredCount == 0 {
@@ -162,17 +201,18 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
 		})
 	}
 
-	if len(params.Recoveries) == 0 {
+	recoveries := params.Recoveries
+	if len(recoveries) == 0 {
 		if faulty != 0 {
 			log.Warnw("No recoveries to declare", "deadline", dlIdx, "faulty", faulty)
 		}
 
-		return nil
+		return recoveries, nil, nil
 	}
 
 	enc, aerr := actors.SerializeParams(params)
 	if aerr != nil {
-		return xerrors.Errorf("could not serialize declare recoveries parameters: %w", aerr)
+		return recoveries, nil, xerrors.Errorf("could not serialize declare recoveries parameters: %w", aerr)
 	}
 
 	msg := &types.Message{
@@ -187,52 +227,51 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
 
 	sm, err := s.api.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)})
 	if err != nil {
-		return xerrors.Errorf("pushing message to mpool: %w", err)
+		return recoveries, sm, xerrors.Errorf("pushing message to mpool: %w", err)
 	}
 
 	log.Warnw("declare faults recovered Message CID", "cid", sm.Cid())
 
 	rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence)
 	if err != nil {
-		return xerrors.Errorf("declare faults recovered wait error: %w", err)
+		return recoveries, sm, xerrors.Errorf("declare faults recovered wait error: %w", err)
 	}
 
 	if rec.Receipt.ExitCode != 0 {
-		return xerrors.Errorf("declare faults recovered wait non-0 exit code: %d", rec.Receipt.ExitCode)
+		return recoveries, sm, xerrors.Errorf("declare faults recovered wait non-0 exit code: %d", rec.Receipt.ExitCode)
 	}
 
-	return nil
+	return recoveries, sm, nil
 }
 
-func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []*miner.Partition) error {
+func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []*miner.Partition) ([]miner.FaultDeclaration, *types.SignedMessage, error) {
 	ctx, span := trace.StartSpan(ctx, "storage.checkNextFaults")
 	defer span.End()
 
+	bad := uint64(0)
 	params := &miner.DeclareFaultsParams{
 		Faults: []miner.FaultDeclaration{},
 	}
 
-	bad := uint64(0)
-
 	for partIdx, partition := range partitions {
 		toCheck, err := partition.ActiveSectors()
 		if err != nil {
-			return xerrors.Errorf("getting active sectors: %w", err)
+			return nil, nil, xerrors.Errorf("getting active sectors: %w", err)
 		}
 
 		good, err := s.checkSectors(ctx, toCheck)
 		if err != nil {
-			return xerrors.Errorf("checking sectors: %w", err)
+			return nil, nil, xerrors.Errorf("checking sectors: %w", err)
 		}
 
 		faulty, err := bitfield.SubtractBitField(toCheck, good)
 		if err != nil {
-			return xerrors.Errorf("calculating faulty sector set: %w", err)
+			return nil, nil, xerrors.Errorf("calculating faulty sector set: %w", err)
 		}
 
 		c, err := faulty.Count()
 		if err != nil {
-			return xerrors.Errorf("counting faulty sectors: %w", err)
+			return nil, nil, xerrors.Errorf("counting faulty sectors: %w", err)
 		}
 
 		if c == 0 {
@@ -248,15 +287,16 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64,
 		})
 	}
 
-	if len(params.Faults) == 0 {
-		return nil
+	faults := params.Faults
+	if len(faults) == 0 {
+		return faults, nil, nil
 	}
 
 	log.Errorw("DETECTED FAULTY SECTORS, declaring faults", "count", bad)
 
 	enc, aerr := actors.SerializeParams(params)
 	if aerr != nil {
-		return xerrors.Errorf("could not serialize declare faults parameters: %w", aerr)
+		return faults, nil, xerrors.Errorf("could not serialize declare faults parameters: %w", aerr)
 	}
 
 	msg := &types.Message{
@@ -271,24 +311,24 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64,
 
 	sm, err := s.api.MpoolPushMessage(ctx, msg, spec)
 	if err != nil {
-		return xerrors.Errorf("pushing message to mpool: %w", err)
+		return faults, sm, xerrors.Errorf("pushing message to mpool: %w", err)
 	}
 
 	log.Warnw("declare faults Message CID", "cid", sm.Cid())
 
 	rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence)
 	if err != nil {
-		return xerrors.Errorf("declare faults wait error: %w", err)
+		return faults, sm, xerrors.Errorf("declare faults wait error: %w", err)
 	}
 
 	if rec.Receipt.ExitCode != 0 {
-		return xerrors.Errorf("declare faults wait non-0 exit code: %d", rec.Receipt.ExitCode)
+		return faults, sm, xerrors.Errorf("declare faults wait non-0 exit code: %d", rec.Receipt.ExitCode)
 	}
 
-	return nil
+	return faults, sm, nil
}
 
-func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *types.TipSet) (*miner.SubmitWindowedPoStParams, error) {
+func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *types.TipSet) ([]miner.SubmitWindowedPoStParams, error) {
 	ctx, span := trace.StartSpan(ctx, "storage.runPost")
 	defer span.End()
 
@@ -305,15 +345,49 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 			return
 		}
 
-		if err := s.checkNextRecoveries(context.TODO(), declDeadline, partitions); err != nil {
+		var (
+			sigmsg     *types.SignedMessage
+			recoveries []miner.RecoveryDeclaration
+			faults     []miner.FaultDeclaration
+
+			// optionalCid returns the CID of the message, or cid.Undef if the
+			// message is nil. We don't need the argument (could capture the
+			// pointer), but it's clearer and purer like that.
+			optionalCid = func(sigmsg *types.SignedMessage) cid.Cid {
+				if sigmsg == nil {
+					return cid.Undef
+				}
+				return sigmsg.Cid()
+			}
+		)
+
+		if recoveries, sigmsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions); err != nil {
 			// TODO: This is potentially quite bad, but not even trying to post when this fails is objectively worse
 			log.Errorf("checking sector recoveries: %v", err)
 		}
 
-		if err := s.checkNextFaults(context.TODO(), declDeadline, partitions); err != nil {
+		journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStRecoveries], func() interface{} {
+			j := WdPoStRecoveriesProcessedEvt{
+				evtCommon:    s.getEvtCommon(err),
+				Declarations: recoveries,
+				MessageCID:   optionalCid(sigmsg),
+			}
+			j.Error = err
+			return j
+		})
+
+		if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions); err != nil {
 			// TODO: This is also potentially really bad, but we try to post anyways
 			log.Errorf("checking sector faults: %v", err)
 		}
+
+		journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStFaults], func() interface{} {
+			return WdPoStFaultsProcessedEvt{
+				evtCommon:    s.getEvtCommon(err),
+				Declarations: faults,
+				MessageCID:   optionalCid(sigmsg),
+			}
+		})
 	}()
 
 	buf := new(bytes.Buffer)
@@ -326,26 +400,40 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 		return nil, xerrors.Errorf("failed to get chain randomness for windowPost (ts=%d; deadline=%d): %w", ts.Height(), di, err)
 	}
 
+	// Get the partitions for the given deadline
 	partitions, err := s.api.StateMinerPartitions(ctx, s.actor, di.Index, ts.Key())
 	if err != nil {
 		return nil, xerrors.Errorf("getting partitions: %w", err)
 	}
 
-	params := &miner.SubmitWindowedPoStParams{
+	// Split partitions into batches, so as not to exceed the number of sectors
+	// allowed in a single message
+	partitionBatches, err := s.batchPartitions(partitions)
+	if err != nil {
+		return nil, err
+	}
+
+	// Generate proofs in batches
+	posts := make([]miner.SubmitWindowedPoStParams, 0, len(partitionBatches))
+	for batchIdx, batch := range partitionBatches {
+		batchPartitionStartIdx := 0
+		for _, batch := range partitionBatches[:batchIdx] {
+			batchPartitionStartIdx += len(batch)
+		}
+
+		params := miner.SubmitWindowedPoStParams{
 			Deadline:   di.Index,
-			Partitions: make([]miner.PoStPartition, 0, len(partitions)),
+			Partitions: make([]miner.PoStPartition, 0, len(batch)),
 			Proofs:     nil,
 		}
 
 		skipCount := uint64(0)
 		postSkipped := bitfield.New()
 		var postOut []proof.PoStProof
+		somethingToProve := true
 		for retries := 0; retries < 5; retries++ {
 			var sinfos []proof.SectorInfo
-			sidToPart := map[abi.SectorNumber]int{}
-
-			for partIdx, partition := range partitions {
+			for partIdx, partition := range batch {
 				// TODO: Can do this in parallel
 				toProve, err := partition.ActiveSectors()
 				if err != nil {
@@ -357,16 +445,16 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 					return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err)
 				}
 
-				toProve, err = bitfield.SubtractBitField(toProve, postSkipped)
-				if err != nil {
-					return nil, xerrors.Errorf("toProve - postSkipped: %w", err)
-				}
-
 				good, err := s.checkSectors(ctx, toProve)
 				if err != nil {
 					return nil, xerrors.Errorf("checking sectors to skip: %w", err)
 				}
 
+				good, err = bitfield.SubtractBitField(good, postSkipped)
+				if err != nil {
+					return nil, xerrors.Errorf("toProve - postSkipped: %w", err)
+				}
+
 				skipped, err := bitfield.SubtractBitField(toProve, good)
 				if err != nil {
 					return nil, xerrors.Errorf("toProve - good: %w", err)
@@ -389,21 +477,19 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 				}
 
 				sinfos = append(sinfos, ssi...)
-				for _, si := range ssi {
-					sidToPart[si.SectorNumber] = partIdx
-				}
-
 				params.Partitions = append(params.Partitions, miner.PoStPartition{
-					Index:   uint64(partIdx),
+					Index:   uint64(batchPartitionStartIdx + partIdx),
 					Skipped: skipped,
 				})
 			}
 
 			if len(sinfos) == 0 {
-				// nothing to prove..
-				return nil, errNoPartitions
+				// nothing to prove for this batch
+				somethingToProve = false
+				break
 			}
 
+			// Generate proof
 			log.Infow("running windowPost",
 				"chain-random", rand,
 				"deadline", di,
@@ -421,12 +507,15 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 			postOut, ps, err = s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, abi.PoStRandomness(rand))
 			elapsed := time.Since(tsStart)
 
-			log.Infow("computing window PoSt", "elapsed", elapsed)
+			log.Infow("computing window PoSt", "batch", batchIdx, "elapsed", elapsed)
 
 			if err == nil {
+				// Proof generation successful, stop retrying
 				break
 			}
 
+			// Proof generation failed, so retry
 			if len(ps) == 0 {
 				return nil, xerrors.Errorf("running post failed: %w", err)
 			}
@@ -439,23 +528,72 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 			}
 		}
 
+		// Nothing to prove for this batch, try the next batch
+		if !somethingToProve {
+			continue
+		}
+
 		if len(postOut) == 0 {
 			return nil, xerrors.Errorf("received no proofs back from generate window post")
 		}
 
 		params.Proofs = postOut
+
+		posts = append(posts, params)
+	}
 
+	// Compute randomness after generating proofs so as to reduce the impact
+	// of chain reorgs (which change randomness)
 	commEpoch := di.Open
 	commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil)
 	if err != nil {
-		return nil, xerrors.Errorf("failed to get chain randomness for windowPost (ts=%d; deadline=%d): %w", ts.Height(), di, err)
+		return nil, xerrors.Errorf("failed to get chain randomness for windowPost (ts=%d; deadline=%d): %w", ts.Height(), commEpoch, err)
 	}
-	params.ChainCommitEpoch = commEpoch
-	params.ChainCommitRand = commRand
 
-	log.Infow("submitting window PoSt")
+	for i := range posts {
+		posts[i].ChainCommitEpoch = commEpoch
+		posts[i].ChainCommitRand = commRand
+	}
 
-	return params, nil
+	return posts, nil
+}
+
+func (s *WindowPoStScheduler) batchPartitions(partitions []*miner.Partition) ([][]*miner.Partition, error) {
+	// Get the number of sectors allowed in a partition, for this proof size
+	sectorsPerPartition, err := builtin.PoStProofWindowPoStPartitionSectors(s.proofType)
+	if err != nil {
+		return nil, xerrors.Errorf("getting sectors per partition: %w", err)
+	}
+
+	// We don't want to exceed the number of sectors allowed in a message.
+	// So given the number of sectors in a partition, work out the number of
+	// partitions that can be in a message without exceeding sectors per
+	// message:
+	// floor(number of sectors allowed in a message / sectors per partition)
+	// eg:
+	// max sectors per message  7:  ooooooo
+	// sectors per partition    3:  ooo
+	// partitions per message   2:  oooOOO
+	//                              <1><2> (3rd doesn't fit)
+	partitionsPerMsg := int(miner.AddressedSectorsMax / sectorsPerPartition)
+
+	// The number of messages will be:
+	// ceiling(number of partitions / partitions per message)
+	batchCount := len(partitions) / partitionsPerMsg
+	if len(partitions)%partitionsPerMsg != 0 {
+		batchCount++
+	}
+
+	// Split the partitions into batches
+	batches := make([][]*miner.Partition, 0, batchCount)
+	for i := 0; i < len(partitions); i += partitionsPerMsg {
+		end := i + partitionsPerMsg
+		if end > len(partitions) {
+			end = len(partitions)
+		}
+		batches = append(batches, partitions[i:end])
+	}
+	return batches, nil
 }
 
 func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof.SectorInfo, error) {
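To make the floor/ceiling arithmetic in batchPartitions above concrete, here is a tiny self-contained program that reproduces it with invented numbers; 10000 addressable sectors per message and 2349 sectors per partition are placeholders for illustration only, since the real values come from miner.AddressedSectorsMax and builtin.PoStProofWindowPoStPartitionSectors:

package main

import "fmt"

func main() {
	// Placeholder values for illustration only.
	const addressedSectorsMax = 10000
	const sectorsPerPartition = 2349
	const numPartitions = 13

	// floor: whole partitions that fit in one message without exceeding
	// the per-message sector limit
	partitionsPerMsg := addressedSectorsMax / sectorsPerPartition

	// ceiling: number of messages needed to cover all partitions
	batchCount := numPartitions / partitionsPerMsg
	if numPartitions%partitionsPerMsg != 0 {
		batchCount++
	}

	// Prints: partitionsPerMsg=4 batchCount=4
	// (3 full messages of 4 partitions, plus 1 message with the leftover partition)
	fmt.Printf("partitionsPerMsg=%d batchCount=%d\n", partitionsPerMsg, batchCount)
}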
@@ -498,13 +636,15 @@ func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors,
 	return proofSectors, nil
 }
 
-func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.SubmitWindowedPoStParams) error {
+func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.SubmitWindowedPoStParams) (*types.SignedMessage, error) {
 	ctx, span := trace.StartSpan(ctx, "storage.commitPost")
 	defer span.End()
 
+	var sm *types.SignedMessage
+
 	enc, aerr := actors.SerializeParams(proof)
 	if aerr != nil {
-		return xerrors.Errorf("could not serialize submit post parameters: %w", aerr)
+		return nil, xerrors.Errorf("could not serialize submit post parameters: %w", aerr)
 	}
 
 	msg := &types.Message{
@@ -519,8 +659,9 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi
 
 	// TODO: consider maybe caring about the output
 	sm, err := s.api.MpoolPushMessage(ctx, msg, spec)
+
 	if err != nil {
-		return xerrors.Errorf("pushing message to mpool: %w", err)
+		return nil, xerrors.Errorf("pushing message to mpool: %w", err)
 	}
 
 	log.Infof("Submitted window post: %s", sm.Cid())
@@ -539,7 +680,7 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi
 		log.Errorf("Submitting window post %s failed: exit %d", sm.Cid(), rec.Receipt.ExitCode)
 	}()
 
-	return nil
+	return sm, nil
 }
 
 func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) {
305 storage/wdpost_run_test.go (new file)
@@ -0,0 +1,305 @@
package storage

import (
	"bytes"
	"context"
	"testing"

	"github.com/filecoin-project/go-state-types/dline"

	"golang.org/x/xerrors"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/specs-actors/actors/builtin"
	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
	"github.com/filecoin-project/specs-actors/actors/runtime/proof"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"
	tutils "github.com/filecoin-project/specs-actors/support/testing"
)

type mockStorageMinerAPI struct {
	partitions     []*miner.Partition
	pushedMessages chan *types.Message
}

func newMockStorageMinerAPI() *mockStorageMinerAPI {
	return &mockStorageMinerAPI{
		pushedMessages: make(chan *types.Message),
	}
}

func (m *mockStorageMinerAPI) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
	return abi.Randomness("ticket rand"), nil
}

func (m *mockStorageMinerAPI) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
	return abi.Randomness("beacon rand"), nil
}

func (m *mockStorageMinerAPI) setPartitions(ps []*miner.Partition) {
	m.partitions = append(m.partitions, ps...)
}

func (m *mockStorageMinerAPI) StateMinerPartitions(ctx context.Context, address address.Address, u uint64, key types.TipSetKey) ([]*miner.Partition, error) {
	return m.partitions, nil
}

func (m *mockStorageMinerAPI) StateMinerSectors(ctx context.Context, address address.Address, field *bitfield.BitField, b bool, key types.TipSetKey) ([]*api.ChainSectorInfo, error) {
	var sis []*api.ChainSectorInfo
	_ = field.ForEach(func(i uint64) error {
		sis = append(sis, &api.ChainSectorInfo{
			Info: miner.SectorOnChainInfo{
				SectorNumber: abi.SectorNumber(i),
			},
			ID: abi.SectorNumber(i),
		})
		return nil
	})
	return sis, nil
}

func (m *mockStorageMinerAPI) StateMinerInfo(ctx context.Context, address address.Address, key types.TipSetKey) (api.MinerInfo, error) {
	return api.MinerInfo{}, xerrors.Errorf("err")
}

func (m *mockStorageMinerAPI) MpoolPushMessage(ctx context.Context, message *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
	m.pushedMessages <- message
	return &types.SignedMessage{
		Message: *message,
	}, nil
}

func (m *mockStorageMinerAPI) StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) {
	return &api.MsgLookup{
		Receipt: types.MessageReceipt{
			ExitCode: 0,
		},
	}, nil
}

type mockProver struct {
}

func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) {
	panic("implement me")
}

func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []proof.SectorInfo, pr abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) {
	return []proof.PoStProof{
		{
			PoStProof:  abi.RegisteredPoStProof_StackedDrgWindow2KiBV1,
			ProofBytes: []byte("post-proof"),
		},
	}, nil, nil
}

type mockFaultTracker struct {
}

func (m mockFaultTracker) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, sectors []abi.SectorID) ([]abi.SectorID, error) {
	// CheckProvable returns "bad" sectors, so returning nil means all sectors are good
	return nil, nil
}

// TestWDPostDoPost verifies that doPost will send the correct number of window
// PoSt messages for a given number of partitions
func TestWDPostDoPost(t *testing.T) {
	ctx := context.Background()
	expectedMsgCount := 5

	proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
	postAct := tutils.NewIDAddr(t, 100)
	workerAct := tutils.NewIDAddr(t, 101)

	mockStgMinerAPI := newMockStorageMinerAPI()

	// Get the number of sectors allowed in a partition for this proof type
	sectorsPerPartition, err := builtin.PoStProofWindowPoStPartitionSectors(proofType)
	require.NoError(t, err)
	// Work out the number of partitions that can be included in a message
	// without exceeding the message sector limit
	partitionsPerMsg := int(miner.AddressedSectorsMax / sectorsPerPartition)

	// Enough partitions to fill expectedMsgCount-1 messages
	partitionCount := (expectedMsgCount - 1) * partitionsPerMsg
	// Add an extra partition that should be included in the last message
	partitionCount++

	var partitions []*miner.Partition
	for p := 0; p < partitionCount; p++ {
		sectors := bitfield.New()
		for s := uint64(0); s < sectorsPerPartition; s++ {
			sectors.Set(s)
		}
		partitions = append(partitions, &miner.Partition{
			Sectors: sectors,
		})
	}
	mockStgMinerAPI.setPartitions(partitions)

	// Run window PoSt
	scheduler := &WindowPoStScheduler{
		api:          mockStgMinerAPI,
		prover:       &mockProver{},
		faultTracker: &mockFaultTracker{},
		proofType:    proofType,
		actor:        postAct,
		worker:       workerAct,
	}

	di := &dline.Info{}
	ts := mockTipSet(t)
	scheduler.doPost(ctx, di, ts)

	// Read the window PoSt messages
	for i := 0; i < expectedMsgCount; i++ {
		msg := <-mockStgMinerAPI.pushedMessages
		require.Equal(t, builtin.MethodsMiner.SubmitWindowedPoSt, msg.Method)
		var params miner.SubmitWindowedPoStParams
		err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
		require.NoError(t, err)

		if i == expectedMsgCount-1 {
			// In the last message we only included a single partition (see above)
			require.Len(t, params.Partitions, 1)
		} else {
			// All previous messages should include the full number of partitions
			require.Len(t, params.Partitions, partitionsPerMsg)
		}
	}
}

func mockTipSet(t *testing.T) *types.TipSet {
	minerAct := tutils.NewActorAddr(t, "miner")
	c, err := cid.Decode("QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH")
	require.NoError(t, err)
	blks := []*types.BlockHeader{
		{
			Miner:                 minerAct,
			Height:                abi.ChainEpoch(1),
			ParentStateRoot:       c,
			ParentMessageReceipts: c,
			Messages:              c,
		},
	}
	ts, err := types.NewTipSet(blks)
	require.NoError(t, err)
	return ts
}

//
// All the mock methods below here are unused
//

func (m *mockStorageMinerAPI) StateCall(ctx context.Context, message *types.Message, key types.TipSetKey) (*api.InvocResult, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerDeadlines(ctx context.Context, maddr address.Address, tok types.TipSetKey) ([]*miner.Deadline, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateSectorPreCommitInfo(ctx context.Context, address address.Address, number abi.SectorNumber, key types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateSectorGetInfo(ctx context.Context, address address.Address, number abi.SectorNumber, key types.TipSetKey) (*miner.SectorOnChainInfo, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*api.SectorLocation, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerProvingDeadline(ctx context.Context, address address.Address, key types.TipSetKey) (*dline.Info, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerPreCommitDepositForPower(ctx context.Context, address address.Address, info miner.SectorPreCommitInfo, key types.TipSetKey) (types.BigInt, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerInitialPledgeCollateral(ctx context.Context, address address.Address, info miner.SectorPreCommitInfo, key types.TipSetKey) (types.BigInt, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateSearchMsg(ctx context.Context, cid cid.Cid) (*api.MsgLookup, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateGetReceipt(ctx context.Context, cid cid.Cid, key types.TipSetKey) (*types.MessageReceipt, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMarketStorageDeal(ctx context.Context, id abi.DealID, key types.TipSetKey) (*api.MarketDeal, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerFaults(ctx context.Context, address address.Address, key types.TipSetKey) (bitfield.BitField, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerRecoveries(ctx context.Context, address address.Address, key types.TipSetKey) (bitfield.BitField, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateAccountKey(ctx context.Context, address address.Address, key types.TipSetKey) (address.Address, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) GasEstimateMessageGas(ctx context.Context, message *types.Message, spec *api.MessageSendSpec, key types.TipSetKey) (*types.Message, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainHead(ctx context.Context) (*types.TipSet, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainGetBlockMessages(ctx context.Context, cid cid.Cid) (*api.BlockMessages, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainReadObj(ctx context.Context, cid cid.Cid) ([]byte, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainHasObj(ctx context.Context, cid cid.Cid) (bool, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) WalletSign(ctx context.Context, address address.Address, bytes []byte) (*crypto.Signature, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) WalletBalance(ctx context.Context, address address.Address) (types.BigInt, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) WalletHas(ctx context.Context, address address.Address) (bool, error) {
	panic("implement me")
}
@@ -17,6 +17,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
 	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
+	"github.com/filecoin-project/lotus/journal"
 	"github.com/filecoin-project/lotus/node/config"
 
 	"go.opencensus.io/trace"
@@ -41,8 +42,10 @@ type WindowPoStScheduler struct {
 	activeDeadline *dline.Info
 	abort          context.CancelFunc
 
-	//failed abi.ChainEpoch // eps
-	//failLk sync.Mutex
+	evtTypes [4]journal.EventType
+
+	// failed abi.ChainEpoch // eps
+	// failLk sync.Mutex
 }
 
 func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb storage.Prover, ft sectorstorage.FaultTracker, actor address.Address, worker address.Address) (*WindowPoStScheduler, error) {
@@ -66,6 +69,12 @@ func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb
 
 		actor:  actor,
 		worker: worker,
+		evtTypes: [...]journal.EventType{
+			evtTypeWdPoStScheduler:  journal.J.RegisterEventType("wdpost", "scheduler"),
+			evtTypeWdPoStProofs:     journal.J.RegisterEventType("wdpost", "proofs_processed"),
+			evtTypeWdPoStRecoveries: journal.J.RegisterEventType("wdpost", "recoveries_processed"),
+			evtTypeWdPoStFaults:     journal.J.RegisterEventType("wdpost", "faults_processed"),
+		},
 	}, nil
 }
 
@@ -111,12 +120,13 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) {
 			log.Errorf("expected first notif to have len = 1")
 			continue
 		}
-		if changes[0].Type != store.HCCurrent {
+		chg := changes[0]
+		if chg.Type != store.HCCurrent {
 			log.Errorf("expected first notif to tell current ts")
 			continue
 		}
 
-		if err := s.update(ctx, changes[0].Val); err != nil {
+		if err := s.update(ctx, chg.Val); err != nil {
 			log.Errorf("%+v", err)
 		}
 
@@ -220,10 +230,29 @@ func (s *WindowPoStScheduler) abortActivePoSt() {
 
 	if s.abort != nil {
 		s.abort()
-	}
 
-	log.Warnf("Aborting Window PoSt (Deadline: %+v)", s.activeDeadline)
+		journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
+			return WdPoStSchedulerEvt{
+				evtCommon: s.getEvtCommon(nil),
+				State:     SchedulerStateAborted,
+			}
+		})
+
+		log.Warnf("Aborting Window PoSt (Deadline: %+v)", s.activeDeadline)
+	}
 
 	s.activeDeadline = nil
 	s.abort = nil
 }
+
+// getEvtCommon populates and returns common attributes from state, for a
+// WdPoSt journal event.
+func (s *WindowPoStScheduler) getEvtCommon(err error) evtCommon {
+	c := evtCommon{Error: err}
+	if s.cur != nil {
+		c.Deadline = s.activeDeadline
+		c.Height = s.cur.Height()
+		c.TipSet = s.cur.Cids()
+	}
+	return c
+}
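A final note for readers of this diff: the evtTypes array is indexed by named constants (evtTypeWdPoStScheduler, evtTypeWdPoStProofs, evtTypeWdPoStRecoveries, evtTypeWdPoStFaults) whose definitions are not shown in these hunks; presumably they are a plain iota block alongside the event structs, along the lines of this sketch:

// Assumed definition; the actual constants live in the journal events
// file, which this excerpt does not include.
const (
	evtTypeWdPoStScheduler = iota
	evtTypeWdPoStProofs
	evtTypeWdPoStRecoveries
	evtTypeWdPoStFaults
)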