Merge branch 'master' into release/v1.2.0

Aayush Rajasekaran 2020-11-17 19:40:05 -05:00
commit 68a16afeda
24 changed files with 300 additions and 199 deletions

View File

@@ -0,0 +1,152 @@
package init
import (
"bytes"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
typegen "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/chain/actors/adt"
)
func DiffAddressMap(pre, cur State) (*AddressMapChanges, error) {
prem, err := pre.addressMap()
if err != nil {
return nil, err
}
curm, err := cur.addressMap()
if err != nil {
return nil, err
}
preRoot, err := prem.Root()
if err != nil {
return nil, err
}
curRoot, err := curm.Root()
if err != nil {
return nil, err
}
results := new(AddressMapChanges)
// no change.
if curRoot.Equals(preRoot) {
return results, nil
}
err = adt.DiffAdtMap(prem, curm, &addressMapDiffer{results, pre, cur})
if err != nil {
return nil, err
}
return results, nil
}
type addressMapDiffer struct {
Results *AddressMapChanges
pre, after State
}
type AddressMapChanges struct {
Added []AddressPair
Modified []AddressChange
Removed []AddressPair
}
func (i *addressMapDiffer) AsKey(key string) (abi.Keyer, error) {
addr, err := address.NewFromBytes([]byte(key))
if err != nil {
return nil, err
}
return abi.AddrKey(addr), nil
}
func (i *addressMapDiffer) Add(key string, val *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
id := new(typegen.CborInt)
if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return err
}
idAddr, err := address.NewIDAddress(uint64(*id))
if err != nil {
return err
}
i.Results.Added = append(i.Results.Added, AddressPair{
ID: idAddr,
PK: pkAddr,
})
return nil
}
func (i *addressMapDiffer) Modify(key string, from, to *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
fromID := new(typegen.CborInt)
if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil {
return err
}
fromIDAddr, err := address.NewIDAddress(uint64(*fromID))
if err != nil {
return err
}
toID := new(typegen.CborInt)
if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil {
return err
}
toIDAddr, err := address.NewIDAddress(uint64(*toID))
if err != nil {
return err
}
i.Results.Modified = append(i.Results.Modified, AddressChange{
From: AddressPair{
ID: fromIDAddr,
PK: pkAddr,
},
To: AddressPair{
ID: toIDAddr,
PK: pkAddr,
},
})
return nil
}
func (i *addressMapDiffer) Remove(key string, val *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
id := new(typegen.CborInt)
if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return err
}
idAddr, err := address.NewIDAddress(uint64(*id))
if err != nil {
return err
}
i.Results.Removed = append(i.Results.Removed, AddressPair{
ID: idAddr,
PK: pkAddr,
})
return nil
}
type AddressChange struct {
From AddressPair
To AddressPair
}
type AddressPair struct {
ID address.Address
PK address.Address
}
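
The file above adds a generic differ for the init actor's address map. Below is a minimal usage sketch, not part of this change: a hypothetical helper in the same package showing how a caller might consume the result of DiffAddressMap (assumes "fmt" is also imported).

// summarizeAddressChanges is a hypothetical example, not part of this commit.
// Given two init actor states (e.g. loaded at two different tipsets), it
// prints every address-map entry that was added, modified or removed.
func summarizeAddressChanges(pre, cur State) error {
	changes, err := DiffAddressMap(pre, cur)
	if err != nil {
		return err
	}
	for _, a := range changes.Added {
		fmt.Printf("added:    %s -> %s\n", a.PK, a.ID)
	}
	for _, m := range changes.Modified {
		fmt.Printf("modified: %s: %s -> %s\n", m.From.PK, m.From.ID, m.To.ID)
	}
	for _, r := range changes.Removed {
		fmt.Printf("removed:  %s -> %s\n", r.PK, r.ID)
	}
	return nil
}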

View File

@@ -57,4 +57,6 @@ type State interface {
// Sets the network's name. This should only be used on upgrade/fork.
SetNetworkName(name string) error
addressMap() (adt.Map, error)
}

View File

@@ -79,3 +79,7 @@ func (s *state0) Remove(addrs ...address.Address) (err error) {
s.State.AddressMap = amr
return nil
}
func (s *state0) addressMap() (adt.Map, error) {
return adt0.AsMap(s.store, s.AddressMap)
}

View File

@@ -79,3 +79,7 @@ func (s *state2) Remove(addrs ...address.Address) (err error) {
s.State.AddressMap = amr
return nil
}
func (s *state2) addressMap() (adt.Map, error) {
return adt2.AsMap(s.store, s.AddressMap)
}

View File

@@ -1,7 +1,6 @@
package state
import (
"bytes"
"context"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@@ -10,7 +9,6 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
cbor "github.com/ipfs/go-ipld-cbor"
typegen "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/chain/actors/adt"
@@ -419,179 +417,17 @@ type AddressPair struct {
PK address.Address
}
type InitActorAddressChanges struct {
Added []AddressPair
Modified []AddressChange
Removed []AddressPair
}
type AddressChange struct {
From AddressPair
To AddressPair
}
type DiffInitActorStateFunc func(ctx context.Context, oldState init_.State, newState init_.State) (changed bool, user UserData, err error)
func (i *InitActorAddressChanges) AsKey(key string) (abi.Keyer, error) {
addr, err := address.NewFromBytes([]byte(key))
if err != nil {
return nil, err
}
return abi.AddrKey(addr), nil
}
func (i *InitActorAddressChanges) Add(key string, val *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
id := new(typegen.CborInt)
if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return err
}
idAddr, err := address.NewIDAddress(uint64(*id))
if err != nil {
return err
}
i.Added = append(i.Added, AddressPair{
ID: idAddr,
PK: pkAddr,
})
return nil
}
func (i *InitActorAddressChanges) Modify(key string, from, to *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
fromID := new(typegen.CborInt)
if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil {
return err
}
fromIDAddr, err := address.NewIDAddress(uint64(*fromID))
if err != nil {
return err
}
toID := new(typegen.CborInt)
if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil {
return err
}
toIDAddr, err := address.NewIDAddress(uint64(*toID))
if err != nil {
return err
}
i.Modified = append(i.Modified, AddressChange{
From: AddressPair{
ID: fromIDAddr,
PK: pkAddr,
},
To: AddressPair{
ID: toIDAddr,
PK: pkAddr,
},
})
return nil
}
func (i *InitActorAddressChanges) Remove(key string, val *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
id := new(typegen.CborInt)
if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return err
}
idAddr, err := address.NewIDAddress(uint64(*id))
if err != nil {
return err
}
i.Removed = append(i.Removed, AddressPair{
ID: idAddr,
PK: pkAddr,
})
return nil
}
func (sp *StatePredicates) OnAddressMapChange() DiffInitActorStateFunc {
return func(ctx context.Context, oldState, newState init_.State) (changed bool, user UserData, err error) {
addressChanges := &InitActorAddressChanges{
Added: []AddressPair{},
Modified: []AddressChange{},
Removed: []AddressPair{},
}
err = oldState.ForEachActor(func(oldId abi.ActorID, oldAddress address.Address) error {
oldIdAddress, err := address.NewIDAddress(uint64(oldId))
if err != nil {
return err
}
newIdAddress, found, err := newState.ResolveAddress(oldAddress)
if err != nil {
return err
}
if !found {
addressChanges.Removed = append(addressChanges.Removed, AddressPair{
ID: oldIdAddress,
PK: oldAddress,
})
}
if oldIdAddress != newIdAddress {
addressChanges.Modified = append(addressChanges.Modified, AddressChange{
From: AddressPair{
ID: oldIdAddress,
PK: oldAddress,
},
To: AddressPair{
ID: newIdAddress,
PK: oldAddress,
},
})
}
return nil
})
addressChanges, err := init_.DiffAddressMap(oldState, newState)
if err != nil {
return false, nil, err
}
err = newState.ForEachActor(func(newId abi.ActorID, newAddress address.Address) error {
newIdAddress, err := address.NewIDAddress(uint64(newId))
if err != nil {
return err
}
_, found, err := newState.ResolveAddress(newAddress)
if err != nil {
return err
}
if !found {
addressChanges.Added = append(addressChanges.Added, AddressPair{
ID: newIdAddress,
PK: newAddress,
})
}
return nil
})
if err != nil {
return false, nil, err
}
if len(addressChanges.Added)+len(addressChanges.Removed)+len(addressChanges.Modified) == 0 {
if len(addressChanges.Added)+len(addressChanges.Modified)+len(addressChanges.Removed) == 0 {
return false, nil, nil
}
return true, addressChanges, nil
}
}

View File

@@ -32,6 +32,7 @@ func TestIndexSeeks(t *testing.T) {
nbs := blockstore.NewTemporarySync()
cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil, nil)
defer cs.Close() //nolint:errcheck
_, err = cs.Import(bytes.NewReader(gencar))
if err != nil {

View File

@@ -134,24 +134,30 @@ type ChainStore struct {
evtTypes [1]journal.EventType
journal journal.Journal
cancelFn context.CancelFunc
wg sync.WaitGroup
}
// localbs is guaranteed to fail Get* if requested block isn't stored locally
func NewChainStore(bs bstore.Blockstore, localbs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore {
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
mmCache, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsCache, _ := lru.NewARC(DefaultTipSetCacheSize)
if j == nil {
j = journal.NilJournal()
}
ctx, cancel := context.WithCancel(context.Background())
cs := &ChainStore{
bs: bs,
localbs: localbs,
ds: ds,
bestTips: pubsub.New(64),
tipsets: make(map[abi.ChainEpoch][]cid.Cid),
mmCache: c,
tsCache: tsc,
mmCache: mmCache,
tsCache: tsCache,
vmcalls: vmcalls,
cancelFn: cancel,
journal: j,
}
@@ -191,19 +197,24 @@ func NewChainStore(bs bstore.Blockstore, localbs bstore.Blockstore, ds dstore.Ba
}
hcmetric := func(rev, app []*types.TipSet) error {
ctx := context.Background()
for _, r := range app {
stats.Record(ctx, metrics.ChainNodeHeight.M(int64(r.Height())))
stats.Record(context.Background(), metrics.ChainNodeHeight.M(int64(r.Height())))
}
return nil
}
cs.reorgNotifeeCh = make(chan ReorgNotifee)
cs.reorgCh = cs.reorgWorker(context.TODO(), []ReorgNotifee{hcnf, hcmetric})
cs.reorgCh = cs.reorgWorker(ctx, []ReorgNotifee{hcnf, hcmetric})
return cs
}
func (cs *ChainStore) Close() error {
cs.cancelFn()
cs.wg.Wait()
return nil
}
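
Close is new here: the ChainStore now carries a cancelFn and a sync.WaitGroup so shutdown can cancel the reorg worker and block until it exits. The following is a minimal sketch of that cancel-and-wait pattern in isolation (hypothetical worker type, not part of this commit; assumes "context" and "sync" are imported).

// worker is a hypothetical illustration of the shutdown pattern used by
// ChainStore above: a background goroutine tied to a cancellable context
// plus a WaitGroup, so Close can wait for the goroutine to finish.
type worker struct {
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

func startWorker() *worker {
	ctx, cancel := context.WithCancel(context.Background())
	w := &worker{cancel: cancel}
	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		<-ctx.Done() // stand-in for the real reorg loop
	}()
	return w
}

// Close stops the background goroutine and blocks until it has finished.
func (w *worker) Close() error {
	w.cancel()
	w.wg.Wait()
	return nil
}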
func (cs *ChainStore) Load() error {
head, err := cs.ds.Get(chainHeadKey)
if err == dstore.ErrNotFound {
@@ -383,7 +394,7 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
// particular tipset to carry out a benchmark, verification, etc. on a chain
// segment.
func (cs *ChainStore) ForceHeadSilent(_ context.Context, ts *types.TipSet) error {
log.Warnf("(!!!) forcing a new head silently; only use this only for testing; new head: %s", ts)
log.Warnf("(!!!) forcing a new head silently; new head: %s", ts)
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
@@ -406,7 +417,9 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
notifees := make([]ReorgNotifee, len(initialNotifees))
copy(notifees, initialNotifees)
cs.wg.Add(1)
go func() {
defer cs.wg.Done()
defer log.Warn("reorgWorker quit")
for {

View File

@@ -71,6 +71,7 @@ func BenchmarkGetRandomness(b *testing.B) {
}
cs := store.NewChainStore(bs, bs, mds, nil, nil)
defer cs.Close() //nolint:errcheck
b.ResetTimer()
@@ -105,6 +106,7 @@ func TestChainExportImport(t *testing.T) {
nbs := blockstore.NewTemporary()
cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil)
defer cs.Close() //nolint:errcheck
root, err := cs.Import(buf)
if err != nil {
@@ -139,6 +141,8 @@ func TestChainExportImportFull(t *testing.T) {
nbs := blockstore.NewTemporary()
cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil)
defer cs.Close() //nolint:errcheck
root, err := cs.Import(buf)
if err != nil {
t.Fatal(err)

View File

@@ -119,7 +119,7 @@ var stateMinerInfo = &cli.Command{
}
fmt.Printf("PeerID:\t%s\n", mi.PeerId)
fmt.Printf("Multiaddrs: \t")
fmt.Printf("Multiaddrs:\t")
for _, addr := range mi.Multiaddrs {
a, err := multiaddr.NewMultiaddrBytes(addr)
if err != nil {
@@ -127,6 +127,7 @@ var stateMinerInfo = &cli.Command{
}
fmt.Printf("%s ", a)
}
fmt.Printf("Consensus Fault End:\t%d\n", mi.ConsensusFaultElapsed)
fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize)
pow, err := api.StateMinerPower(ctx, addr, ts.Key())

View File

@@ -263,6 +263,8 @@ var importBenchCmd = &cli.Command{
metadataDs := datastore.NewMapDatastore()
cs := store.NewChainStore(bs, bs, metadataDs, vm.Syscalls(verifier), nil)
defer cs.Close() //nolint:errcheck
stm := stmgr.NewStateManager(cs)
startTime := time.Now()

View File

@@ -189,6 +189,7 @@ var chainBalanceStateCmd = &cli.Command{
}
cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
defer cs.Close() //nolint:errcheck
cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst)
@@ -409,6 +410,7 @@ var chainPledgeCmd = &cli.Command{
}
cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
defer cs.Close() //nolint:errcheck
cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst)

View File

@@ -91,6 +91,8 @@ var exportChainCmd = &cli.Command{
}
cs := store.NewChainStore(bs, bs, mds, nil, nil)
defer cs.Close() //nolint:errcheck
if err := cs.Load(); err != nil {
return err
}

View File

@@ -53,6 +53,7 @@ var genesisVerifyCmd = &cli.Command{
bs := blockstore.NewBlockstore(datastore.NewMapDatastore())
cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), nil, nil)
defer cs.Close() //nolint:errcheck
cf := cctx.Args().Get(0)
f, err := os.Open(cf)

View File

@@ -170,6 +170,8 @@ var stateTreePruneCmd = &cli.Command{
}
cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
defer cs.Close() //nolint:errcheck
if err := cs.Load(); err != nil {
return fmt.Errorf("loading chainstore: %w", err)
}

View File

@@ -410,14 +410,6 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
return xerrors.Errorf("failed to open blockstore: %w", err)
}
defer func() {
if c, ok := bs.(io.Closer); ok {
if err := c.Close(); err != nil {
log.Warnf("failed to close blockstore: %s", err)
}
}
}()
mds, err := lr.Datastore("/metadata")
if err != nil {
return err
@@ -427,7 +419,9 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
if err != nil {
return xerrors.Errorf("failed to open journal: %w", err)
}
cst := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), j)
defer cst.Close() //nolint:errcheck
log.Infof("importing chain from %s...", fname)
@@ -472,7 +466,7 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
}
log.Infof("accepting %s as new head", ts.Cids())
if err := cst.SetHead(ts); err != nil {
if err := cst.ForceHeadSilent(context.Background(), ts); err != nil {
return err
}

View File

@@ -91,6 +91,8 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot
sm = stmgr.NewStateManager(cs)
)
defer cs.Close() //nolint:errcheck
blocks := make([]store.BlockMessages, 0, len(tipset.Blocks))
for _, b := range tipset.Blocks {
sb := store.BlockMessages{

View File

@@ -106,6 +106,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
),
PreCommitFailed: planOne(
on(SectorRetryPreCommit{}, PreCommitting),
on(SectorRetryPreCommitWait{}, PreCommitWait),
on(SectorRetryWaitSeed{}, WaitSeed),
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
on(SectorPreCommitLanded{}, WaitSeed),
@@ -125,6 +126,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorChainPreCommitFailed{}, PreCommitFailed),
on(SectorRetryPreCommit{}, PreCommitting),
on(SectorRetryCommitWait{}, CommitWait),
on(SectorRetrySubmitCommit{}, SubmitCommit),
on(SectorDealsExpired{}, DealsExpired),
on(SectorInvalidDealIDs{}, RecoverDealIDs),
on(SectorTicketExpired{}, Removing),

View File

@@ -77,6 +77,34 @@ func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, sector SectorI
return nil
}
if sector.PreCommitMessage != nil {
mw, err := m.api.StateSearchMsg(ctx.Context(), *sector.PreCommitMessage)
if err != nil {
// API error
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorRetryPreCommitWait{})
}
if mw == nil {
// API error in precommit
return ctx.Send(SectorRetryPreCommitWait{})
}
switch mw.Receipt.ExitCode {
case exitcode.Ok:
// API error in PreCommitWait
return ctx.Send(SectorRetryPreCommitWait{})
case exitcode.SysErrOutOfGas:
// API error in PreCommitWait AND gas estimator guessed a wrong number in PreCommit
return ctx.Send(SectorRetryPreCommit{})
default:
// something else went wrong
}
}
if err := checkPrecommit(ctx.Context(), m.Address(), sector, tok, height, m.api); err != nil {
switch err.(type) {
case *ErrApi:
@@ -160,6 +188,34 @@ func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo
return nil
}
if sector.CommitMessage != nil {
mw, err := m.api.StateSearchMsg(ctx.Context(), *sector.CommitMessage)
if err != nil {
// API error
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorRetryCommitWait{})
}
if mw == nil {
// API error in commit
return ctx.Send(SectorRetryCommitWait{})
}
switch mw.Receipt.ExitCode {
case exitcode.Ok:
// API error in CommitWait
return ctx.Send(SectorRetryCommitWait{})
case exitcode.SysErrOutOfGas:
// API error in CommitWait AND gas estimator guessed a wrong number in SubmitCommit
return ctx.Send(SectorRetrySubmitCommit{})
default:
// something else went wrong
}
}
if err := checkPrecommit(ctx.Context(), m.maddr, sector, tok, height, m.api); err != nil {
switch err.(type) {
case *ErrApi:

go.mod (4 changes)
View File

@@ -28,7 +28,7 @@ require (
github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
github.com/filecoin-project/go-data-transfer v1.1.0
github.com/filecoin-project/go-data-transfer v1.2.0
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49
@@ -68,7 +68,7 @@ require (
github.com/ipfs/go-filestore v1.0.0
github.com/ipfs/go-fs-lock v0.0.6
github.com/ipfs/go-graphsync v0.5.0
github.com/ipfs/go-ipfs-blockstore v1.0.2
github.com/ipfs/go-ipfs-blockstore v1.0.3-0.20201116142306-a33814d3b08f
github.com/ipfs/go-ipfs-chunker v0.0.5
github.com/ipfs/go-ipfs-ds-help v1.0.0
github.com/ipfs/go-ipfs-exchange-interface v0.0.1

go.sum (8 changes)
View File

@@ -249,8 +249,8 @@ github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMX
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
github.com/filecoin-project/go-data-transfer v1.0.1 h1:5sYKDbstyDsdJpVP4UGUW6+BgCNfgnH8hQgf0E3ZAno=
github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo=
github.com/filecoin-project/go-data-transfer v1.1.0 h1:eRciNLKKbAMkKUdNgLKSHQljQUwe12c4UirGlJ7DDBI=
github.com/filecoin-project/go-data-transfer v1.1.0/go.mod h1:ZAH51JZFR8NZC4FPiDPG+swjgui0q6zTMJbztc6pHhY=
github.com/filecoin-project/go-data-transfer v1.2.0 h1:LM+K+J+y9t8e3gYskJHWDlyHJsF6aaxoHOP+HIiVE1U=
github.com/filecoin-project/go-data-transfer v1.2.0/go.mod h1:ZAH51JZFR8NZC4FPiDPG+swjgui0q6zTMJbztc6pHhY=
github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ=
github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
@@ -569,8 +569,8 @@ github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86
github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU=
github.com/ipfs/go-ipfs-blockstore v1.0.1 h1:fnuVj4XdZp4yExhd0CnUwAiMNJHiPnfInhiuwz4lW1w=
github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE=
github.com/ipfs/go-ipfs-blockstore v1.0.2 h1:Z8nUlBHK7wVKPKliQCQR9tLgUtz4J2QRbqFcJrqzM+E=
github.com/ipfs/go-ipfs-blockstore v1.0.2/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE=
github.com/ipfs/go-ipfs-blockstore v1.0.3-0.20201116142306-a33814d3b08f h1:AQQb5zZj7KKTEFh9EaAUXc5Q+F7SbYkjfYogZnEzfUc=
github.com/ipfs/go-ipfs-blockstore v1.0.3-0.20201116142306-a33814d3b08f/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE=
github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=
github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw=

View File

@@ -111,13 +111,19 @@ func SetupFallbackBlockstore(cbs dtypes.ChainBlockstore, rem dtypes.ChainBitswap
return nil
}
func ChainStore(bs dtypes.ChainBlockstore, lbs dtypes.ChainRawBlockstore, ds dtypes.MetadataDS, syscalls vm.SyscallBuilder, j journal.Journal) *store.ChainStore {
func ChainStore(lc fx.Lifecycle, bs dtypes.ChainBlockstore, lbs dtypes.ChainRawBlockstore, ds dtypes.MetadataDS, syscalls vm.SyscallBuilder, j journal.Journal) *store.ChainStore {
chain := store.NewChainStore(bs, lbs, ds, syscalls, j)
if err := chain.Load(); err != nil {
log.Warnf("loading chain state from disk: %s", err)
}
lc.Append(fx.Hook{
OnStop: func(_ context.Context) error {
return chain.Close()
},
})
return chain
}

View File

@@ -3,6 +3,8 @@ package modules
import (
"bytes"
"context"
"os"
"path/filepath"
"time"
"github.com/filecoin-project/go-multistore"
@@ -116,13 +118,18 @@ func RegisterClientValidator(crv dtypes.ClientRequestValidator, dtm dtypes.Clien
// NewClientGraphsyncDataTransfer returns a data transfer manager that just
// uses the client's Client DAG service for transfers
func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Graphsync, ds dtypes.MetadataDS) (dtypes.ClientDataTransfer, error) {
func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Graphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ClientDataTransfer, error) {
sc := storedcounter.New(ds, datastore.NewKey("/datatransfer/client/counter"))
net := dtnet.NewFromLibp2pHost(h)
dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/client/transfers"))
transport := dtgstransport.NewTransport(h.ID(), gs)
dt, err := dtimpl.NewDataTransfer(dtDs, net, transport, sc)
err := os.MkdirAll(filepath.Join(r.Path(), "data-transfer"), 0755) //nolint: gosec
if err != nil && !os.IsExist(err) {
return nil, err
}
dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport, sc)
if err != nil {
return nil, err
}

View File

@@ -6,6 +6,8 @@ import (
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
"time"
"go.uber.org/fx"
@@ -279,13 +281,18 @@ func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api.
// NewProviderDAGServiceDataTransfer returns a data transfer manager that just
// uses the provider's Staging DAG service for transfers
func NewProviderDAGServiceDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.StagingGraphsync, ds dtypes.MetadataDS) (dtypes.ProviderDataTransfer, error) {
func NewProviderDAGServiceDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.StagingGraphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ProviderDataTransfer, error) {
sc := storedcounter.New(ds, datastore.NewKey("/datatransfer/provider/counter"))
net := dtnet.NewFromLibp2pHost(h)
dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/provider/transfers"))
transport := dtgstransport.NewTransport(h.ID(), gs)
dt, err := dtimpl.NewDataTransfer(dtDs, net, transport, sc)
err := os.MkdirAll(filepath.Join(r.Path(), "data-transfer"), 0755) //nolint: gosec
if err != nil && !os.IsExist(err) {
return nil, err
}
dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport, sc)
if err != nil {
return nil, err
}
@@ -709,9 +716,10 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error
return func(cfg sealiface.Config) (err error) {
err = mutateCfg(r, func(c *config.StorageMiner) {
c.Sealing = config.SealingConfig{
MaxWaitDealsSectors: cfg.MaxWaitDealsSectors,
MaxSealingSectors: cfg.MaxSealingSectors,
WaitDealsDelay: config.Duration(cfg.WaitDealsDelay),
MaxWaitDealsSectors: cfg.MaxWaitDealsSectors,
MaxSealingSectors: cfg.MaxSealingSectors,
MaxSealingSectorsForDeals: cfg.MaxSealingSectorsForDeals,
WaitDealsDelay: config.Duration(cfg.WaitDealsDelay),
}
})
return

View File

@@ -81,7 +81,7 @@ func MakeGenesis(outFile, genesisTemplate string) func(bs dtypes.ChainBlockstore
fmt.Printf("GENESIS MINER ADDRESS: t0%d\n", genesis2.MinerStart)
f, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY, 0644)
f, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
return nil, err
}