Merge remote-tracking branch 'origin/master' into next

Łukasz Magiera 2020-06-06 00:58:24 +02:00
commit 163c2b2544
14 changed files with 341 additions and 33 deletions

chain/store/index.go Normal file

@ -0,0 +1,157 @@
package store
import (
"context"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/specs-actors/actors/abi"
lru "github.com/hashicorp/golang-lru"
"golang.org/x/xerrors"
)
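// ChainIndex caches "lookback" entries at regular height intervals so that
// GetTipsetByHeight can skip backwards through the chain roughly skipLength
// epochs at a time instead of loading every parent tipset.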
type ChainIndex struct {
skipCache *lru.ARCCache
loadTipSet loadTipSetFunc
skipLength abi.ChainEpoch
}
type loadTipSetFunc func(types.TipSetKey) (*types.TipSet, error)
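// NewChainIndex creates a ChainIndex backed by an 8192-entry ARC cache,
// using lts to load tipsets on cache misses.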
func NewChainIndex(lts loadTipSetFunc) *ChainIndex {
sc, _ := lru.NewARC(8192)
return &ChainIndex{
skipCache: sc,
loadTipSet: lts,
skipLength: 20,
}
}
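// lbEntry is a single lookback cache entry: the tipset it describes, the
// height of that tipset's parent, and a skip "target" tipset roughly
// skipLength epochs further back in the chain.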
type lbEntry struct {
ts *types.TipSet
parentHeight abi.ChainEpoch
targetHeight abi.ChainEpoch
target types.TipSetKey
}
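// GetTipsetByHeight returns the tipset at epoch `to` on the chain that `from`
// belongs to, or, if `to` is a null round, the first non-null tipset above it.
// Short lookbacks are walked directly; longer ones hop through the skip cache.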
func (ci *ChainIndex) GetTipsetByHeight(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) {
if from.Height()-to <= ci.skipLength {
return ci.walkBack(from, to)
}
rounded, err := ci.roundDown(from)
if err != nil {
return nil, err
}
cur := rounded.Key()
for {
cval, ok := ci.skipCache.Get(cur)
if !ok {
fc, err := ci.fillCache(cur)
if err != nil {
return nil, err
}
cval = fc
}
lbe := cval.(*lbEntry)
if lbe.ts.Height() == to || lbe.parentHeight < to {
return lbe.ts, nil
} else if to > lbe.targetHeight {
return ci.walkBack(lbe.ts, to)
}
cur = lbe.target
}
}
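// GetTipsetByHeightWithoutCache bypasses the skip cache and walks back
// through parent tipsets one at a time.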
func (ci *ChainIndex) GetTipsetByHeightWithoutCache(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) {
return ci.walkBack(from, to)
}
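// fillCache loads the tipset identified by tsk and builds its lookback entry:
// the parent height and (for non-genesis tipsets) a skip target roughly
// skipLength epochs further back. The entry is added to the cache.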
func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) {
ts, err := ci.loadTipSet(tsk)
if err != nil {
return nil, err
}
if ts.Height() == 0 {
return &lbEntry{
ts: ts,
parentHeight: 0,
}, nil
}
// rheight will either be equal to ts.Height(), or at least be greater than the parent tipset's height
rheight := ci.roundHeight(ts.Height())
parent, err := ci.loadTipSet(ts.Parents())
if err != nil {
return nil, err
}
if parent.Height() > rheight {
return nil, xerrors.Errorf("cache is inconsistent")
}
rheight -= ci.skipLength
skipTarget, err := ci.walkBack(parent, rheight)
if err != nil {
return nil, err
}
lbe := &lbEntry{
ts: ts,
parentHeight: parent.Height(),
targetHeight: skipTarget.Height(),
target: skipTarget.Key(),
}
ci.skipCache.Add(tsk, lbe)
return lbe, nil
}
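// roundHeight rounds h down to the nearest multiple of skipLength.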
func (ci *ChainIndex) roundHeight(h abi.ChainEpoch) abi.ChainEpoch {
return abi.ChainEpoch(h/ci.skipLength) * ci.skipLength
}
func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) {
target := ci.roundHeight(ts.Height())
rounded, err := ci.walkBack(ts, target)
if err != nil {
return nil, err
}
return rounded, nil
}
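// walkBack walks from `from` towards genesis, one parent at a time, and
// returns the tipset at epoch `to`, or, if `to` is a null round, the first
// non-null tipset above it.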
func (ci *ChainIndex) walkBack(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) {
if to > from.Height() {
return nil, xerrors.Errorf("looking for tipset with height greater than start point")
}
if to == from.Height() {
return from, nil
}
ts := from
for {
pts, err := ci.loadTipSet(ts.Parents())
if err != nil {
return nil, err
}
if to > pts.Height() {
return ts, nil
}
if to == pts.Height() {
return pts, nil
}
ts = pts
}
}
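A minimal usage sketch (not part of this commit; it assumes a ChainStore value wired up as in the ChainStore diff below, whose LoadTipSet method matches loadTipSetFunc, and an arbitrary target epoch):

// Sketch only: ci skips through the chain in ~skipLength-epoch hops, falling
// back to a parent-by-parent walk for the final stretch.
func lookupExample(ctx context.Context, cs *ChainStore, head *types.TipSet) (*types.TipSet, error) {
	ci := NewChainIndex(cs.LoadTipSet)
	// Returns the tipset at epoch 1000 (which must not exceed head.Height()),
	// or, if that epoch is a null round, the first non-null tipset above it.
	return ci.GetTipsetByHeight(ctx, head, abi.ChainEpoch(1000))
}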


@ -62,6 +62,8 @@ type ChainStore struct {
tstLk sync.Mutex
tipsets map[abi.ChainEpoch][]cid.Cid
cindex *ChainIndex
reorgCh chan<- reorg
headChangeNotifs []func(rev, app []*types.TipSet) error
@ -84,6 +86,10 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls runtime.Sys
vmcalls: vmcalls,
}
ci := NewChainIndex(cs.LoadTipSet)
cs.cindex = ci
cs.reorgCh = cs.reorgWorker(context.TODO())
hcnf := func(rev, app []*types.TipSet) error {
@ -946,7 +952,7 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t
}
if h > ts.Height() {
return nil, xerrors.Errorf("looking for tipset with height less than start point")
return nil, xerrors.Errorf("looking for tipset with height greater than start point")
}
if h == ts.Height() {
@ -957,24 +963,24 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t
log.Warnf("expensive call to GetTipsetByHeight, seeking %d levels", ts.Height()-h)
}
for {
pts, err := cs.LoadTipSet(ts.Parents())
lbts, err := cs.cindex.GetTipsetByHeight(ctx, ts, h)
if err != nil {
return nil, err
}
if h > pts.Height() {
if prev {
return pts, nil
if lbts.Height() < h {
log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h)
lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ts, h)
if err != nil {
return nil, err
}
return ts, nil
}
if h == pts.Height() {
return pts, nil
}
ts = pts
if lbts.Height() == h || !prev {
return lbts, nil
}
return cs.LoadTipSet(lbts.Parents())
}
func recurseLinks(bs blockstore.Blockstore, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {


@ -8,7 +8,9 @@ import (
"text/tabwriter"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/multiformats/go-multibase"
"golang.org/x/xerrors"
"github.com/urfave/cli/v2"
@ -21,6 +23,32 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)
var CidBaseFlag = cli.StringFlag{
Name: "cid-base",
Hidden: true,
Value: "base32",
Usage: "Multibase encoding used for version 1 CIDs in output.",
DefaultText: "base32",
}
// GetCidEncoder returns an encoder using the `cid-base` flag if provided, or
// the default (Base32) encoder if not.
func GetCidEncoder(cctx *cli.Context) (cidenc.Encoder, error) {
val := cctx.String("cid-base")
e := cidenc.Encoder{Base: multibase.MustNewEncoder(multibase.Base32)}
if val != "" {
var err error
e.Base, err = multibase.EncoderByName(val)
if err != nil {
return e, err
}
}
return e, nil
}
var clientCmd = &cli.Command{
Name: "client",
Usage: "Make deals, store data, retrieve data",
@ -46,6 +74,7 @@ var clientImportCmd = &cli.Command{
Name: "car",
Usage: "import from a car file instead of a regular file",
},
&CidBaseFlag,
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
@ -67,7 +96,14 @@ var clientImportCmd = &cli.Command{
if err != nil {
return err
}
fmt.Println(c.String())
encoder, err := GetCidEncoder(cctx)
if err != nil {
return err
}
fmt.Println(encoder.Encode(c))
return nil
},
}
@ -76,6 +112,9 @@ var clientCommPCmd = &cli.Command{
Name: "commP",
Usage: "calculate the piece-cid (commP) of a CAR file",
ArgsUsage: "[inputFile minerAddress]",
Flags: []cli.Flag{
&CidBaseFlag,
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@ -94,11 +133,16 @@ var clientCommPCmd = &cli.Command{
}
ret, err := api.ClientCalcCommP(ctx, cctx.Args().Get(0), miner)
if err != nil {
return err
}
fmt.Println("CID: ", ret.Root)
encoder, err := GetCidEncoder(cctx)
if err != nil {
return err
}
fmt.Println("CID: ", encoder.Encode(ret.Root))
fmt.Println("Piece size: ", ret.Size)
return nil
},
@ -137,6 +181,9 @@ var clientCarGenCmd = &cli.Command{
var clientLocalCmd = &cli.Command{
Name: "local",
Usage: "List locally imported data",
Flags: []cli.Flag{
&CidBaseFlag,
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@ -149,8 +196,14 @@ var clientLocalCmd = &cli.Command{
if err != nil {
return err
}
encoder, err := GetCidEncoder(cctx)
if err != nil {
return err
}
for _, v := range list {
fmt.Printf("%s %s %d %s\n", v.Key, v.FilePath, v.Size, v.Status)
fmt.Printf("%s %s %d %s\n", encoder.Encode(v.Key), v.FilePath, v.Size, v.Status)
}
return nil
},
@ -178,6 +231,7 @@ var clientDealCmd = &cli.Command{
Usage: "specify the epoch that the deal should start at",
Value: -1,
},
&CidBaseFlag,
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
@ -263,7 +317,13 @@ var clientDealCmd = &cli.Command{
return err
}
fmt.Println(proposal)
encoder, err := GetCidEncoder(cctx)
if err != nil {
return err
}
fmt.Println(encoder.Encode(*proposal))
return nil
},
}


@ -24,7 +24,8 @@ var fetchParamCmd = &cli.Command{
return err
}
sectorSize := uint64(sectorSizeInt)
err = paramfetch.GetParams(build.ParametersJSON(), sectorSize)
err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), sectorSize)
if err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}


@ -20,6 +20,7 @@ import (
"github.com/filecoin-project/go-address"
paramfetch "github.com/filecoin-project/go-paramfetch"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/sector-storage/ffiwrapper/basicfs"
"github.com/filecoin-project/sector-storage/stores"
@ -215,7 +216,7 @@ var sealBenchCmd = &cli.Command{
// Only fetch parameters if actually needed
if !c.Bool("skip-commit2") {
if err := paramfetch.GetParams(build.ParametersJSON(), uint64(sectorSize)); err != nil {
if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), uint64(sectorSize)); err != nil {
return xerrors.Errorf("getting params: %w", err)
}
}
@ -620,7 +621,7 @@ var proveCmd = &cli.Command{
return xerrors.Errorf("unmarshalling input file: %w", err)
}
if err := paramfetch.GetParams(build.ParametersJSON(), c2in.SectorSize); err != nil {
if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), c2in.SectorSize); err != nil {
return xerrors.Errorf("getting params: %w", err)
}


@ -148,7 +148,7 @@ var runCmd = &cli.Command{
}
if cctx.Bool("commit") {
if err := paramfetch.GetParams(build.ParametersJSON(), uint64(ssize)); err != nil {
if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil {
return xerrors.Errorf("get params: %w", err)
}
}

View File

@ -3,6 +3,7 @@ package main
import (
"github.com/docker/go-units"
paramfetch "github.com/filecoin-project/go-paramfetch"
lcli "github.com/filecoin-project/lotus/cli"
"golang.org/x/xerrors"
"github.com/urfave/cli/v2"
@ -24,7 +25,7 @@ var fetchParamCmd = &cli.Command{
return err
}
sectorSize := uint64(sectorSizeInt)
err = paramfetch.GetParams(build.ParametersJSON(), sectorSize)
err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), sectorSize)
if err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}


@ -125,8 +125,11 @@ var initCmd = &cli.Command{
log.Info("will attempt to symlink to imported sectors")
}
ctx := lcli.ReqContext(cctx)
log.Info("Checking proof parameters")
if err := paramfetch.GetParams(build.ParametersJSON(), uint64(ssize)); err != nil {
if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
@ -137,7 +140,6 @@ var initCmd = &cli.Command{
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
log.Info("Checking full node sync status")


@ -155,7 +155,7 @@ var DaemonCmd = &cli.Command{
return xerrors.Errorf("repo init error: %w", err)
}
if err := paramfetch.GetParams(build.ParametersJSON(), 0); err != nil {
if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), 0); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}


@ -0,0 +1,70 @@
# Why does Filecoin mining work best on AMD?

Currently, Filecoin's Proof of Replication (PoRep) prefers to be run on AMD
processors. More accurately, it runs much, much slower on Intel CPUs (it runs
competitively fast on some ARM processors, like the ones in newer Samsung
phones, but those lack the RAM to seal the larger sector sizes). The main reason
we see this benefit on AMD processors is their implementation of the SHA
hardware instructions. So why do we use the SHA instructions in the first place?

## PoRep security assumptions

Our research team has two different models for the security of Proofs of
Replication: the Latency Assumption and the Cost Assumption. These assumptions
are arguments for why an attacker cannot pull off a 'regeneration attack'.
That is, the attacker cannot seal and commit random data (generated by a
function), delete it, and then reseal it on the fly to respond to PoSt
challenges, without actually storing the data for that time period.

### Cost Assumptions

The cost assumption states that the real-money cost (hardware, electricity,
etc.) of generating a sector is higher than the real-money cost of simply
storing it on disks. NSE is a new PoRep our research team is working on that is
based on the cost assumption, and it can therefore be highly parallelized (in
contrast to schemes based on a latency assumption, as explained next). However,
cost assumptions vary greatly with available and hypothetical hardware. For
example, someone making an ASIC for NSE could break the cost assumption by
lowering the cost of sealing too much. This is one of our main hesitations
around shipping NSE.

### Latency Assumptions

A Proof of Replication that is secure under a latency assumption is secure
because an attacker cannot regenerate the data in time. We use this assumption
for SDR, where we assume that an attacker cannot regenerate enough of a sector
fast enough to respond to a PoSt. The way we achieve this is through the use
of depth-robust graphs. Without going into too much detail, depth-robust
graphs guarantee a minimum number of serial operations to compute an encoding
based on the graph. Each edge in the graph represents an operation we need to
perform. We thus have a guarantee that someone has to perform some operation
N times in a row in order to compute the encoding. That means the computation
of the encoding must take at least N times as long as the fastest anyone can
perform that single operation.

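To make the serial-work argument concrete, here is a rough sketch (this is not
Filecoin's actual SDR encoding, and the chain length is an arbitrary
illustration): each hash takes the previous digest as input, so the N
invocations cannot be parallelized and the total time is bounded below by N
times the latency of a single hardware-accelerated SHA-256.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// serialSHA computes a chain of n SHA-256 hashes. Step i depends on the
// output of step i-1, so the work is inherently sequential: no amount of
// parallel hardware can finish in less than n * (single-hash latency).
func serialSHA(seed []byte, n int) [32]byte {
	d := sha256.Sum256(seed)
	for i := 1; i < n; i++ {
		d = sha256.Sum256(d[:])
	}
	return d
}

func main() {
	// 1<<20 iterations is a demo value, not an SDR parameter.
	fmt.Printf("%x\n", serialSHA([]byte("seed"), 1<<20))
}
```
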
Now, to make this secure, we need to choose an operation that can't be made
much faster. There are many potential candidates here, depending on what
hardware you want to require. We opted not to require ASICs in order to mine
Filecoin, so that limits our choices severely: we have to look at what
operations CPUs are really good at. One candidate was AES encryption, which
also has hardware instructions. However, the difference between the performance
of CPU AES instructions and the hypothetical 'best' performance you could get
was still too great. This gap is generally called 'Amax', an attacker's maximum
advantage. The higher the Amax of the algorithm we choose, the more expensive
the overall process has to become in order to bound how fast the attacker could
do it. For example, if an attacker's best hardware could compute the chosen
operation ten times faster than a commodity CPU, the honest encoding would need
roughly ten times more serial work (and thus cost) to preserve the same latency
guarantee.

As we were doing our research, we noticed that AMD shipped their new processors
with built-in SHA instructions, and we looked into how fast someone could
possibly compute a SHA hash. We found that AMD's implementation is only around
3 times slower than the fastest anyone could reasonably achieve (given estimates
by the hardware engineers at [Supranational](https://www.supranational.net/)).
This is incredibly impressive for something you can get in consumer hardware.
With this, we were able to make SDR sealing reasonably performant for people
with off-the-shelf hardware.

## Super Optimized CPUs

Given all of the above, with the latency assumption that our proofs are based
on right now, you need a processor that can do iterated SHA hashes really fast.
As mentioned earlier, this isn't limited to AMD: many ARM processors also
support these instructions, and hopefully new Intel processors will follow
suit. But for now, Filecoin works best on AMD processors.

go.mod

@ -26,7 +26,7 @@ require (
github.com/filecoin-project/go-fil-markets v0.2.7
github.com/filecoin-project/go-jsonrpc v0.1.1-0.20200602181149-522144ab4e24
github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200505180321-973f8949ea8e
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200605171344-fcac609550ca
github.com/filecoin-project/go-statestore v0.1.0
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
github.com/filecoin-project/sector-storage v0.0.0-20200605192746-4b9317d1f08f
@ -46,6 +46,7 @@ require (
github.com/ipfs/go-block-format v0.0.2
github.com/ipfs/go-blockservice v0.1.3
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00
github.com/ipfs/go-cidutil v0.0.2
github.com/ipfs/go-datastore v0.4.4
github.com/ipfs/go-ds-badger2 v0.1.0
github.com/ipfs/go-ds-leveldb v0.4.2
@ -53,7 +54,7 @@ require (
github.com/ipfs/go-filestore v1.0.0
github.com/ipfs/go-fs-lock v0.0.1
github.com/ipfs/go-graphsync v0.0.6-0.20200504202014-9d5f2c26a103
github.com/ipfs/go-hamt-ipld v0.1.1-0.20200501020327-d53d20a7063e
github.com/ipfs/go-hamt-ipld v0.1.1-0.20200605182717-0310ad2b0b1f
github.com/ipfs/go-ipfs-blockstore v1.0.0
github.com/ipfs/go-ipfs-chunker v0.0.5
github.com/ipfs/go-ipfs-ds-help v1.0.0
@ -99,6 +100,7 @@ require (
github.com/multiformats/go-multiaddr v0.2.2
github.com/multiformats/go-multiaddr-dns v0.2.0
github.com/multiformats/go-multiaddr-net v0.1.5
github.com/multiformats/go-multibase v0.0.2
github.com/multiformats/go-multihash v0.0.13
github.com/opentracing/opentracing-go v1.1.0
github.com/stretchr/objx v0.2.0 // indirect

go.sum

@ -210,8 +210,8 @@ github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6 h1:9
github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6/go.mod h1:0HgYnrkeSU4lu1p+LEOeDpFsNBssa0OGGriWdA4hvaE=
github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200505180321-973f8949ea8e h1:R+HNoQWirMBOhQC+L1OpYUVbvMjB+jq1hx5LmLFvNfA=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200505180321-973f8949ea8e/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200605171344-fcac609550ca h1:OGykrCr6mSn/ckk2IFbIlkc76nsgEs7tSLhZXQt7+z4=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200605171344-fcac609550ca/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9 h1:k9qVR9ItcziSB2rxtlkN/MDWNlbsI6yzec+zjUatLW0=
github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ=
@ -412,6 +412,8 @@ github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj
github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 h1:QN88Q0kT2QiDaLxpR/SDsqOBtNIEF/F3n96gSDUimkA=
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo=
github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s=
github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
@ -449,6 +451,8 @@ github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQ
github.com/ipfs/go-hamt-ipld v0.0.15-0.20200204200533-99b8553ef242/go.mod h1:kq3Pi+UP3oHhAdKexE+kHHYRKMoFNuGero0R7q3hWGg=
github.com/ipfs/go-hamt-ipld v0.1.1-0.20200501020327-d53d20a7063e h1:Klv6s+kbuhh0JVpGFmFK2t6AtZxJfAnVneQHh1DlFOo=
github.com/ipfs/go-hamt-ipld v0.1.1-0.20200501020327-d53d20a7063e/go.mod h1:giiPqWYCnRBYpNTsJ/EX1ojldX5kTXrXYckSJQ7ko9M=
github.com/ipfs/go-hamt-ipld v0.1.1-0.20200605182717-0310ad2b0b1f h1:mchhWiYYUSoCuE3wDfRCo8cho5kqSoxkgnOtGcnNMZw=
github.com/ipfs/go-hamt-ipld v0.1.1-0.20200605182717-0310ad2b0b1f/go.mod h1:phOFBB7W73N9dg1glcb1fQ9HtQFDUpeyJgatW8ns0bw=
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ=


@ -70,7 +70,7 @@ func GetParams(sbc *ffiwrapper.Config) error {
return err
}
if err := paramfetch.GetParams(build.ParametersJSON(), uint64(ssize)); err != nil {
if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), uint64(ssize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}


@ -59,22 +59,26 @@ func (pm *Manager) waitForPaychCreateMsg(ctx context.Context, mcid cid.Cid) {
mwait, err := pm.state.StateWaitMsg(ctx, mcid, build.MessageConfidence)
if err != nil {
log.Errorf("wait msg: %w", err)
return
}
if mwait.Receipt.ExitCode != 0 {
log.Errorf("payment channel creation failed (exit code %d)", mwait.Receipt.ExitCode)
return
}
var decodedReturn init_.ExecReturn
err = decodedReturn.UnmarshalCBOR(bytes.NewReader(mwait.Receipt.Return))
if err != nil {
log.Error(err)
return
}
paychaddr := decodedReturn.RobustAddress
ci, err := pm.loadOutboundChannelInfo(ctx, paychaddr)
if err != nil {
log.Errorf("loading channel info: %w", err)
return
}
if err := pm.store.trackChannel(ci); err != nil {