package client

import (
	"context"
	"os"

	"github.com/filecoin-project/go-fil-markets/filestorecaradapter"

	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/node/repo/importmgr"

	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-cidutil"
	"github.com/ipfs/go-datastore"
	ds_sync "github.com/ipfs/go-datastore/sync"
	"github.com/ipfs/go-filestore"
	chunker "github.com/ipfs/go-ipfs-chunker"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	files2 "github.com/ipfs/go-ipfs-files"
	ipld "github.com/ipfs/go-ipld-format"
	"github.com/ipfs/go-merkledag"
	"github.com/ipfs/go-unixfs/importer/balanced"
	ihelper "github.com/ipfs/go-unixfs/importer/helpers"

	"golang.org/x/xerrors"
)

// importNormalFileToFilestoreCARv2 transforms the client's "normal file" to a Unixfs IPLD DAG and writes out the DAG to a CARv2 file
// that can be used to back a filestore.
func (a *API) importNormalFileToFilestoreCARv2(ctx context.Context, importID importmgr.ImportID, inputFilePath string, outputCARv2Path string) (c cid.Cid, finalErr error) {
	// TODO: We've currently put in a hack to create the Unixfs DAG as a CARv2 without using Badger.
	// We first create the Unixfs DAG using a filestore to get the root of the Unixfs DAG.
	// We can't create the Unixfs DAG right away using a CARv2 read-write blockstore as the blockstore
	// needs the root of the DAG during instantiation to write out a valid CARv2 file.
	//
	// In the second pass, we create a CARv2 file with the root present, using the root node we get in the above step.
	// This hack should be fixed when CARv2 allows specifying the root AFTER finishing the CARv2 streaming write.
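
	// The first pass uses a throwaway, fully in-memory filestore: a MemorySync blockstore for the
	// intermediate nodes plus a MapDatastore-backed FileManager for the leaf references. Because the
	// import below runs with NoCopy, leaf blocks are recorded as references into the input file rather
	// than copies of its bytes, so this pass is cheap and exists only to learn the root CID.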
	fm := filestore.NewFileManager(ds_sync.MutexWrap(datastore.NewMapDatastore()), "/")
	fm.AllowFiles = true
	fstore := filestore.NewFilestore(bstore.NewMemorySync(), fm)
	bsvc := blockservice.New(fstore, offline.Exchange(fstore))
	defer bsvc.Close() //nolint:errcheck

	// ---- First Pass --- Build the Unixfs DAG with the in-memory filestore above to learn the root of the DAG.
	root, err := importNormalFileToUnixfsDAG(ctx, inputFilePath, merkledag.NewDAGService(bsvc))
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to import file to store: %w", err)
	}

	// ---- Second Pass --- Now that we have the root of the Unixfs DAG, write out the Unixfs DAG to a CARv2 file with the root present, using a
	// filestore backed by a read-write CARv2 blockstore.
	fsb, err := filestorecaradapter.NewReadWriteFileStore(outputCARv2Path, []cid.Cid{root})
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to create a CARv2 read-write blockstore: %w", err)
	}
	defer fsb.Close() //nolint:errcheck

	bsvc = blockservice.New(fsb, offline.Exchange(fsb))
	root2, err := importNormalFileToUnixfsDAG(ctx, inputFilePath, merkledag.NewDAGService(bsvc))
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to create Unixfs DAG with CARv2 blockstore: %w", err)
	}

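	// Importing the same file with the same parameters is deterministic, so the two passes must
	// agree on the root; a mismatch indicates a bug.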
	if root != root2 {
		return cid.Undef, xerrors.New("roots do not match")
	}

	return root, nil
}

// importNormalFileToUnixfsDAG transforms a client's normal file to a Unixfs DAG and imports the DAG to the given DAG service.
func importNormalFileToUnixfsDAG(ctx context.Context, inputFilePath string, dag ipld.DAGService) (cid.Cid, error) {
	f, err := os.Open(inputFilePath)
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to open input file: %w", err)
	}
	defer f.Close() //nolint:errcheck

	stat, err := f.Stat()
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to stat file: %w", err)
	}

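	// Wrap the reader together with its on-disk path so that, with NoCopy set below, the importer
	// can emit filestore leaf nodes that point back into the original file.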
	file, err := files2.NewReaderPathFile(inputFilePath, f, stat)
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to create reader path file: %w", err)
	}

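	// Buffer writes to the DAG service and flush them in batches; the Commit below ensures
	// everything has been persisted before we return the root.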
	bufDs := ipld.NewBufferedDAG(ctx, dag)

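	// Build CIDv1 CIDs, using DefaultHashFunction as the multihash.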
	prefix, err := merkledag.PrefixForCidVersion(1)
	if err != nil {
		return cid.Undef, err
	}
	prefix.MhType = DefaultHashFunction

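	// DAG shape: wide nodes (UnixfsLinksPerLevel links per level) over raw leaves. The InlineBuilder
	// inlines very small blocks (up to 126 bytes) directly into identity-hashed CIDs, and NoCopy makes
	// the leaves reference the underlying file via the filestore instead of duplicating its data.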
	params := ihelper.DagBuilderParams{
		Maxlinks:  build.UnixfsLinksPerLevel,
		RawLeaves: true,
		CidBuilder: cidutil.InlineBuilder{
			Builder: prefix,
			Limit:   126,
		},
		Dagserv: bufDs,
		NoCopy:  true,
	}

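	// Split the file into fixed-size chunks (build.UnixfsChunkSize) and assemble them into a
	// balanced Unixfs DAG.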
	db, err := params.New(chunker.NewSizeSplitter(file, int64(build.UnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}
	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	if err := bufDs.Commit(); err != nil {
		return cid.Undef, err
	}

	return nd.Cid(), nil
}