6c01310728
This migrates everything except the `go-car` library: https://github.com/ipfs/boxo/issues/218#issuecomment-1529922103

I didn't migrate everything in the previous release because the boxo code wasn't compatible with the go-ipld-prime one, due to an in-flight (/ aftermath) revert of github.com/ipfs/go-block-format. go-block-format was left unmigrated because just about everything depends on it; migrating it would have required either moving everything into boxo or having everything opt in to boxo, and both were deal breakers for different groups. That worked fine as long as lotus's codebase could live happily on the original multirepo setup, however boost is now trying to use boxo's code together with lotus's (still multirepo) setup: https://filecoinproject.slack.com/archives/C03AQ3QAUG1/p1685022344779649

The alternative would be for boost to write shim types which just forward calls and return values under the different interface definitions. Why is this an issue in the first place? Because, contrary to what Go's duck-typing model suggests, interfaces are not transparent (https://github.com/golang/go/issues/58112): interfaces are strongly typed, but they allow implicit narrowing. If an interface method is declared to return one interface and an implementation's method returns a different (even structurally identical) interface, Go has no function body in which to insert the implicit conversion, so the type checker complains that the type does not implement the interface. Stubbing types were reverted: https://github.com/ipfs/boxo/issues/218#issuecomment-1478650351

Last time I only migrated `go-bitswap` to `boxo/bitswap`, because of the security issues and because we never hit the interface-returning-an-interface problem there (we had concrete wrappers where the implicit conversion took place).
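A minimal sketch of that narrowing problem, using made-up names (`OldBlock`, `NewBlock`, `getterShim`), not the actual go-ipld-format or boxo definitions: a concrete type satisfies both interface definitions, but a type whose method returns one interface does not satisfy an interface whose method returns the other, which is why forwarding shims have to re-declare the method.

```go
package shimexample

// OldBlock and NewBlock stand in for the "old repo" and boxo definitions of
// the same interface; they are structurally identical.
type OldBlock interface{ RawData() []byte }

type NewBlock interface{ RawData() []byte }

// myBlock is a concrete implementation.
type myBlock struct{ data []byte }

func (b myBlock) RawData() []byte { return b.data }

// Implicit narrowing works on concrete values: the same concrete type
// satisfies both interfaces.
var (
	_ OldBlock = myBlock{}
	_ NewBlock = myBlock{}
)

// But it does not propagate through return types. An interface whose method
// returns OldBlock does not satisfy an interface whose method returns
// NewBlock, because Go has no place to insert the implicit conversion.
type OldGetter interface{ Get() OldBlock }

type NewGetter interface{ Get() NewBlock }

type getter struct{}

func (getter) Get() OldBlock { return myBlock{data: []byte("x")} }

var _ OldGetter = getter{} // fine

// var _ NewGetter = getter{} // compile error: Get returns OldBlock, not NewBlock

// getterShim is the kind of forwarding shim boost would have to write: it
// re-declares the method with the new return type and converts the value
// where the conversion is legal.
type getterShim struct{ inner OldGetter }

func (s getterShim) Get() NewBlock { return s.inner.Get().(NewBlock) }

var _ NewGetter = getterShim{}
```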
package kit

import (
	"bufio"
	"bytes"
	"context"
	"io"
	"math/rand"
	"os"
	"testing"

	"github.com/ipfs/boxo/blockservice"
	bstore "github.com/ipfs/boxo/blockstore"
	chunk "github.com/ipfs/boxo/chunker"
	offline "github.com/ipfs/boxo/exchange/offline"
	"github.com/ipfs/boxo/files"
	"github.com/ipfs/boxo/ipld/merkledag"
	"github.com/ipfs/boxo/ipld/unixfs/importer/balanced"
	ihelper "github.com/ipfs/boxo/ipld/unixfs/importer/helpers"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-cidutil"
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	ipldformat "github.com/ipfs/go-ipld-format"
	"github.com/ipld/go-car"
	"github.com/minio/blake2b-simd"
	mh "github.com/multiformats/go-multihash"
	"github.com/stretchr/testify/require"
)

const unixfsChunkSize int64 = 1 << 10

var defaultHashFunction = uint64(mh.BLAKE2B_MIN + 31)

// CreateRandomFile creates a random file with the provided seed and the
// provided size.
func CreateRandomFile(t *testing.T, rseed, size int) (path string) {
	if size == 0 {
		size = 1600
	}

	source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size))

	file, err := os.CreateTemp(t.TempDir(), "sourcefile.dat")
	require.NoError(t, err)

	n, err := io.Copy(file, source)
	require.NoError(t, err)
	require.EqualValues(t, n, size)

	return file.Name()
}

// CreateRandomCARv1 creates a random file with the provided seed and the
// provided size, transforms it into a CARv1 file, and returns both paths.
func CreateRandomCARv1(t *testing.T, rseed, size int, opts ...GeneratedDAGOpts) (carV1FilePath string, origFilePath string) {
	ctx := context.Background()
	if size == 0 {
		size = 1600
	}

	source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size))

	file, err := os.CreateTemp(t.TempDir(), "sourcefile.dat")
	require.NoError(t, err)

	n, err := io.Copy(file, source)
	require.NoError(t, err)
	require.EqualValues(t, n, size)

	// rewind the source file so it can be imported as a UnixFS DAG
	_, err = file.Seek(0, io.SeekStart)
	require.NoError(t, err)
	bs := bstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	dagSvc := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))

	root := writeUnixfsDAG(ctx, t, file, dagSvc, opts...)

	// create a CARv1 file from the DAG
	tmp, err := os.CreateTemp(t.TempDir(), "randcarv1")
	require.NoError(t, err)
	require.NoError(t, car.WriteCar(ctx, dagSvc, []cid.Cid{root}, tmp))
	_, err = tmp.Seek(0, io.SeekStart)
	require.NoError(t, err)
	hd, err := car.ReadHeader(bufio.NewReader(tmp))
	require.NoError(t, err)
	require.EqualValues(t, 1, hd.Version)
	require.Len(t, hd.Roots, 1)
	require.NoError(t, tmp.Close())

	return tmp.Name(), file.Name()
}

type GeneratedDAGOpts struct {
	ChunkSize int64
	Maxlinks  int
}

// writeUnixfsDAG imports rd into dag as a balanced UnixFS DAG and returns the
// root CID.
func writeUnixfsDAG(ctx context.Context, t *testing.T, rd io.Reader, dag ipldformat.DAGService, opts ...GeneratedDAGOpts) cid.Cid {
	dagOpts := GeneratedDAGOpts{
		ChunkSize: unixfsChunkSize,
		Maxlinks:  1024,
	}
	if len(opts) > 0 {
		dagOpts = opts[0]
	}

	rpf := files.NewReaderFile(rd)

	// generate the dag and get the root
	// import to UnixFS
	prefix, err := merkledag.PrefixForCidVersion(1)
	require.NoError(t, err)
	prefix.MhType = defaultHashFunction

	bufferedDS := ipldformat.NewBufferedDAG(ctx, dag)
	params := ihelper.DagBuilderParams{
		Maxlinks:  dagOpts.Maxlinks,
		RawLeaves: true,
		CidBuilder: cidutil.InlineBuilder{
			Builder: prefix,
			Limit:   126,
		},
		Dagserv: bufferedDS,
	}

	db, err := params.New(chunk.NewSizeSplitter(rpf, dagOpts.ChunkSize))
	require.NoError(t, err)

	nd, err := balanced.Layout(db)
	require.NoError(t, err)
	require.NotEqualValues(t, cid.Undef, nd.Cid())

	err = bufferedDS.Commit()
	require.NoError(t, err)
	require.NoError(t, rpf.Close())
	return nd.Cid()
}

// AssertFilesEqual compares two files by blake2b hash equality and
// fails the test if unequal.
func AssertFilesEqual(t *testing.T, left, right string) {
	// initialize hashes.
	leftH, rightH := blake2b.New256(), blake2b.New256()

	// open files.
	leftF, err := os.Open(left)
	require.NoError(t, err)

	rightF, err := os.Open(right)
	require.NoError(t, err)

	// feed hash functions.
	_, err = io.Copy(leftH, leftF)
	require.NoError(t, err)

	_, err = io.Copy(rightH, rightF)
	require.NoError(t, err)

	// compute digests.
	leftD, rightD := leftH.Sum(nil), rightH.Sum(nil)

	require.True(t, bytes.Equal(leftD, rightD))
}
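For reference, a rough sketch of how these helpers might be used from a test in the same package; `TestRandomCARRoundTrip` and the elided retrieval step are illustrative, not taken from an actual lotus itest.

```go
func TestRandomCARRoundTrip(t *testing.T) {
	// Generate a 2000-byte random payload and its CARv1 encoding.
	carPath, origPath := CreateRandomCARv1(t, 5, 2000)

	// A real test would hand carPath to the system under test and fetch the
	// payload back into retrievedPath; this sketch just reuses the original.
	_ = carPath
	retrievedPath := origPath

	// Compare the retrieved copy against the original via blake2b digests.
	AssertFilesEqual(t, origPath, retrievedPath)
}
```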