integrate DAG store and CARv2 in deal-making (#6671)
This commit removes badger from the deal-making processes and moves to a new architecture with the dagstore as the central component on the miner side, and CARv2 files on the client side.

Every deal that has been handed off to the sealing subsystem becomes a shard in the dagstore. Shards are mounted via the LotusMount, which teaches the dagstore how to load the related piece when serving retrievals.

When the miner starts Lotus for the first time with this patch, we will perform a one-time migration of all active deals into the dagstore. This is a lightweight process: it consists simply of registering the shards in the dagstore. Shards are backed by the unsealed copy of the piece, which is currently a CARv1. However, the dagstore keeps CARv2 indices for all pieces, so when it's time to acquire a shard to serve a retrieval, the unsealed CARv1 is joined with its index (safeguarded by the dagstore) to form a read-only blockstore, thus taking the place of the monolithic badger.

Data transfers have been adjusted to interface directly with CARv2 files. On inbound transfers (client retrievals, miner storage deals), we stream the received data into a CARv2 ReadWrite blockstore. On outbound transfers (client storage deals, miner retrievals), we serve the data off a CARv2 ReadOnly blockstore.

Client-side imports are managed by the refactored *imports.Manager component (when not using the IPFS integration). Just as before, we use the go-filestore library to avoid duplicating the data from the original file in the resulting UnixFS DAG (concretely, the leaves). However, the target of those imports are what we call "ref-CARv2s": CARv2 files placed under the `$LOTUS_PATH/imports` directory, containing the intermediate nodes in full, and the leaves as positional references to the original file on disk.

Client-side retrievals are placed into CARv2 files under `$LOTUS_PATH/retrievals`.

A new set of `Dagstore*` JSON-RPC operations and `lotus-miner dagstore` subcommands have been introduced on the miner side to inspect and manage the dagstore.

Despite moving to a CARv2-backed system, the IPFS integration has been preserved: it remains possible to make storage deals with data held in an IPFS node, and to perform retrievals directly into an IPFS node.

NOTE: because the "staging" and "client" Badger blockstores are no longer used, existing imports on the client will be rendered useless. On startup, Lotus will enumerate all imports and print WARN statements in the log for each import that needs to be reimported. These log lines contain these messages:

- import lacks carv2 path; import will not work; please reimport
- import has missing/broken carv2; please reimport

At the end, we print a "sanity check completed" message indicating the count of imports found and how many were deemed broken.

Co-authored-by: Aarsh Shah <aarshkshah1992@gmail.com>
Co-authored-by: Dirk McCormick <dirkmdev@gmail.com>
Co-authored-by: Raúl Kripalani <raul@protocol.ai>
Co-authored-by: Dirk McCormick <dirkmdev@gmail.com>
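As a rough illustration of the blockstore model described above, the Go sketch below opens a CARv2 file as a read-only blockstore (the outbound/retrieval direction) and creates a read-write CARv2 blockstore for inbound data (the transfer direction), using the go-car/v2 library this change depends on. This is a minimal sketch, not code from this commit: the file paths are placeholders, and the exact go-car/v2 blockstore signatures may vary slightly between versions.

```go
package main

import (
	"fmt"

	carbs "github.com/ipld/go-car/v2/blockstore"
)

func main() {
	// Outbound path: open an existing CARv2 (data + index) as a read-only
	// blockstore, analogous to how an unsealed shard is served for retrievals.
	ro, err := carbs.OpenReadOnly("/tmp/piece.car") // placeholder path
	if err != nil {
		panic(err)
	}
	defer ro.Close()

	roots, err := ro.Roots()
	if err != nil {
		panic(err)
	}
	fmt.Println("roots:", roots)

	// Inbound path: create a read-write CARv2 blockstore to stream incoming
	// blocks into; Finalize flushes the index once the transfer completes.
	rw, err := carbs.OpenReadWrite("/tmp/incoming.car", roots) // placeholder path
	if err != nil {
		panic(err)
	}
	// ... rw.Put(...) for each block as it arrives ...
	if err := rw.Finalize(); err != nil {
		panic(err)
	}
}
```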
parent 473a192b85
commit d7076778e2
@ -12,14 +12,14 @@ import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
|
||||
apitypes "github.com/filecoin-project/lotus/api/types"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
|
||||
@ -29,6 +29,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
)
|
||||
|
||||
//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_full.go -package=mocks . FullNode
|
||||
@ -336,7 +337,7 @@ type FullNode interface {
|
||||
// ClientImport imports file under the specified path into filestore.
|
||||
ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error) //perm:admin
|
||||
// ClientRemoveImport removes file import
|
||||
ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin
|
||||
ClientRemoveImport(ctx context.Context, importID imports.ID) error //perm:admin
|
||||
// ClientStartDeal proposes a deal with a miner.
|
||||
ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin
|
||||
// ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
|
||||
@ -728,16 +729,28 @@ type MinerSectors struct {
|
||||
|
||||
type ImportRes struct {
|
||||
Root cid.Cid
|
||||
ImportID multistore.StoreID
|
||||
ImportID imports.ID
|
||||
}
|
||||
|
||||
type Import struct {
|
||||
Key multistore.StoreID
|
||||
Key imports.ID
|
||||
Err string
|
||||
|
||||
Root *cid.Cid
|
||||
Source string
|
||||
Root *cid.Cid
|
||||
|
||||
// Source is the provenance of the import, e.g. "import", "unknown", etc.
|
||||
// Currently useless but may be used in the future.
|
||||
Source string
|
||||
|
||||
// FilePath is the path of the original file. It is important that the file
|
||||
// is retained at this path, because it will be referenced during
|
||||
// the transfer (when we do the UnixFS chunking, we don't duplicate the
|
||||
// leaves, but rather point to chunks of the original data through
|
||||
// positional references).
|
||||
FilePath string
|
||||
|
||||
// CARPath is the path of the CAR file containing the DAG for this import.
|
||||
CARPath string
|
||||
}
|
||||
|
||||
type DealInfo struct {
|
||||
@ -920,7 +933,7 @@ type RetrievalOrder struct {
|
||||
Piece *cid.Cid
|
||||
Size uint64
|
||||
|
||||
LocalStore *multistore.StoreID // if specified, get data from local store
|
||||
FromLocalCAR string // if specified, get data from a local CARv2 file.
|
||||
// TODO: support offset
|
||||
Total types.BigInt
|
||||
UnsealPrice types.BigInt
|
||||
|
@ -13,13 +13,14 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-fil-markets/piecestore"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/piecestore"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
@ -166,6 +167,48 @@ type StorageMiner interface {
|
||||
MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write
|
||||
MarketPublishPendingDeals(ctx context.Context) error //perm:admin
|
||||
|
||||
// DagstoreListShards returns information about all shards known to the
|
||||
// DAG store. Only available on nodes running the markets subsystem.
|
||||
DagstoreListShards(ctx context.Context) ([]DagstoreShardInfo, error) //perm:read
|
||||
|
||||
// DagstoreInitializeShard initializes an uninitialized shard.
|
||||
//
|
||||
// Initialization consists of fetching the shard's data (deal payload) from
|
||||
// the storage subsystem, generating an index, and persisting the index
|
||||
// to facilitate later retrievals, and/or to publish to external sources.
|
||||
//
|
||||
// This operation is intended to complement the initial migration. The
|
||||
// migration registers a shard for every unique piece CID, with lazy
|
||||
// initialization. Thus, shards are not initialized immediately to avoid
|
||||
// IO activity competing with proving. Instead, shards are initialized
|
||||
// when first accessed. This method forces the initialization of a shard by
|
||||
// accessing it and immediately releasing it. This is useful to warm up the
|
||||
// cache to facilitate subsequent retrievals, and to generate the indexes
|
||||
// to publish them externally.
|
||||
//
|
||||
// This operation fails if the shard is not in ShardStateNew state.
|
||||
// It blocks until initialization finishes.
|
||||
DagstoreInitializeShard(ctx context.Context, key string) error //perm:write
|
||||
|
||||
// DagstoreRecoverShard attempts to recover a failed shard.
|
||||
//
|
||||
// This operation fails if the shard is not in ShardStateErrored state.
|
||||
// It blocks until recovery finishes. If recovery failed, it returns the
|
||||
// error.
|
||||
DagstoreRecoverShard(ctx context.Context, key string) error //perm:write
|
||||
|
||||
// DagstoreInitializeAll initializes all uninitialized shards in bulk,
|
||||
// according to the policy passed in the parameters.
|
||||
//
|
||||
// It is recommended to set a maximum concurrency to avoid extreme
|
||||
// IO pressure if the storage subsystem has a large amount of deals.
|
||||
//
|
||||
// It returns a stream of events to report progress.
|
||||
DagstoreInitializeAll(ctx context.Context, params DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) //perm:write
|
||||
|
||||
// DagstoreGC runs garbage collection on the DAG store.
|
||||
DagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin
|
||||
|
||||
// RuntimeSubsystems returns the subsystems that are enabled
|
||||
// in this instance.
|
||||
RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read
|
||||
@ -336,3 +379,34 @@ type DealSchedule struct {
|
||||
StartEpoch abi.ChainEpoch
|
||||
EndEpoch abi.ChainEpoch
|
||||
}
|
||||
|
||||
// DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that
|
||||
// we expose through JSON-RPC to avoid clients having to depend on the
|
||||
// dagstore lib.
|
||||
type DagstoreShardInfo struct {
|
||||
Key string
|
||||
State string
|
||||
Error string
|
||||
}
|
||||
|
||||
// DagstoreShardResult enumerates results per shard.
|
||||
type DagstoreShardResult struct {
|
||||
Key string
|
||||
Success bool
|
||||
Error string
|
||||
}
|
||||
|
||||
type DagstoreInitializeAllParams struct {
|
||||
MaxConcurrency int
|
||||
IncludeSealed bool
|
||||
}
|
||||
|
||||
// DagstoreInitializeAllEvent represents an initialization event.
|
||||
type DagstoreInitializeAllEvent struct {
|
||||
Key string
|
||||
Event string // "start", "end"
|
||||
Success bool
|
||||
Error string
|
||||
Total int
|
||||
Current int
|
||||
}
|
||||
|
@ -76,7 +76,7 @@ func TestReturnTypes(t *testing.T) {
|
||||
seen[typ] = struct{}{}
|
||||
|
||||
if typ.Kind() == reflect.Interface && typ != bareIface && !typ.Implements(jmarsh) {
|
||||
t.Error("methods can't return interfaces", m.Name)
|
||||
t.Error("methods can't return interfaces or struct types not implementing json.Marshaller", m.Name)
|
||||
}
|
||||
|
||||
switch typ.Kind() {
|
||||
|
@ -5,6 +5,7 @@ package api
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
@ -17,6 +18,7 @@ import (
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
var _ = cid.Undef
|
||||
var _ = math.E
|
||||
var _ = sort.Sort
|
||||
|
||||
func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
|
||||
|
@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/google/uuid"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-filestore"
|
||||
@ -27,7 +28,6 @@ import (
|
||||
filestore2 "github.com/filecoin-project/go-fil-markets/filestore"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
@ -43,6 +43,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
)
|
||||
|
||||
var ExampleValues = map[reflect.Type]interface{}{
|
||||
@ -90,6 +91,7 @@ func init() {
|
||||
addExample(&pid)
|
||||
|
||||
multistoreIDExample := multistore.StoreID(50)
|
||||
storeIDExample := imports.ID(50)
|
||||
|
||||
addExample(bitfield.NewFromSet([]uint64{5}))
|
||||
addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
|
||||
@ -120,6 +122,8 @@ func init() {
|
||||
addExample(time.Minute)
|
||||
addExample(datatransfer.TransferID(3))
|
||||
addExample(datatransfer.Ongoing)
|
||||
addExample(storeIDExample)
|
||||
addExample(&storeIDExample)
|
||||
addExample(multistoreIDExample)
|
||||
addExample(&multistoreIDExample)
|
||||
addExample(retrievalmarket.ClientEventDealAccepted)
|
||||
@ -176,7 +180,7 @@ func init() {
|
||||
|
||||
// miner specific
|
||||
addExample(filestore2.Path(".lotusminer/fstmp123"))
|
||||
si := multistore.StoreID(12)
|
||||
si := uint64(12)
|
||||
addExample(&si)
|
||||
addExample(retrievalmarket.DealID(5))
|
||||
addExample(abi.ActorID(1000))
|
||||
@ -271,6 +275,15 @@ func init() {
|
||||
api.SubsystemSectorStorage,
|
||||
api.SubsystemMarkets,
|
||||
})
|
||||
addExample(api.DagstoreShardResult{
|
||||
Key: "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq",
|
||||
Error: "<error>",
|
||||
})
|
||||
addExample(api.DagstoreShardInfo{
|
||||
Key: "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq",
|
||||
State: "ShardStateAvailable",
|
||||
Error: "<error>",
|
||||
})
|
||||
}
|
||||
|
||||
func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
|
||||
|
@ -14,7 +14,6 @@ import (
|
||||
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
auth "github.com/filecoin-project/go-jsonrpc/auth"
|
||||
multistore "github.com/filecoin-project/go-multistore"
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
big "github.com/filecoin-project/go-state-types/big"
|
||||
crypto "github.com/filecoin-project/go-state-types/crypto"
|
||||
@ -26,6 +25,7 @@ import (
|
||||
types "github.com/filecoin-project/lotus/chain/types"
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
imports "github.com/filecoin-project/lotus/node/repo/imports"
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
@ -775,7 +775,7 @@ func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{})
|
||||
}
|
||||
|
||||
// ClientRemoveImport mocks base method.
|
||||
func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 multistore.StoreID) error {
|
||||
func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 imports.ID) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1)
|
||||
ret0, _ := ret[0].(error)
|
||||
|
@ -13,7 +13,6 @@ import (
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
@ -29,6 +28,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
"github.com/google/uuid"
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -190,7 +190,7 @@ type FullNodeStruct struct {
|
||||
|
||||
ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
|
||||
|
||||
ClientRemoveImport func(p0 context.Context, p1 multistore.StoreID) error `perm:"admin"`
|
||||
ClientRemoveImport func(p0 context.Context, p1 imports.ID) error `perm:"admin"`
|
||||
|
||||
ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
|
||||
|
||||
@ -607,6 +607,16 @@ type StorageMinerStruct struct {
|
||||
|
||||
CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`
|
||||
|
||||
DagstoreGC func(p0 context.Context) ([]DagstoreShardResult, error) `perm:"admin"`
|
||||
|
||||
DagstoreInitializeAll func(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) `perm:"write"`
|
||||
|
||||
DagstoreInitializeShard func(p0 context.Context, p1 string) error `perm:"write"`
|
||||
|
||||
DagstoreListShards func(p0 context.Context) ([]DagstoreShardInfo, error) `perm:"read"`
|
||||
|
||||
DagstoreRecoverShard func(p0 context.Context, p1 string) error `perm:"write"`
|
||||
|
||||
DealsConsiderOfflineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"`
|
||||
|
||||
DealsConsiderOfflineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"`
|
||||
@ -1494,14 +1504,14 @@ func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error {
|
||||
func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 imports.ID) error {
|
||||
if s.Internal.ClientRemoveImport == nil {
|
||||
return ErrNotSupported
|
||||
}
|
||||
return s.Internal.ClientRemoveImport(p0, p1)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error {
|
||||
func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 imports.ID) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
@ -3595,6 +3605,61 @@ func (s *StorageMinerStub) CreateBackup(p0 context.Context, p1 string) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) DagstoreGC(p0 context.Context) ([]DagstoreShardResult, error) {
|
||||
if s.Internal.DagstoreGC == nil {
|
||||
return *new([]DagstoreShardResult), ErrNotSupported
|
||||
}
|
||||
return s.Internal.DagstoreGC(p0)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) DagstoreGC(p0 context.Context) ([]DagstoreShardResult, error) {
|
||||
return *new([]DagstoreShardResult), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) DagstoreInitializeAll(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) {
|
||||
if s.Internal.DagstoreInitializeAll == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.DagstoreInitializeAll(p0, p1)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) DagstoreInitializeAll(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) DagstoreInitializeShard(p0 context.Context, p1 string) error {
|
||||
if s.Internal.DagstoreInitializeShard == nil {
|
||||
return ErrNotSupported
|
||||
}
|
||||
return s.Internal.DagstoreInitializeShard(p0, p1)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) DagstoreInitializeShard(p0 context.Context, p1 string) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) DagstoreListShards(p0 context.Context) ([]DagstoreShardInfo, error) {
|
||||
if s.Internal.DagstoreListShards == nil {
|
||||
return *new([]DagstoreShardInfo), ErrNotSupported
|
||||
}
|
||||
return s.Internal.DagstoreListShards(p0)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) DagstoreListShards(p0 context.Context) ([]DagstoreShardInfo, error) {
|
||||
return *new([]DagstoreShardInfo), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) DagstoreRecoverShard(p0 context.Context, p1 string) error {
|
||||
if s.Internal.DagstoreRecoverShard == nil {
|
||||
return ErrNotSupported
|
||||
}
|
||||
return s.Internal.DagstoreRecoverShard(p0, p1)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) DagstoreRecoverShard(p0 context.Context, p1 string) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) {
|
||||
if s.Internal.DealsConsiderOfflineRetrievalDeals == nil {
|
||||
return false, ErrNotSupported
|
||||
|
@ -8,7 +8,6 @@ import (
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
@ -22,6 +21,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
)
|
||||
|
||||
//go:generate go run github.com/golang/mock/mockgen -destination=v0mocks/mock_full.go -package=v0mocks . FullNode
|
||||
@ -305,7 +305,7 @@ type FullNode interface {
|
||||
// ClientImport imports file under the specified path into filestore.
|
||||
ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) //perm:admin
|
||||
// ClientRemoveImport removes file import
|
||||
ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin
|
||||
ClientRemoveImport(ctx context.Context, importID imports.ID) error //perm:admin
|
||||
// ClientStartDeal proposes a deal with a miner.
|
||||
ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin
|
||||
// ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
|
||||
|
@ -10,7 +10,6 @@ import (
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
@ -22,6 +21,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
"golang.org/x/xerrors"
|
||||
@ -121,7 +121,7 @@ type FullNodeStruct struct {
|
||||
|
||||
ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
|
||||
|
||||
ClientRemoveImport func(p0 context.Context, p1 multistore.StoreID) error `perm:"admin"`
|
||||
ClientRemoveImport func(p0 context.Context, p1 imports.ID) error `perm:"admin"`
|
||||
|
||||
ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
|
||||
|
||||
@ -939,14 +939,14 @@ func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error {
|
||||
func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 imports.ID) error {
|
||||
if s.Internal.ClientRemoveImport == nil {
|
||||
return ErrNotSupported
|
||||
}
|
||||
return s.Internal.ClientRemoveImport(p0, p1)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error {
|
||||
func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 imports.ID) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
|
@ -14,7 +14,6 @@ import (
|
||||
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
auth "github.com/filecoin-project/go-jsonrpc/auth"
|
||||
multistore "github.com/filecoin-project/go-multistore"
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
big "github.com/filecoin-project/go-state-types/big"
|
||||
crypto "github.com/filecoin-project/go-state-types/crypto"
|
||||
@ -26,6 +25,7 @@ import (
|
||||
types "github.com/filecoin-project/lotus/chain/types"
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
imports "github.com/filecoin-project/lotus/node/repo/imports"
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
@ -731,7 +731,7 @@ func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{})
|
||||
}
|
||||
|
||||
// ClientRemoveImport mocks base method.
|
||||
func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 multistore.StoreID) error {
|
||||
func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 imports.ID) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1)
|
||||
ret0, _ := ret[0].(error)
|
||||
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -5,6 +5,7 @@ package exchange
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
types "github.com/filecoin-project/lotus/chain/types"
|
||||
@ -15,6 +16,7 @@ import (
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
var _ = cid.Undef
|
||||
var _ = math.E
|
||||
var _ = sort.Sort
|
||||
|
||||
var lengthBufRequest = []byte{131}
|
||||
|
@ -5,6 +5,7 @@ package market
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
cid "github.com/ipfs/go-cid"
|
||||
@ -14,6 +15,7 @@ import (
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
var _ = cid.Undef
|
||||
var _ = math.E
|
||||
var _ = sort.Sort
|
||||
|
||||
var lengthBufFundedAddressState = []byte{131}
|
||||
|
@ -5,6 +5,7 @@ package types
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
@ -18,6 +19,7 @@ import (
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
var _ = cid.Undef
|
||||
var _ = math.E
|
||||
var _ = sort.Sort
|
||||
|
||||
var lengthBufBlockHeader = []byte{144}
|
||||
|
@ -24,7 +24,6 @@ import (
|
||||
"github.com/docker/go-units"
|
||||
"github.com/fatih/color"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-cidutil/cidenc"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
@ -32,12 +31,14 @@ import (
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
lapi "github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/api/v0api"
|
||||
@ -46,6 +47,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/lib/tablewriter"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
)
|
||||
|
||||
var CidBaseFlag = cli.StringFlag{
|
||||
@ -174,18 +176,18 @@ var clientDropCmd = &cli.Command{
|
||||
defer closer()
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
var ids []multistore.StoreID
|
||||
var ids []uint64
|
||||
for i, s := range cctx.Args().Slice() {
|
||||
id, err := strconv.ParseInt(s, 10, 0)
|
||||
id, err := strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing %d-th import ID: %w", i, err)
|
||||
}
|
||||
|
||||
ids = append(ids, multistore.StoreID(id))
|
||||
ids = append(ids, id)
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
if err := api.ClientRemoveImport(ctx, id); err != nil {
|
||||
if err := api.ClientRemoveImport(ctx, imports.ID(id)); err != nil {
|
||||
return xerrors.Errorf("removing import %d: %w", id, err)
|
||||
}
|
||||
}
|
||||
@ -1104,8 +1106,8 @@ var clientRetrieveCmd = &cli.Command{
|
||||
for _, i := range imports {
|
||||
if i.Root != nil && i.Root.Equals(file) {
|
||||
order = &lapi.RetrievalOrder{
|
||||
Root: file,
|
||||
LocalStore: &i.Key,
|
||||
Root: file,
|
||||
FromLocalCAR: i.CARPath,
|
||||
|
||||
Total: big.Zero(),
|
||||
UnsealPrice: big.Zero(),
|
||||
|
@ -22,7 +22,7 @@ var WaitApiCmd = &cli.Command{
|
||||
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
_, err = api.ID(ctx)
|
||||
_, err = api.Version(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
cmd/lotus-miner/dagstore.go (new file, 267 lines)
@ -0,0 +1,267 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/lib/tablewriter"
|
||||
)
|
||||
|
||||
var dagstoreCmd = &cli.Command{
|
||||
Name: "dagstore",
|
||||
Usage: "Manage the dagstore on the markets subsystem",
|
||||
Subcommands: []*cli.Command{
|
||||
dagstoreListShardsCmd,
|
||||
dagstoreInitializeShardCmd,
|
||||
dagstoreRecoverShardCmd,
|
||||
dagstoreInitializeAllCmd,
|
||||
dagstoreGcCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var dagstoreListShardsCmd = &cli.Command{
|
||||
Name: "list-shards",
|
||||
Usage: "List all shards known to the dagstore, with their current status",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "color",
|
||||
Usage: "use color in display output",
|
||||
DefaultText: "depends on output being a TTY",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.IsSet("color") {
|
||||
color.NoColor = !cctx.Bool("color")
|
||||
}
|
||||
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
shards, err := marketsApi.DagstoreListShards(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(shards) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tw := tablewriter.New(
|
||||
tablewriter.Col("Key"),
|
||||
tablewriter.Col("State"),
|
||||
tablewriter.Col("Error"),
|
||||
)
|
||||
|
||||
colors := map[string]color.Attribute{
|
||||
"ShardStateAvailable": color.FgGreen,
|
||||
"ShardStateServing": color.FgBlue,
|
||||
"ShardStateErrored": color.FgRed,
|
||||
"ShardStateNew": color.FgYellow,
|
||||
}
|
||||
|
||||
for _, s := range shards {
|
||||
m := map[string]interface{}{
|
||||
"Key": s.Key,
|
||||
"State": func() string {
|
||||
if c, ok := colors[s.State]; ok {
|
||||
return color.New(c).Sprint(s.State)
|
||||
}
|
||||
return s.State
|
||||
}(),
|
||||
"Error": s.Error,
|
||||
}
|
||||
tw.Write(m)
|
||||
}
|
||||
|
||||
return tw.Flush(os.Stdout)
|
||||
},
|
||||
}
|
||||
|
||||
var dagstoreInitializeShardCmd = &cli.Command{
|
||||
Name: "initialize-shard",
|
||||
ArgsUsage: "[key]",
|
||||
Usage: "Initialize the specified shard",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "color",
|
||||
Usage: "use color in display output",
|
||||
DefaultText: "depends on output being a TTY",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.IsSet("color") {
|
||||
color.NoColor = !cctx.Bool("color")
|
||||
}
|
||||
|
||||
if cctx.NArg() != 1 {
|
||||
return fmt.Errorf("must provide a single shard key")
|
||||
}
|
||||
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
return marketsApi.DagstoreInitializeShard(ctx, cctx.Args().First())
|
||||
},
|
||||
}
|
||||
|
||||
var dagstoreRecoverShardCmd = &cli.Command{
|
||||
Name: "recover-shard",
|
||||
ArgsUsage: "[key]",
|
||||
Usage: "Attempt to recover a shard in errored state",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "color",
|
||||
Usage: "use color in display output",
|
||||
DefaultText: "depends on output being a TTY",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.IsSet("color") {
|
||||
color.NoColor = !cctx.Bool("color")
|
||||
}
|
||||
|
||||
if cctx.NArg() != 1 {
|
||||
return fmt.Errorf("must provide a single shard key")
|
||||
}
|
||||
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
return marketsApi.DagstoreRecoverShard(ctx, cctx.Args().First())
|
||||
},
|
||||
}
|
||||
|
||||
var dagstoreInitializeAllCmd = &cli.Command{
|
||||
Name: "initialize-all",
|
||||
Usage: "Initialize all uninitialized shards, streaming results as they're produced; only shards for unsealed pieces are initialized by default",
|
||||
Flags: []cli.Flag{
|
||||
&cli.UintFlag{
|
||||
Name: "concurrency",
|
||||
Usage: "maximum shards to initialize concurrently at a time; use 0 for unlimited",
|
||||
Required: true,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "include-sealed",
|
||||
Usage: "initialize sealed pieces as well",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "color",
|
||||
Usage: "use color in display output",
|
||||
DefaultText: "depends on output being a TTY",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.IsSet("color") {
|
||||
color.NoColor = !cctx.Bool("color")
|
||||
}
|
||||
|
||||
concurrency := cctx.Uint("concurrency")
|
||||
sealed := cctx.Bool("include-sealed")
|
||||
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
params := api.DagstoreInitializeAllParams{
|
||||
MaxConcurrency: int(concurrency),
|
||||
IncludeSealed: sealed,
|
||||
}
|
||||
|
||||
ch, err := marketsApi.DagstoreInitializeAll(ctx, params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case evt, ok := <-ch:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
_, _ = fmt.Fprint(os.Stdout, color.New(color.BgHiBlack).Sprintf("(%d/%d)", evt.Current, evt.Total))
|
||||
_, _ = fmt.Fprint(os.Stdout, " ")
|
||||
if evt.Event == "start" {
|
||||
_, _ = fmt.Fprintln(os.Stdout, evt.Key, color.New(color.Reset).Sprint("STARTING"))
|
||||
} else {
|
||||
if evt.Success {
|
||||
_, _ = fmt.Fprintln(os.Stdout, evt.Key, color.New(color.FgGreen).Sprint("SUCCESS"))
|
||||
} else {
|
||||
_, _ = fmt.Fprintln(os.Stdout, evt.Key, color.New(color.FgRed).Sprint("ERROR"), evt.Error)
|
||||
}
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("aborted")
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var dagstoreGcCmd = &cli.Command{
|
||||
Name: "gc",
|
||||
Usage: "Garbage collect the dagstore",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "color",
|
||||
Usage: "use color in display output",
|
||||
DefaultText: "depends on output being a TTY",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.IsSet("color") {
|
||||
color.NoColor = !cctx.Bool("color")
|
||||
}
|
||||
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
collected, err := marketsApi.DagstoreGC(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(collected) == 0 {
|
||||
_, _ = fmt.Fprintln(os.Stdout, "no shards collected")
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, e := range collected {
|
||||
if e.Error == "" {
|
||||
_, _ = fmt.Fprintln(os.Stdout, e.Key, color.New(color.FgGreen).Sprint("SUCCESS"))
|
||||
} else {
|
||||
_, _ = fmt.Fprintln(os.Stdout, e.Key, color.New(color.FgRed).Sprint("ERROR"), e.Error)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
@ -10,12 +10,13 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
"github.com/filecoin-project/lotus/lib/lotuslog"
|
||||
"github.com/filecoin-project/lotus/lib/tracing"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
@ -47,6 +48,7 @@ func main() {
|
||||
lcli.WithCategory("market", storageDealsCmd),
|
||||
lcli.WithCategory("market", retrievalDealsCmd),
|
||||
lcli.WithCategory("market", dataTransfersCmd),
|
||||
lcli.WithCategory("market", dagstoreCmd),
|
||||
lcli.WithCategory("storage", sectorsCmd),
|
||||
lcli.WithCategory("storage", provingCmd),
|
||||
lcli.WithCategory("storage", storageCmd),
|
||||
|
@ -591,7 +591,7 @@ var setSealDurationCmd = &cli.Command{
|
||||
Usage: "Set the expected time, in minutes, that you expect sealing sectors to take. Deals that start before this duration will be rejected.",
|
||||
ArgsUsage: "<minutes>",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ var piecesListPiecesCmd = &cli.Command{
|
||||
Name: "list-pieces",
|
||||
Usage: "list registered pieces",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -49,7 +49,7 @@ var piecesListCidInfosCmd = &cli.Command{
|
||||
Name: "list-cids",
|
||||
Usage: "list registered payload CIDs",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -76,7 +76,7 @@ var piecesInfoCmd = &cli.Command{
|
||||
return lcli.ShowHelp(cctx, fmt.Errorf("must specify piece cid"))
|
||||
}
|
||||
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -111,7 +111,7 @@ var piecesCidInfoCmd = &cli.Command{
|
||||
return lcli.ShowHelp(cctx, fmt.Errorf("must specify payload cid"))
|
||||
}
|
||||
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -403,7 +403,7 @@ var sectorsRefsCmd = &cli.Command{
|
||||
Name: "refs",
|
||||
Usage: "List References to sectors",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -5,6 +5,7 @@ package chaos
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
address "github.com/filecoin-project/go-address"
|
||||
@ -17,6 +18,7 @@ import (
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
var _ = cid.Undef
|
||||
var _ = math.E
|
||||
var _ = sort.Sort
|
||||
|
||||
var lengthBufState = []byte{130}
|
||||
|
@ -18,6 +18,12 @@
|
||||
* [ComputeProof](#ComputeProof)
|
||||
* [Create](#Create)
|
||||
* [CreateBackup](#CreateBackup)
|
||||
* [Dagstore](#Dagstore)
|
||||
* [DagstoreGC](#DagstoreGC)
|
||||
* [DagstoreInitializeAll](#DagstoreInitializeAll)
|
||||
* [DagstoreInitializeShard](#DagstoreInitializeShard)
|
||||
* [DagstoreListShards](#DagstoreListShards)
|
||||
* [DagstoreRecoverShard](#DagstoreRecoverShard)
|
||||
* [Deals](#Deals)
|
||||
* [DealsConsiderOfflineRetrievalDeals](#DealsConsiderOfflineRetrievalDeals)
|
||||
* [DealsConsiderOfflineStorageDeals](#DealsConsiderOfflineStorageDeals)
|
||||
@ -345,6 +351,114 @@ Inputs:
|
||||
|
||||
Response: `{}`
|
||||
|
||||
## Dagstore
|
||||
|
||||
|
||||
### DagstoreGC
|
||||
DagstoreGC runs garbage collection on the DAG store.
|
||||
|
||||
|
||||
Perms: admin
|
||||
|
||||
Inputs: `null`
|
||||
|
||||
Response: `null`
|
||||
|
||||
### DagstoreInitializeAll
|
||||
DagstoreInitializeAll initializes all uninitialized shards in bulk,
|
||||
according to the policy passed in the parameters.
|
||||
|
||||
It is recommended to set a maximum concurrency to avoid extreme
|
||||
IO pressure if the storage subsystem has a large amount of deals.
|
||||
|
||||
It returns a stream of events to report progress.
|
||||
|
||||
|
||||
Perms: write
|
||||
|
||||
Inputs:
|
||||
```json
|
||||
[
|
||||
{
|
||||
"MaxConcurrency": 123,
|
||||
"IncludeSealed": true
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"Key": "string value",
|
||||
"Event": "string value",
|
||||
"Success": true,
|
||||
"Error": "string value",
|
||||
"Total": 123,
|
||||
"Current": 123
|
||||
}
|
||||
```
|
||||
|
||||
### DagstoreInitializeShard
|
||||
DagstoreInitializeShard initializes an uninitialized shard.
|
||||
|
||||
Initialization consists of fetching the shard's data (deal payload) from
|
||||
the storage subsystem, generating an index, and persisting the index
|
||||
to facilitate later retrievals, and/or to publish to external sources.
|
||||
|
||||
This operation is intended to complement the initial migration. The
|
||||
migration registers a shard for every unique piece CID, with lazy
|
||||
initialization. Thus, shards are not initialized immediately to avoid
|
||||
IO activity competing with proving. Instead, shards are initialized
|
||||
when first accessed. This method forces the initialization of a shard by
|
||||
accessing it and immediately releasing it. This is useful to warm up the
|
||||
cache to facilitate subsequent retrievals, and to generate the indexes
|
||||
to publish them externally.
|
||||
|
||||
This operation fails if the shard is not in ShardStateNew state.
|
||||
It blocks until initialization finishes.
|
||||
|
||||
|
||||
Perms: write
|
||||
|
||||
Inputs:
|
||||
```json
|
||||
[
|
||||
"string value"
|
||||
]
|
||||
```
|
||||
|
||||
Response: `{}`
|
||||
|
||||
### DagstoreListShards
|
||||
DagstoreListShards returns information about all shards known to the
|
||||
DAG store. Only available on nodes running the markets subsystem.
|
||||
|
||||
|
||||
Perms: read
|
||||
|
||||
Inputs: `null`
|
||||
|
||||
Response: `null`
|
||||
|
||||
### DagstoreRecoverShard
|
||||
DagstoreRecoverShard attempts to recover a failed shard.
|
||||
|
||||
This operation fails if the shard is not in ShardStateErrored state.
|
||||
It blocks until recovery finishes. If recovery failed, it returns the
|
||||
error.
|
||||
|
||||
|
||||
Perms: write
|
||||
|
||||
Inputs:
|
||||
```json
|
||||
[
|
||||
"string value"
|
||||
]
|
||||
```
|
||||
|
||||
Response: `{}`
|
||||
|
||||
## Deals
|
||||
|
||||
|
||||
@ -690,7 +804,6 @@ Response:
|
||||
"SlashEpoch": 10101,
|
||||
"FastRetrieval": true,
|
||||
"Message": "string value",
|
||||
"StoreID": 12,
|
||||
"FundsReserved": "0",
|
||||
"Ref": {
|
||||
"TransferType": "string value",
|
||||
@ -709,7 +822,8 @@ Response:
|
||||
"Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
|
||||
"ID": 3
|
||||
},
|
||||
"SectorNumber": 9
|
||||
"SectorNumber": 9,
|
||||
"InboundCAR": "string value"
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -1467,7 +1467,7 @@ Inputs:
|
||||
},
|
||||
"Piece": null,
|
||||
"Size": 42,
|
||||
"LocalStore": 12,
|
||||
"FromLocalCAR": "string value",
|
||||
"Total": "0",
|
||||
"UnsealPrice": "0",
|
||||
"PaymentInterval": 42,
|
||||
@ -1521,7 +1521,7 @@ Inputs:
|
||||
},
|
||||
"Piece": null,
|
||||
"Size": 42,
|
||||
"LocalStore": 12,
|
||||
"FromLocalCAR": "string value",
|
||||
"Total": "0",
|
||||
"UnsealPrice": "0",
|
||||
"PaymentInterval": 42,
|
||||
|
@ -1530,7 +1530,7 @@ Inputs:
|
||||
},
|
||||
"Piece": null,
|
||||
"Size": 42,
|
||||
"LocalStore": 12,
|
||||
"FromLocalCAR": "string value",
|
||||
"Total": "0",
|
||||
"UnsealPrice": "0",
|
||||
"PaymentInterval": 42,
|
||||
@ -1584,7 +1584,7 @@ Inputs:
|
||||
},
|
||||
"Piece": null,
|
||||
"Size": 42,
|
||||
"LocalStore": 12,
|
||||
"FromLocalCAR": "string value",
|
||||
"Total": "0",
|
||||
"UnsealPrice": "0",
|
||||
"PaymentInterval": 42,
|
||||
|
@ -29,6 +29,7 @@ COMMANDS:
|
||||
storage-deals Manage storage deals and related configuration
|
||||
retrieval-deals Manage retrieval deals and related configuration
|
||||
data-transfers Manage data transfers
|
||||
dagstore Manage the dagstore on the markets subsystem
|
||||
NETWORK:
|
||||
net Manage P2P Network
|
||||
RETRIEVAL:
|
||||
@ -1000,6 +1001,100 @@ OPTIONS:
|
||||
|
||||
```
|
||||
|
||||
## lotus-miner dagstore
|
||||
```
|
||||
NAME:
|
||||
lotus-miner dagstore - Manage the dagstore on the markets subsystem
|
||||
|
||||
USAGE:
|
||||
lotus-miner dagstore command [command options] [arguments...]
|
||||
|
||||
COMMANDS:
|
||||
list-shards List all shards known to the dagstore, with their current status
|
||||
initialize-shard Initialize the specified shard
|
||||
recover-shard Attempt to recover a shard in errored state
|
||||
initialize-all Initialize all uninitialized shards, streaming results as they're produced; only shards for unsealed pieces are initialized by default
|
||||
gc Garbage collect the dagstore
|
||||
help, h Shows a list of commands or help for one command
|
||||
|
||||
OPTIONS:
|
||||
--help, -h show help (default: false)
|
||||
--version, -v print the version (default: false)
|
||||
|
||||
```
|
||||
|
||||
### lotus-miner dagstore list-shards
|
||||
```
|
||||
NAME:
|
||||
lotus-miner dagstore list-shards - List all shards known to the dagstore, with their current status
|
||||
|
||||
USAGE:
|
||||
lotus-miner dagstore list-shards [command options] [arguments...]
|
||||
|
||||
OPTIONS:
|
||||
--color use color in display output (default: depends on output being a TTY)
|
||||
--help, -h show help (default: false)
|
||||
|
||||
```
|
||||
|
||||
### lotus-miner dagstore initialize-shard
|
||||
```
|
||||
NAME:
|
||||
lotus-miner dagstore initialize-shard - Initialize the specified shard
|
||||
|
||||
USAGE:
|
||||
lotus-miner dagstore initialize-shard [command options] [key]
|
||||
|
||||
OPTIONS:
|
||||
--color use color in display output (default: depends on output being a TTY)
|
||||
--help, -h show help (default: false)
|
||||
|
||||
```
|
||||
|
||||
### lotus-miner dagstore recover-shard
|
||||
```
|
||||
NAME:
|
||||
lotus-miner dagstore recover-shard - Attempt to recover a shard in errored state
|
||||
|
||||
USAGE:
|
||||
lotus-miner dagstore recover-shard [command options] [key]
|
||||
|
||||
OPTIONS:
|
||||
--color use color in display output (default: depends on output being a TTY)
|
||||
--help, -h show help (default: false)
|
||||
|
||||
```
|
||||
|
||||
### lotus-miner dagstore initialize-all
|
||||
```
|
||||
NAME:
|
||||
lotus-miner dagstore initialize-all - Initialize all uninitialized shards, streaming results as they're produced; only shards for unsealed pieces are initialized by default
|
||||
|
||||
USAGE:
|
||||
lotus-miner dagstore initialize-all [command options] [arguments...]
|
||||
|
||||
OPTIONS:
|
||||
--concurrency value maximum shards to initialize concurrently at a time; use 0 for unlimited (default: 0)
|
||||
--include-sealed initialize sealed pieces as well (default: false)
|
||||
--color use color in display output (default: depends on output being a TTY)
|
||||
--help, -h show help (default: false)
|
||||
|
||||
```
|
||||
|
||||
### lotus-miner dagstore gc
|
||||
```
|
||||
NAME:
|
||||
lotus-miner dagstore gc - Garbage collect the dagstore
|
||||
|
||||
USAGE:
|
||||
lotus-miner dagstore gc [command options] [arguments...]
|
||||
|
||||
OPTIONS:
|
||||
--color use color in display output (default: depends on output being a TTY)
|
||||
--help, -h show help (default: false)
|
||||
|
||||
```
|
||||
|
||||
## lotus-miner net
|
||||
```
|
||||
NAME:
|
||||
|
extern/sector-storage/cbor_gen.go (vendored)
@ -5,6 +5,7 @@ package sectorstorage
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
sealtasks "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||
@ -15,6 +16,7 @@ import (
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
var _ = cid.Undef
|
||||
var _ = math.E
|
||||
var _ = sort.Sort
|
||||
|
||||
func (t *Call) MarshalCBOR(w io.Writer) error {
|
||||
|
extern/sector-storage/storiface/cbor_gen.go (vendored)
@ -5,6 +5,7 @@ package storiface
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
cid "github.com/ipfs/go-cid"
|
||||
@ -14,6 +15,7 @@ import (
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
var _ = cid.Undef
|
||||
var _ = math.E
|
||||
var _ = sort.Sort
|
||||
|
||||
func (t *CallID) MarshalCBOR(w io.Writer) error {
|
||||
|
extern/storage-sealing/cbor_gen.go (vendored)
@ -5,6 +5,7 @@ package sealing
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
@ -17,6 +18,7 @@ import (
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
var _ = cid.Undef
|
||||
var _ = math.E
|
||||
var _ = sort.Sort
|
||||
|
||||
func (t *Piece) MarshalCBOR(w io.Writer) error {
|
||||
|
go.mod
@ -26,17 +26,17 @@ require (
|
||||
github.com/elastic/gosigar v0.12.0
|
||||
github.com/etclabscore/go-openrpc-reflect v0.0.36
|
||||
github.com/fatih/color v1.9.0
|
||||
github.com/filecoin-project/dagstore v0.4.2
|
||||
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f
|
||||
github.com/filecoin-project/go-address v0.0.5
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect
|
||||
github.com/filecoin-project/go-bitfield v0.2.4
|
||||
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
|
||||
github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
|
||||
github.com/filecoin-project/go-data-transfer v1.7.2
|
||||
github.com/filecoin-project/go-data-transfer v1.7.3
|
||||
github.com/filecoin-project/go-fil-commcid v0.1.0
|
||||
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0
|
||||
github.com/filecoin-project/go-fil-markets v1.6.2
|
||||
github.com/filecoin-project/go-fil-markets v1.8.0
|
||||
github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec
|
||||
github.com/filecoin-project/go-multistore v0.0.3
|
||||
github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1
|
||||
@ -57,7 +57,7 @@ require (
|
||||
github.com/go-kit/kit v0.10.0
|
||||
github.com/go-ole/go-ole v1.2.4 // indirect
|
||||
github.com/golang/mock v1.6.0
|
||||
github.com/google/uuid v1.1.2
|
||||
github.com/google/uuid v1.2.0
|
||||
github.com/gorilla/mux v1.7.4
|
||||
github.com/gorilla/websocket v1.4.2
|
||||
github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026
|
||||
@ -66,10 +66,10 @@ require (
|
||||
github.com/hashicorp/golang-lru v0.5.4
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d
|
||||
github.com/ipfs/bbloom v0.0.4
|
||||
github.com/ipfs/go-bitswap v0.3.2
|
||||
github.com/ipfs/go-bitswap v0.3.4
|
||||
github.com/ipfs/go-block-format v0.0.3
|
||||
github.com/ipfs/go-blockservice v0.1.4
|
||||
github.com/ipfs/go-cid v0.0.7
|
||||
github.com/ipfs/go-blockservice v0.1.5
|
||||
github.com/ipfs/go-cid v0.0.8-0.20210716091050-de6c03deae1c
|
||||
github.com/ipfs/go-cidutil v0.0.2
|
||||
github.com/ipfs/go-datastore v0.4.5
|
||||
github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e
|
||||
@ -78,8 +78,9 @@ require (
|
||||
github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459
|
||||
github.com/ipfs/go-filestore v1.0.0
|
||||
github.com/ipfs/go-fs-lock v0.0.6
|
||||
github.com/ipfs/go-graphsync v0.6.6
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.3
|
||||
github.com/ipfs/go-graphsync v0.6.8
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.4
|
||||
github.com/ipfs/go-ipfs-blocksutil v0.0.1
|
||||
github.com/ipfs/go-ipfs-chunker v0.0.5
|
||||
github.com/ipfs/go-ipfs-ds-help v1.0.0
|
||||
github.com/ipfs/go-ipfs-exchange-interface v0.0.1
|
||||
@ -95,9 +96,10 @@ require (
|
||||
github.com/ipfs/go-metrics-interface v0.0.1
|
||||
github.com/ipfs/go-metrics-prometheus v0.0.2
|
||||
github.com/ipfs/go-path v0.0.7
|
||||
github.com/ipfs/go-unixfs v0.2.4
|
||||
github.com/ipfs/go-unixfs v0.2.6
|
||||
github.com/ipfs/interface-go-ipfs-core v0.2.3
|
||||
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d
|
||||
github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7
|
||||
github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018
|
||||
github.com/kelseyhightower/envconfig v1.4.0
|
||||
github.com/libp2p/go-buffer-pool v0.0.2
|
||||
@ -118,7 +120,6 @@ require (
|
||||
github.com/libp2p/go-libp2p-tls v0.1.3
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.4
|
||||
github.com/libp2p/go-maddr-filter v0.1.0
|
||||
github.com/mattn/go-colorable v0.1.6 // indirect
|
||||
github.com/mattn/go-isatty v0.0.13
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
@ -127,6 +128,7 @@ require (
|
||||
github.com/multiformats/go-multiaddr-dns v0.3.1
|
||||
github.com/multiformats/go-multibase v0.0.3
|
||||
github.com/multiformats/go-multihash v0.0.15
|
||||
github.com/multiformats/go-varint v0.0.6
|
||||
github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
|
||||
github.com/opentracing/opentracing-go v1.2.0
|
||||
github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a
|
||||
@ -134,12 +136,11 @@ require (
|
||||
github.com/raulk/clock v1.1.0
|
||||
github.com/raulk/go-watchdog v1.0.1
|
||||
github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25
|
||||
github.com/stretchr/objx v0.2.0 // indirect
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/syndtr/goleveldb v1.0.0
|
||||
github.com/urfave/cli/v2 v2.2.0
|
||||
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8
|
||||
github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4
|
||||
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
|
||||
github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325
|
||||
@ -147,19 +148,20 @@ require (
|
||||
go.opencensus.io v0.23.0
|
||||
go.uber.org/dig v1.10.0 // indirect
|
||||
go.uber.org/fx v1.9.0
|
||||
go.uber.org/multierr v1.6.0
|
||||
go.uber.org/multierr v1.7.0
|
||||
go.uber.org/zap v1.16.0
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
|
||||
golang.org/x/tools v0.1.5
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.28
|
||||
gotest.tools v2.2.0+incompatible
|
||||
honnef.co/go/tools v0.0.1-2020.1.3 // indirect
|
||||
)
|
||||
|
||||
replace github.com/multiformats/go-multihash => github.com/multiformats/go-multihash v0.0.14
|
||||
|
||||
replace github.com/libp2p/go-libp2p-yamux => github.com/libp2p/go-libp2p-yamux v0.5.1
|
||||
|
||||
replace github.com/filecoin-project/lotus => ./
|
||||
|
125 go.sum
@ -22,6 +22,7 @@ contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A=
|
||||
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||
@ -168,6 +169,7 @@ github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmf
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg=
|
||||
@ -256,12 +258,13 @@ github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGj
|
||||
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E=
|
||||
github.com/filecoin-project/dagstore v0.4.2 h1:Ae2+O1DhKCI1JbOZCBkqUksKYofdbRbjkS7OF0A6Jw0=
|
||||
github.com/filecoin-project/dagstore v0.4.2/go.mod h1:WY5OoLfnwISCk6eASSF927KKPqLPIlTwmG1qHpA08KY=
|
||||
github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
|
||||
github.com/filecoin-project/go-address v0.0.5 h1:SSaFT/5aLfPXycUlFyemoHYhRgdyXClXCyDdNJKPlDM=
|
||||
github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM=
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs=
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU=
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g=
|
||||
github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o=
|
||||
github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE=
|
||||
github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo=
|
||||
@ -277,8 +280,8 @@ github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMX
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
|
||||
github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo=
|
||||
github.com/filecoin-project/go-data-transfer v1.7.0/go.mod h1:GLRr5BmLEqsLwXfiRDG7uJvph22KGL2M4iOuF8EINaU=
|
||||
github.com/filecoin-project/go-data-transfer v1.7.2 h1:iL3q5pxSloA7V2QucFofoVN3lquULz+Ml0KrNqMT5ZU=
|
||||
github.com/filecoin-project/go-data-transfer v1.7.2/go.mod h1:GLRr5BmLEqsLwXfiRDG7uJvph22KGL2M4iOuF8EINaU=
|
||||
github.com/filecoin-project/go-data-transfer v1.7.3 h1:2F3K1LmT6Q97+Bq9gBv/VVqFZ/0FHsRPqYDbCzrGaYg=
|
||||
github.com/filecoin-project/go-data-transfer v1.7.3/go.mod h1:Cbl9lzKOuAyyIxp1tE+VbV5Aix4bxzA7uJGA9wGM4fM=
|
||||
github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ=
|
||||
github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
|
||||
@ -288,8 +291,8 @@ github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+
|
||||
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo=
|
||||
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8=
|
||||
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
|
||||
github.com/filecoin-project/go-fil-markets v1.6.2 h1:ib1sGUOF+hf50YwP7+p9yoK+9g84YcXzvuenxd6MYoE=
|
||||
github.com/filecoin-project/go-fil-markets v1.6.2/go.mod h1:ZuFDagROUV6GfvBU//KReTQDw+EZci4rH7jMYTD10vs=
|
||||
github.com/filecoin-project/go-fil-markets v1.8.0 h1:mqcLexTKU3W9gl7f0GIH5jmzOwefta5RZCc13GL7ax8=
|
||||
github.com/filecoin-project/go-fil-markets v1.8.0/go.mod h1:D5xHWxyuU0EK8wcK4qStO5rjmpH206eb4OdrkWmTdaY=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
|
||||
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
|
||||
@ -328,7 +331,6 @@ github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK
|
||||
github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY=
|
||||
github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc=
|
||||
@ -380,6 +382,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
|
||||
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
@ -483,8 +486,9 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
@ -576,8 +580,8 @@ github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3
|
||||
github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0=
|
||||
github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs=
|
||||
github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM=
|
||||
github.com/ipfs/go-bitswap v0.3.2 h1:TdKx7lpidYe2dMAKfdeNS26y6Pc/AZX/i8doI1GV210=
|
||||
github.com/ipfs/go-bitswap v0.3.2/go.mod h1:AyWWfN3moBzQX0banEtfKOfbXb3ZeoOeXnZGNPV9S6w=
|
||||
github.com/ipfs/go-bitswap v0.3.4 h1:AhJhRrG8xkxh6x87b4wWs+4U4y3DVB3doI8yFNqgQME=
|
||||
github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI=
|
||||
github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc=
|
||||
github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
|
||||
github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc=
|
||||
@ -587,8 +591,8 @@ github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbR
|
||||
github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M=
|
||||
github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
|
||||
github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
|
||||
github.com/ipfs/go-blockservice v0.1.4 h1:Vq+MlsH8000KbbUciRyYMEw/NNP8UAGmcqKi4uWmFGA=
|
||||
github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
|
||||
github.com/ipfs/go-blockservice v0.1.5 h1:euqZu96CCbToPyYVwVshu8ENURi8BhFd7FUFfTLi+fQ=
|
||||
github.com/ipfs/go-blockservice v0.1.5/go.mod h1:yLk8lBJCBRWRqerqCSVi3cE/Dncdt3vGC/PJMVKhLTY=
|
||||
github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
|
||||
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
|
||||
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
|
||||
@ -597,8 +601,9 @@ github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj
|
||||
github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
|
||||
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
|
||||
github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
|
||||
github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY=
|
||||
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
|
||||
github.com/ipfs/go-cid v0.0.8-0.20210716091050-de6c03deae1c h1:uVMZWk8sJN1l/47TtXRXDz0M9/6v0yw7neDn5WfeLwg=
|
||||
github.com/ipfs/go-cid v0.0.8-0.20210716091050-de6c03deae1c/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o=
|
||||
github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo=
|
||||
github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s=
|
||||
github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
|
||||
@ -642,16 +647,17 @@ github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CE
|
||||
github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0=
|
||||
github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY=
|
||||
github.com/ipfs/go-graphsync v0.6.4/go.mod h1:5WyaeigpNdpiYQuW2vwpuecOoEfB4h747ZGEOKmAGTg=
|
||||
github.com/ipfs/go-graphsync v0.6.6 h1:In7jjzvSXlrAUz4OjN41lxYf/dzkf1bVeVxLpwKMRo8=
|
||||
github.com/ipfs/go-graphsync v0.6.6/go.mod h1:GdHT8JeuIZ0R4lSjFR16Oe4zPi5dXwKi9zR9ADVlcdk=
|
||||
github.com/ipfs/go-graphsync v0.6.8 h1:mgyPdBDPcgL8ujO132grQjP3rfQv+KN/riQzbmTVgg4=
|
||||
github.com/ipfs/go-graphsync v0.6.8/go.mod h1:GdHT8JeuIZ0R4lSjFR16Oe4zPi5dXwKi9zR9ADVlcdk=
|
||||
github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
|
||||
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
|
||||
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
|
||||
github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ=
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU=
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE=
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.3 h1:RDhK6fdg5YsonkpMuMpdvk/pRtOQlrIRIybuQfkvB2M=
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE=
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.4 h1:DZdeya9Vu4ttvlGheQPGrj6kWehXnYZRFCp9EsZQ1hI=
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.4/go.mod h1:uL7/gTJ8QIZ3MtA3dWf+s1a0U3fJy2fcEZAsovpRp+w=
|
||||
github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=
|
||||
github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
|
||||
github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw=
|
||||
@ -746,8 +752,9 @@ github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC
|
||||
github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8=
|
||||
github.com/ipfs/go-unixfs v0.2.1/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k=
|
||||
github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k=
|
||||
github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo=
|
||||
github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw=
|
||||
github.com/ipfs/go-unixfs v0.2.6 h1:gq3U3T2vh8x6tXhfo3uSO3n+2z4yW0tYtNgVP/3sIyA=
|
||||
github.com/ipfs/go-unixfs v0.2.6/go.mod h1:GTTzQvaZsTZARdNkkdjDKFFnBhmO3e5mIM1PkH/x4p0=
|
||||
github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E=
|
||||
github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0=
|
||||
github.com/ipfs/interface-go-ipfs-core v0.2.3 h1:E6uQ+1fJjkxJWlL9lAE72a5FWeyeeNL3GitLy8+jq3Y=
|
||||
@ -760,6 +767,10 @@ github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBH
|
||||
github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw=
|
||||
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d h1:iphSzTuPqyDgH7WUVZsdqUnQNzYgIblsVr1zhVNA33U=
|
||||
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ=
|
||||
github.com/ipld/go-car/v2 v2.0.0-beta1.0.20210721090610-5a9d1b217d25/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU=
|
||||
github.com/ipld/go-car/v2 v2.0.2/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU=
|
||||
github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 h1:6Z0beJSZNsRY+7udoqUl4gQ/tqtrPuRvDySrlsvbqZA=
|
||||
github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU=
|
||||
github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w=
|
||||
github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8=
|
||||
github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM=
|
||||
@ -835,8 +846,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
|
||||
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4 h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.8 h1:bhR2mgIlno/Sfk4oUbH4sPlc83z1yGrN9bvqiq3C33I=
|
||||
github.com/klauspost/cpuid/v2 v2.0.8/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
|
||||
@ -890,6 +903,8 @@ github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El
|
||||
github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ=
|
||||
github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8=
|
||||
github.com/libp2p/go-libp2p v0.12.0/go.mod h1:FpHZrfC1q7nA8jitvdjKBDF31hguaC676g/nT9PgQM0=
|
||||
github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo=
|
||||
github.com/libp2p/go-libp2p v0.14.0/go.mod h1:dsQrWLAoIn+GkHPN/U+yypizkHiB9tnv79Os+kSgQ4Q=
|
||||
github.com/libp2p/go-libp2p v0.14.2 h1:qs0ABtjjNjS+RIXT1uM7sMJEvIc0pq2nKR0VQxFXhHI=
|
||||
github.com/libp2p/go-libp2p v0.14.2/go.mod h1:0PQMADQEjCM2l8cSMYDpTgsb8gr6Zq7i4LUgq1mlW2E=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U=
|
||||
@ -1068,6 +1083,7 @@ github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYR
|
||||
github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM=
|
||||
github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
|
||||
github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
|
||||
github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw=
|
||||
github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4=
|
||||
github.com/libp2p/go-libp2p-swarm v0.5.3 h1:hsYaD/y6+kZff1o1Mc56NcuwSg80lIphTS/zDk3mO4M=
|
||||
github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8=
|
||||
@ -1092,6 +1108,7 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 h1:SHt3g0FslnqIkEWF25YOB8UCOCTpGAVvHRWQYJ+veiI=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk=
|
||||
@ -1214,8 +1231,8 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
@ -1319,16 +1336,11 @@ github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/g
|
||||
github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
|
||||
github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
|
||||
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
|
||||
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
|
||||
github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po=
|
||||
github.com/multiformats/go-multihash v0.0.7/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM=
|
||||
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
|
||||
github.com/multiformats/go-multicodec v0.2.1-0.20210713081508-b421db6850ae/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ=
|
||||
github.com/multiformats/go-multicodec v0.2.1-0.20210714093213-b2b5bd6fe68b h1:xtpkfZUOyOzsILgqW923kIs8ZqnfwGhHfPW/fD5Sbi0=
|
||||
github.com/multiformats/go-multicodec v0.2.1-0.20210714093213-b2b5bd6fe68b/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ=
|
||||
github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I=
|
||||
github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
|
||||
github.com/multiformats/go-multihash v0.0.15 h1:hWOPdrNqDjwHDx82vsYGSDZNyktOJJ2dzZJzFkOV1jM=
|
||||
github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
|
||||
github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
@ -1410,6 +1422,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk=
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@ -1485,6 +1499,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI=
|
||||
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
|
||||
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
||||
@ -1527,8 +1543,9 @@ github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81a
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
|
||||
@ -1566,8 +1583,6 @@ github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 h1:7z3LSn867ex6
|
||||
github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
@ -1621,6 +1636,8 @@ github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMU
|
||||
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM=
|
||||
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY=
|
||||
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM=
|
||||
github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0=
|
||||
github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
|
||||
@ -1634,8 +1651,8 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:f
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 h1:bsUlNhdmbtlfdLVXAVfuvKQ01RnWAM09TVrJkI7NZs4=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8 h1:TEv7MId88TyIqIUL4hbf9otOookIolMxlEbN0ro671Y=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
|
||||
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=
|
||||
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8=
|
||||
github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g=
|
||||
@ -1701,6 +1718,11 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
|
||||
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
|
||||
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
|
||||
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
|
||||
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
@ -1719,8 +1741,9 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec=
|
||||
go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
@ -1743,7 +1766,6 @@ golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnf
|
||||
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
@ -1765,21 +1787,26 @@ golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf h1:B2n+Zi5QeYRDAEodEu72OS36gmTWjgpXr2+cWcBW90o=
|
||||
golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc=
|
||||
golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
|
||||
golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc=
|
||||
golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc=
|
||||
golang.org/x/exp v0.0.0-20210715201039-d37aa40e8013 h1:Jp57DBw4K7mimZNA3F9f7CndVcUt4kJjmyJf2rzJHoI=
|
||||
golang.org/x/exp v0.0.0-20210715201039-d37aa40e8013/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@ -1796,10 +1823,12 @@ golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
|
||||
@ -1892,7 +1921,6 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -1949,6 +1977,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -1958,9 +1987,11 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744 h1:yhBbb4IRs2HS9PPlAg6DMC6mUOKexJBNsLf4Z+6En1Q=
|
||||
golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M=
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
@ -2005,11 +2036,11 @@ golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtn
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
@ -2020,6 +2051,7 @@ golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc
|
||||
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
@ -2135,8 +2167,9 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
@ -2146,8 +2179,8 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=
|
||||
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
|
||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
|
||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
|
||||
modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ=
|
||||
|
@ -108,7 +108,7 @@ func TestDealCyclesConcurrent(t *testing.T) {
|
||||
ns := fmt.Sprintf("%d", n)
|
||||
t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) })
|
||||
t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) })
|
||||
t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, true, false) })
|
||||
t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) })
|
||||
t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) })
|
||||
}
|
||||
}
|
||||
|
@ -37,4 +37,9 @@ func TestDealsWithSealingAndRPC(t *testing.T) {
|
||||
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true})
|
||||
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true})
|
||||
})
|
||||
|
||||
t.Run("stdretrieval-carv1", func(t *testing.T) {
|
||||
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, UseCARFileForStorageDeal: true})
|
||||
})
|
||||
|
||||
}
|
||||
|
@ -33,9 +33,10 @@ type DealHarness struct {
|
||||
}
|
||||
|
||||
type MakeFullDealParams struct {
|
||||
Rseed int
|
||||
FastRet bool
|
||||
StartEpoch abi.ChainEpoch
|
||||
Rseed int
|
||||
FastRet bool
|
||||
StartEpoch abi.ChainEpoch
|
||||
UseCARFileForStorageDeal bool
|
||||
|
||||
// SuspendUntilCryptoeconStable suspends deal-making, until cryptoecon
|
||||
// parameters are stabilised. This affects projected collateral, and tests
|
||||
@ -78,7 +79,11 @@ func NewDealHarness(t *testing.T, client *TestFullNode, main *TestMiner, market
|
||||
//
|
||||
// TODO: convert input parameters to struct, and add size as an input param.
|
||||
func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) {
|
||||
res, path = dh.client.CreateImportFile(ctx, params.Rseed, 0)
|
||||
if params.UseCARFileForStorageDeal {
|
||||
res, _, path = dh.client.ClientImportCARFile(ctx, params.Rseed, 200)
|
||||
} else {
|
||||
res, path = dh.client.CreateImportFile(ctx, params.Rseed, 0)
|
||||
}
|
||||
|
||||
dh.t.Logf("FILE CID: %s", res.Root)
|
||||
|
||||
@ -284,10 +289,11 @@ func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, file *os.File) (o
|
||||
}
|
||||
|
||||
type RunConcurrentDealsOpts struct {
|
||||
N int
|
||||
FastRetrieval bool
|
||||
CarExport bool
|
||||
StartEpoch abi.ChainEpoch
|
||||
N int
|
||||
FastRetrieval bool
|
||||
CarExport bool
|
||||
StartEpoch abi.ChainEpoch
|
||||
UseCARFileForStorageDeal bool
|
||||
}
|
||||
|
||||
func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) {
|
||||
@ -307,9 +313,10 @@ func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) {
|
||||
dh.t.Logf("making storage deal %d/%d", i, opts.N)
|
||||
|
||||
deal, res, inPath := dh.MakeOnlineDeal(context.Background(), MakeFullDealParams{
|
||||
Rseed: 5 + i,
|
||||
FastRet: opts.FastRetrieval,
|
||||
StartEpoch: opts.StartEpoch,
|
||||
Rseed: 5 + i,
|
||||
FastRet: opts.FastRetrieval,
|
||||
StartEpoch: opts.StartEpoch,
|
||||
UseCARFileForStorageDeal: opts.UseCARFileForStorageDeal,
|
||||
})
|
||||
|
||||
dh.t.Logf("retrieving deal %d/%d", i, opts.N)
|
||||
|
@ -1,17 +1,38 @@
|
||||
package kit
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/go-blockservice"
|
||||
"github.com/ipfs/go-cid"
|
||||
cidutil "github.com/ipfs/go-cidutil"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
bstore "github.com/ipfs/go-ipfs-blockstore"
|
||||
chunk "github.com/ipfs/go-ipfs-chunker"
|
||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
||||
files "github.com/ipfs/go-ipfs-files"
|
||||
ipldformat "github.com/ipfs/go-ipld-format"
|
||||
"github.com/ipfs/go-merkledag"
|
||||
"github.com/ipfs/go-unixfs/importer/balanced"
|
||||
ihelper "github.com/ipfs/go-unixfs/importer/helpers"
|
||||
"github.com/ipld/go-car"
|
||||
"github.com/minio/blake2b-simd"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const unixfsChunkSize uint64 = 1 << 10
|
||||
|
||||
var defaultHashFunction = uint64(mh.BLAKE2B_MIN + 31)
|
||||
|
||||
// CreateRandomFile creates a random file with the provided seed and the
|
||||
// provided size.
|
||||
func CreateRandomFile(t *testing.T, rseed, size int) (path string) {
|
||||
@ -31,6 +52,79 @@ func CreateRandomFile(t *testing.T, rseed, size int) (path string) {
|
||||
return file.Name()
|
||||
}
|
||||
|
||||
// CreateRandomCARv1 creates a normal file with the provided seed and the
// provided size, transforms it into a CARv1 file, and returns the paths to
// the CARv1 file and the original file.
|
||||
func CreateRandomCARv1(t *testing.T, rseed, size int) (carV1FilePath string, origFilePath string) {
|
||||
ctx := context.Background()
|
||||
if size == 0 {
|
||||
size = 1600
|
||||
}
|
||||
|
||||
source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size))
|
||||
|
||||
file, err := os.CreateTemp(t.TempDir(), "sourcefile.dat")
|
||||
require.NoError(t, err)
|
||||
|
||||
n, err := io.Copy(file, source)
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, n, size)
|
||||
|
||||
// rewind the source file and build a UnixFS DAG over its full contents
|
||||
_, err = file.Seek(0, io.SeekStart)
|
||||
require.NoError(t, err)
|
||||
bs := bstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
|
||||
dagSvc := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
|
||||
|
||||
root := writeUnixfsDAG(ctx, t, file, dagSvc)
|
||||
|
||||
// create a CARv1 file from the DAG
|
||||
tmp, err := os.CreateTemp(t.TempDir(), "randcarv1")
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, car.WriteCar(ctx, dagSvc, []cid.Cid{root}, tmp))
|
||||
_, err = tmp.Seek(0, io.SeekStart)
|
||||
require.NoError(t, err)
|
||||
hd, _, err := car.ReadHeader(bufio.NewReader(tmp))
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 1, hd.Version)
|
||||
require.Len(t, hd.Roots, 1)
|
||||
require.NoError(t, tmp.Close())
|
||||
|
||||
return tmp.Name(), file.Name()
|
||||
}
|
||||
|
||||
func writeUnixfsDAG(ctx context.Context, t *testing.T, rd io.Reader, dag ipldformat.DAGService) cid.Cid {
|
||||
rpf := files.NewReaderFile(rd)
|
||||
|
||||
// generate the dag and get the root
|
||||
// import to UnixFS
|
||||
prefix, err := merkledag.PrefixForCidVersion(1)
|
||||
require.NoError(t, err)
|
||||
prefix.MhType = defaultHashFunction
|
||||
|
||||
bufferedDS := ipldformat.NewBufferedDAG(ctx, dag)
|
||||
params := ihelper.DagBuilderParams{
|
||||
Maxlinks: 1024,
|
||||
RawLeaves: true,
|
||||
CidBuilder: cidutil.InlineBuilder{
|
||||
Builder: prefix,
|
||||
Limit: 126,
|
||||
},
|
||||
Dagserv: bufferedDS,
|
||||
}
|
||||
|
||||
db, err := params.New(chunk.NewSizeSplitter(rpf, int64(unixfsChunkSize)))
|
||||
require.NoError(t, err)
|
||||
|
||||
nd, err := balanced.Layout(db)
|
||||
require.NoError(t, err)
|
||||
require.NotEqualValues(t, cid.Undef, nd.Cid())
|
||||
|
||||
err = bufferedDS.Commit()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, rpf.Close())
|
||||
return nd.Cid()
|
||||
}
|
||||
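For context, a short usage sketch (hypothetical, not part of this change) showing how these helpers compose in a test; the api import, the client node, and the retrievedPath value are assumed to come from the surrounding harness:

// exampleCARv1Import is illustrative only: it builds a random CARv1, imports it
// into the client node as a CAR file, and shows where the original file would
// be compared against a later retrieval.
func exampleCARv1Import(t *testing.T, client *TestFullNode) {
	carPath, origPath := CreateRandomCARv1(t, 5, 1600)

	res, err := client.ClientImport(context.Background(), api.FileRef{Path: carPath, IsCAR: true})
	require.NoError(t, err)
	t.Logf("imported CARv1 with root %s", res.Root)

	// after making a deal and retrieving it, the payload would be verified with:
	// AssertFilesEqual(t, origPath, retrievedPath)
	_ = origPath
}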
|
||||
// AssertFilesEqual compares two files by blake2b hash equality and
|
||||
// fails the test if unequal.
|
||||
func AssertFilesEqual(t *testing.T, left, right string) {
|
||||
|
@ -28,6 +28,13 @@ type TestFullNode struct {
|
||||
options nodeOpts
|
||||
}
|
||||
|
||||
func (f *TestFullNode) ClientImportCARFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, carv1FilePath string, origFilePath string) {
|
||||
carv1FilePath, origFilePath = CreateRandomCARv1(f.t, rseed, size)
|
||||
res, err := f.ClientImport(ctx, api.FileRef{Path: carv1FilePath, IsCAR: true})
|
||||
require.NoError(f.t, err)
|
||||
return res, carv1FilePath, origFilePath
|
||||
}
|
||||
|
||||
// CreateImportFile creates a random file with the specified seed and size, and
|
||||
// imports it into the full node.
|
||||
func (f *TestFullNode) CreateImportFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, path string) {
|
||||
|
33 markets/dagstore/blockstore.go Normal file
@ -0,0 +1,33 @@
package dagstore

import (
	"io"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	bstore "github.com/ipfs/go-ipfs-blockstore"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/dagstore"
)

// Blockstore promotes a dagstore.ReadBlockstore to a full closeable Blockstore,
// stubbing out the write methods with erroring implementations.
type Blockstore struct {
	dagstore.ReadBlockstore
	io.Closer
}

var _ bstore.Blockstore = (*Blockstore)(nil)

func (b *Blockstore) DeleteBlock(c cid.Cid) error {
	return xerrors.Errorf("DeleteBlock called but not implemented")
}

func (b *Blockstore) Put(block blocks.Block) error {
	return xerrors.Errorf("Put called but not implemented")
}

func (b *Blockstore) PutMany(blocks []blocks.Block) error {
	return xerrors.Errorf("PutMany called but not implemented")
}
BIN markets/dagstore/fixtures/sample-rw-bs-v2.car Normal file
Binary file not shown.

184 markets/dagstore/miner_api.go Normal file
@ -0,0 +1,184 @@
package dagstore

import (
	"context"
	"fmt"
	"io"

	"github.com/filecoin-project/dagstore/throttle"
	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-fil-markets/piecestore"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/shared"
)

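// MinerAPI is the interface the dagstore mount uses to load piece data and
// piece metadata from the miner: it fetches an unsealed copy of a piece,
// reports the piece's unpadded CAR size, and checks whether an unsealed copy
// exists in any sector.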
type MinerAPI interface {
	FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error)
	GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error)
	IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error)
	Start(ctx context.Context) error
}

type minerAPI struct {
	pieceStore piecestore.PieceStore
	sa         retrievalmarket.SectorAccessor
	throttle   throttle.Throttler
	readyMgr   *shared.ReadyManager
}

var _ MinerAPI = (*minerAPI)(nil)

func NewMinerAPI(store piecestore.PieceStore, sa retrievalmarket.SectorAccessor, concurrency int) MinerAPI {
	return &minerAPI{
		pieceStore: store,
		sa:         sa,
		throttle:   throttle.Fixed(concurrency),
		readyMgr:   shared.NewReadyManager(),
	}
}

func (m *minerAPI) Start(_ context.Context) error {
	return m.readyMgr.FireReady(nil)
}

func (m *minerAPI) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) {
	err := m.readyMgr.AwaitReady()
	if err != nil {
		return false, xerrors.Errorf("failed while waiting for accessor to start: %w", err)
	}

	var pieceInfo piecestore.PieceInfo
	err = m.throttle.Do(ctx, func(ctx context.Context) (err error) {
		pieceInfo, err = m.pieceStore.GetPieceInfo(pieceCid)
		return err
	})

	if err != nil {
		return false, xerrors.Errorf("failed to fetch pieceInfo for piece %s: %w", pieceCid, err)
	}

	if len(pieceInfo.Deals) == 0 {
		return false, xerrors.Errorf("no storage deals found for piece %s", pieceCid)
	}

	// check if we have an unsealed deal for the given piece in any of the unsealed sectors.
	for _, deal := range pieceInfo.Deals {
		deal := deal

		var isUnsealed bool
		// Throttle this path to avoid flooding the storage subsystem.
		err := m.throttle.Do(ctx, func(ctx context.Context) (err error) {
			isUnsealed, err = m.sa.IsUnsealed(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
			if err != nil {
				return fmt.Errorf("failed to check if sector %d for deal %d was unsealed: %w", deal.SectorID, deal.DealID, err)
			}
			return nil
		})

		if err != nil {
			log.Warnf("failed to check/retrieve unsealed sector: %s", err)
			continue // move on to the next match.
		}

		if isUnsealed {
			return true, nil
		}
	}

	// we don't have an unsealed sector containing the piece
	return false, nil
}

func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) {
	err := m.readyMgr.AwaitReady()
	if err != nil {
		return nil, err
	}

	// Throttle this path to avoid flooding the storage subsystem.
	var pieceInfo piecestore.PieceInfo
	err = m.throttle.Do(ctx, func(ctx context.Context) (err error) {
		pieceInfo, err = m.pieceStore.GetPieceInfo(pieceCid)
		return err
	})

	if err != nil {
		return nil, xerrors.Errorf("failed to fetch pieceInfo for piece %s: %w", pieceCid, err)
	}

	if len(pieceInfo.Deals) == 0 {
		return nil, xerrors.Errorf("no storage deals found for piece %s", pieceCid)
	}

	// prefer an unsealed sector containing the piece if one exists
	for _, deal := range pieceInfo.Deals {
		deal := deal

		// Throttle this path to avoid flooding the storage subsystem.
		var reader io.ReadCloser
		err := m.throttle.Do(ctx, func(ctx context.Context) (err error) {
			isUnsealed, err := m.sa.IsUnsealed(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
			if err != nil {
				return fmt.Errorf("failed to check if sector %d for deal %d was unsealed: %w", deal.SectorID, deal.DealID, err)
			}
			if !isUnsealed {
				return nil
			}
			// Because we know we have an unsealed copy, this UnsealSector call will actually not perform any unsealing.
			reader, err = m.sa.UnsealSector(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
			return err
		})

		if err != nil {
			log.Warnf("failed to check/retrieve unsealed sector: %s", err)
			continue // move on to the next match.
		}

		if reader != nil {
			// we were able to obtain a reader for an already unsealed piece
			return reader, nil
		}
	}

	lastErr := xerrors.New("no sectors found to unseal from")
	// if there is no unsealed sector containing the piece, just read the piece from the first sector we are able to unseal.
	for _, deal := range pieceInfo.Deals {
		// Note that if the deal data is not already unsealed, unsealing may
		// block for a long time with the current PoRep
		//
		// This path is unthrottled.
		reader, err := m.sa.UnsealSector(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
		if err != nil {
			lastErr = xerrors.Errorf("failed to unseal deal %d: %w", deal.DealID, err)
			log.Warn(lastErr.Error())
			continue
		}

		// Successfully fetched the deal data so return a reader over the data
		return reader, nil
	}

	return nil, lastErr
}

func (m *minerAPI) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) {
	err := m.readyMgr.AwaitReady()
	if err != nil {
		return 0, err
	}

	pieceInfo, err := m.pieceStore.GetPieceInfo(pieceCid)
	if err != nil {
		return 0, xerrors.Errorf("failed to fetch pieceInfo for piece %s: %w", pieceCid, err)
	}

	if len(pieceInfo.Deals) == 0 {
		return 0, xerrors.Errorf("no storage deals found for piece %s", pieceCid)
	}

	len := pieceInfo.Deals[0].Length

	return uint64(len), nil
}

237 markets/dagstore/miner_api_test.go Normal file
@ -0,0 +1,237 @@
package dagstore

import (
	"bytes"
	"context"
	"io"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ipfs/go-cid"
	ds "github.com/ipfs/go-datastore"
	ds_sync "github.com/ipfs/go-datastore/sync"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/errgroup"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-actors/actors/builtin/paych"

	"github.com/filecoin-project/go-fil-markets/piecestore"
	piecestoreimpl "github.com/filecoin-project/go-fil-markets/piecestore/impl"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/shared"
)

const unsealedSectorID = abi.SectorNumber(1)
const sealedSectorID = abi.SectorNumber(2)

func TestLotusAccessorFetchUnsealedPiece(t *testing.T) {
	ctx := context.Background()

	cid1, err := cid.Parse("bafkqaaa")
	require.NoError(t, err)

	unsealedSectorData := "unsealed"
	sealedSectorData := "sealed"
	mockData := map[abi.SectorNumber]string{
		unsealedSectorID: unsealedSectorData,
		sealedSectorID:   sealedSectorData,
	}

	testCases := []struct {
		name        string
		deals       []abi.SectorNumber
		fetchedData string
		isUnsealed  bool

		expectErr bool
	}{{
		// Expect error if there is no deal info for piece CID
		name:      "no deals",
		expectErr: true,
	}, {
		// Expect the API to always fetch the unsealed deal (because it's
		// cheaper than fetching the sealed deal)
		name:        "prefer unsealed deal",
		deals:       []abi.SectorNumber{unsealedSectorID, sealedSectorID},
		fetchedData: unsealedSectorData,
		isUnsealed:  true,
	}, {
		// Expect the API to unseal the data if there are no unsealed deals
		name:        "unseal if necessary",
		deals:       []abi.SectorNumber{sealedSectorID},
		fetchedData: sealedSectorData,
		isUnsealed:  false,
	}}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			ps := getPieceStore(t)
			rpn := &mockRPN{
				sectors: mockData,
			}
			api := NewMinerAPI(ps, rpn, 100)
			require.NoError(t, api.Start(ctx))

			// Add deals to piece store
			for _, sectorID := range tc.deals {
				dealInfo := piecestore.DealInfo{
					SectorID: sectorID,
				}
				err = ps.AddDealForPiece(cid1, dealInfo)
				require.NoError(t, err)
			}

			// Fetch the piece
			r, err := api.FetchUnsealedPiece(ctx, cid1)
			if tc.expectErr {
				require.Error(t, err)
				return
			}

			// Check that the returned reader is for the correct piece
			require.NoError(t, err)
			bz, err := io.ReadAll(r)
			require.NoError(t, err)

			require.Equal(t, tc.fetchedData, string(bz))

			uns, err := api.IsUnsealed(ctx, cid1)
			require.NoError(t, err)
			require.Equal(t, tc.isUnsealed, uns)
		})
	}
}

func TestLotusAccessorGetUnpaddedCARSize(t *testing.T) {
	ctx := context.Background()
	cid1, err := cid.Parse("bafkqaaa")
	require.NoError(t, err)

	ps := getPieceStore(t)
	rpn := &mockRPN{}
	api := NewMinerAPI(ps, rpn, 100)
	require.NoError(t, api.Start(ctx))

	// Add a deal with data Length 10
	dealInfo := piecestore.DealInfo{
		Length: 10,
	}
	err = ps.AddDealForPiece(cid1, dealInfo)
	require.NoError(t, err)

	// Check that the data length is correct
	len, err := api.GetUnpaddedCARSize(ctx, cid1)
	require.NoError(t, err)
	require.EqualValues(t, 10, len)
}

func TestThrottle(t *testing.T) {
	ctx := context.Background()
	cid1, err := cid.Parse("bafkqaaa")
	require.NoError(t, err)

	ps := getPieceStore(t)
	rpn := &mockRPN{
		sectors: map[abi.SectorNumber]string{
			unsealedSectorID: "foo",
		},
	}
	api := NewMinerAPI(ps, rpn, 3)
	require.NoError(t, api.Start(ctx))

	// Add a deal with data Length 10
	dealInfo := piecestore.DealInfo{
		SectorID: unsealedSectorID,
		Length:   10,
	}
	err = ps.AddDealForPiece(cid1, dealInfo)
	require.NoError(t, err)

	// hold the lock to block.
	rpn.lk.Lock()

	// fetch the piece concurrently.
	errgrp, ctx := errgroup.WithContext(context.Background())
	for i := 0; i < 10; i++ {
		errgrp.Go(func() error {
			r, err := api.FetchUnsealedPiece(ctx, cid1)
			if err == nil {
				_ = r.Close()
			}
			return err
		})
	}

	time.Sleep(500 * time.Millisecond)
	require.EqualValues(t, 3, atomic.LoadInt32(&rpn.calls)) // throttled

	// allow to proceed.
	rpn.lk.Unlock()

	// allow all to finish.
	err = errgrp.Wait()
	require.NoError(t, err)

	require.EqualValues(t, 10, atomic.LoadInt32(&rpn.calls)) // throttled
}

func getPieceStore(t *testing.T) piecestore.PieceStore {
	ps, err := piecestoreimpl.NewPieceStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	require.NoError(t, err)

	ch := make(chan struct{}, 1)
	ps.OnReady(func(_ error) {
		ch <- struct{}{}
	})

	err = ps.Start(context.Background())
	require.NoError(t, err)
	<-ch
	return ps
}

type mockRPN struct {
	calls   int32        // guarded by atomic
	lk      sync.RWMutex // lock to simulate blocks.
	sectors map[abi.SectorNumber]string
}

func (m *mockRPN) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) {
	atomic.AddInt32(&m.calls, 1)
	m.lk.RLock()
	defer m.lk.RUnlock()

	data, ok := m.sectors[sectorID]
	if !ok {
		panic("sector not found")
	}
	return io.NopCloser(bytes.NewBuffer([]byte(data))), nil
}

func (m *mockRPN) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) {
	return sectorID == unsealedSectorID, nil
}

func (m *mockRPN) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) {
	panic("implement me")
}

func (m *mockRPN) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) {
	panic("implement me")
}

func (m *mockRPN) SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, voucher *paych.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, tok shared.TipSetToken) (abi.TokenAmount, error) {
	panic("implement me")
}

func (m *mockRPN) GetRetrievalPricingInput(ctx context.Context, pieceCID cid.Cid, storageDeals []abi.DealID) (retrievalmarket.PricingInput, error) {
	panic("implement me")
}

var _ retrievalmarket.RetrievalProviderNode = (*mockRPN)(nil)

96 markets/dagstore/mocks/mock_lotus_accessor.go Normal file
@ -0,0 +1,96 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: lotusaccessor.go

// Package mock_dagstore is a generated GoMock package.
package mock_dagstore

import (
	context "context"
	io "io"
	reflect "reflect"

	gomock "github.com/golang/mock/gomock"
	cid "github.com/ipfs/go-cid"
)

// MockLotusAccessor is a mock of LotusAccessor interface.
type MockLotusAccessor struct {
	ctrl     *gomock.Controller
	recorder *MockLotusAccessorMockRecorder
}

// MockLotusAccessorMockRecorder is the mock recorder for MockLotusAccessor.
type MockLotusAccessorMockRecorder struct {
	mock *MockLotusAccessor
}

// NewMockLotusAccessor creates a new mock instance.
func NewMockLotusAccessor(ctrl *gomock.Controller) *MockLotusAccessor {
	mock := &MockLotusAccessor{ctrl: ctrl}
	mock.recorder = &MockLotusAccessorMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockLotusAccessor) EXPECT() *MockLotusAccessorMockRecorder {
	return m.recorder
}

// FetchUnsealedPiece mocks base method.
func (m *MockLotusAccessor) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FetchUnsealedPiece", ctx, pieceCid)
	ret0, _ := ret[0].(io.ReadCloser)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// FetchUnsealedPiece indicates an expected call of FetchUnsealedPiece.
func (mr *MockLotusAccessorMockRecorder) FetchUnsealedPiece(ctx, pieceCid interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUnsealedPiece", reflect.TypeOf((*MockLotusAccessor)(nil).FetchUnsealedPiece), ctx, pieceCid)
}

// GetUnpaddedCARSize mocks base method.
func (m *MockLotusAccessor) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetUnpaddedCARSize", ctx, pieceCid)
	ret0, _ := ret[0].(uint64)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetUnpaddedCARSize indicates an expected call of GetUnpaddedCARSize.
func (mr *MockLotusAccessorMockRecorder) GetUnpaddedCARSize(ctx, pieceCid interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnpaddedCARSize", reflect.TypeOf((*MockLotusAccessor)(nil).GetUnpaddedCARSize), ctx, pieceCid)
}

// IsUnsealed mocks base method.
func (m *MockLotusAccessor) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IsUnsealed", ctx, pieceCid)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// IsUnsealed indicates an expected call of IsUnsealed.
func (mr *MockLotusAccessorMockRecorder) IsUnsealed(ctx, pieceCid interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockLotusAccessor)(nil).IsUnsealed), ctx, pieceCid)
}

// Start mocks base method.
func (m *MockLotusAccessor) Start(ctx context.Context) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Start", ctx)
	ret0, _ := ret[0].(error)
	return ret0
}

// Start indicates an expected call of Start.
func (mr *MockLotusAccessorMockRecorder) Start(ctx interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockLotusAccessor)(nil).Start), ctx)
}

110 markets/dagstore/mount.go Normal file
@ -0,0 +1,110 @@
package dagstore

import (
	"context"
	"io"
	"net/url"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/dagstore/mount"
)

const lotusScheme = "lotus"

var _ mount.Mount = (*LotusMount)(nil)

// mountTemplate returns a templated LotusMount containing the supplied API.
//
// It is called when registering a mount type with the mount registry
// of the DAG store. It is used to reinstantiate mounts after a restart.
//
// When the registry needs to deserialize a mount it clones the template then
// calls Deserialize on the cloned instance, which will have a reference to the
// lotus mount API supplied here.
func mountTemplate(api MinerAPI) *LotusMount {
	return &LotusMount{API: api}
}

// LotusMount is a DAGStore mount implementation that fetches deal data
// from a PieceCID.
type LotusMount struct {
	API      MinerAPI
	PieceCid cid.Cid
}

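// NewLotusMount creates a LotusMount for the given piece CID, backed by the
// supplied MinerAPI. The mount serializes to a lotus://<piece CID> URL so the
// dagstore can reinstantiate it after a restart.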
func NewLotusMount(pieceCid cid.Cid, api MinerAPI) (*LotusMount, error) {
	return &LotusMount{
		PieceCid: pieceCid,
		API:      api,
	}, nil
}

func (l *LotusMount) Serialize() *url.URL {
	return &url.URL{
		Host: l.PieceCid.String(),
	}
}

func (l *LotusMount) Deserialize(u *url.URL) error {
	pieceCid, err := cid.Decode(u.Host)
	if err != nil {
		return xerrors.Errorf("failed to parse PieceCid from host '%s': %w", u.Host, err)
	}
	l.PieceCid = pieceCid
	return nil
}

func (l *LotusMount) Fetch(ctx context.Context) (mount.Reader, error) {
	r, err := l.API.FetchUnsealedPiece(ctx, l.PieceCid)
	if err != nil {
		return nil, xerrors.Errorf("failed to fetch unsealed piece %s: %w", l.PieceCid, err)
	}
	return &readCloser{r}, nil
}

func (l *LotusMount) Info() mount.Info {
	return mount.Info{
		Kind:             mount.KindRemote,
		AccessSequential: true,
		AccessSeek:       false,
		AccessRandom:     false,
	}
}

func (l *LotusMount) Close() error {
	return nil
}

func (l *LotusMount) Stat(ctx context.Context) (mount.Stat, error) {
	size, err := l.API.GetUnpaddedCARSize(ctx, l.PieceCid)
	if err != nil {
		return mount.Stat{}, xerrors.Errorf("failed to fetch piece size for piece %s: %w", l.PieceCid, err)
	}
	isUnsealed, err := l.API.IsUnsealed(ctx, l.PieceCid)
	if err != nil {
		return mount.Stat{}, xerrors.Errorf("failed to verify if we have the unsealed piece %s: %w", l.PieceCid, err)
	}

	// TODO Mark false when storage deal expires.
	return mount.Stat{
		Exists: true,
		Size:   int64(size),
		Ready:  isUnsealed,
	}, nil
}

type readCloser struct {
	io.ReadCloser
}

var _ mount.Reader = (*readCloser)(nil)

func (r *readCloser) ReadAt(p []byte, off int64) (n int, err error) {
	return 0, xerrors.Errorf("ReadAt called but not implemented")
}

func (r *readCloser) Seek(offset int64, whence int) (int64, error) {
	return 0, xerrors.Errorf("Seek called but not implemented")
}

126 markets/dagstore/mount_test.go Normal file
@ -0,0 +1,126 @@
package dagstore

import (
	"context"
	"io/ioutil"
	"net/url"
	"strings"
	"testing"

	"github.com/golang/mock/gomock"
	blocksutil "github.com/ipfs/go-ipfs-blocksutil"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/dagstore/mount"

	mock_dagstore "github.com/filecoin-project/lotus/markets/dagstore/mocks"
)

func TestLotusMount(t *testing.T) {
	ctx := context.Background()
	bgen := blocksutil.NewBlockGenerator()
	cid := bgen.Next().Cid()

	mockCtrl := gomock.NewController(t)
	// when test is done, assert expectations on all mock objects.
	defer mockCtrl.Finish()

	// create a mock lotus api that returns the reader we want
	mockLotusMountAPI := mock_dagstore.NewMockLotusAccessor(mockCtrl)

	mockLotusMountAPI.EXPECT().IsUnsealed(gomock.Any(), cid).Return(true, nil).Times(1)

	mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(&readCloser{ioutil.NopCloser(strings.NewReader("testing"))}, nil).Times(1)
	mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(&readCloser{ioutil.NopCloser(strings.NewReader("testing"))}, nil).Times(1)
	mockLotusMountAPI.EXPECT().GetUnpaddedCARSize(ctx, cid).Return(uint64(100), nil).Times(1)

	mnt, err := NewLotusMount(cid, mockLotusMountAPI)
	require.NoError(t, err)
	info := mnt.Info()
	require.Equal(t, info.Kind, mount.KindRemote)

	// fetch and assert success
	rd, err := mnt.Fetch(context.Background())
	require.NoError(t, err)

	bz, err := ioutil.ReadAll(rd)
	require.NoError(t, err)
	require.NoError(t, rd.Close())
	require.Equal(t, []byte("testing"), bz)

	stat, err := mnt.Stat(ctx)
	require.NoError(t, err)
	require.EqualValues(t, 100, stat.Size)

	// serialize url then deserialize from mount template -> should get back
	// the same mount
	url := mnt.Serialize()
	mnt2 := mountTemplate(mockLotusMountAPI)
	err = mnt2.Deserialize(url)
	require.NoError(t, err)

	// fetching on this mount should get us back the same data.
	rd, err = mnt2.Fetch(context.Background())
	require.NoError(t, err)
	bz, err = ioutil.ReadAll(rd)
	require.NoError(t, err)
	require.NoError(t, rd.Close())
	require.Equal(t, []byte("testing"), bz)
}

func TestLotusMountDeserialize(t *testing.T) {
	api := &minerAPI{}

	bgen := blocksutil.NewBlockGenerator()
	cid := bgen.Next().Cid()

	// success
	us := lotusScheme + "://" + cid.String()
	u, err := url.Parse(us)
	require.NoError(t, err)

	mnt := mountTemplate(api)
	err = mnt.Deserialize(u)
	require.NoError(t, err)

	require.Equal(t, cid, mnt.PieceCid)
	require.Equal(t, api, mnt.API)

	// fails if cid is not valid
	us = lotusScheme + "://" + "rand"
	u, err = url.Parse(us)
	require.NoError(t, err)
	err = mnt.Deserialize(u)
	require.Error(t, err)
	require.Contains(t, err.Error(), "failed to parse PieceCid")
}

func TestLotusMountRegistration(t *testing.T) {
	ctx := context.Background()
	bgen := blocksutil.NewBlockGenerator()
	cid := bgen.Next().Cid()

	// success
	us := lotusScheme + "://" + cid.String()
	u, err := url.Parse(us)
	require.NoError(t, err)

	mockCtrl := gomock.NewController(t)
	// when test is done, assert expectations on all mock objects.
	defer mockCtrl.Finish()

	mockLotusMountAPI := mock_dagstore.NewMockLotusAccessor(mockCtrl)
	registry := mount.NewRegistry()
	err = registry.Register(lotusScheme, mountTemplate(mockLotusMountAPI))
	require.NoError(t, err)

	mnt, err := registry.Instantiate(u)
	require.NoError(t, err)

	mockLotusMountAPI.EXPECT().IsUnsealed(ctx, cid).Return(true, nil)
	mockLotusMountAPI.EXPECT().GetUnpaddedCARSize(ctx, cid).Return(uint64(100), nil).Times(1)
	stat, err := mnt.Stat(context.Background())
	require.NoError(t, err)
	require.EqualValues(t, 100, stat.Size)
	require.True(t, stat.Ready)
}

418 markets/dagstore/wrapper.go Normal file
@ -0,0 +1,418 @@
package dagstore

import (
	"context"
	"errors"
	"math"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/ipfs/go-cid"
	ds "github.com/ipfs/go-datastore"
	levelds "github.com/ipfs/go-ds-leveldb"
	measure "github.com/ipfs/go-ds-measure"
	logging "github.com/ipfs/go-log/v2"
	ldbopts "github.com/syndtr/goleveldb/leveldb/opt"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/node/config"

	"github.com/filecoin-project/go-statemachine/fsm"

	"github.com/filecoin-project/dagstore"
	"github.com/filecoin-project/dagstore/index"
	"github.com/filecoin-project/dagstore/mount"
	"github.com/filecoin-project/dagstore/shard"

	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates"
	"github.com/filecoin-project/go-fil-markets/stores"
)

const (
	maxRecoverAttempts = 1
	shardRegMarker     = ".shard-registration-complete"
)

var log = logging.Logger("dagstore")

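// Wrapper wraps the dagstore and exposes its lotus-specific lifecycle: it
// runs the background GC and trace loops, recovers failed shards, and
// implements stores.DAGStoreWrapper for the markets subsystem.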
type Wrapper struct {
	ctx          context.Context
	cancel       context.CancelFunc
	backgroundWg sync.WaitGroup

	cfg        config.DAGStoreConfig
	dagst      dagstore.Interface
	minerAPI   MinerAPI
	failureCh  chan dagstore.ShardResult
	traceCh    chan dagstore.Trace
	gcInterval time.Duration
}

var _ stores.DAGStoreWrapper = (*Wrapper)(nil)

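// NewDAGStore constructs the DAG store under cfg.RootDir (transients,
// datastore and index live in subdirectories of it), registers the lotus
// mount type, and returns both the raw dagstore and its lotus Wrapper.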
func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI) (*dagstore.DAGStore, *Wrapper, error) {
	// construct the DAG Store.
	registry := mount.NewRegistry()
	if err := registry.Register(lotusScheme, mountTemplate(minerApi)); err != nil {
		return nil, nil, xerrors.Errorf("failed to create registry: %w", err)
	}

	// The dagstore will write Shard failures to the `failureCh` here.
	failureCh := make(chan dagstore.ShardResult, 1)

	// The dagstore will write Trace events to the `traceCh` here.
	traceCh := make(chan dagstore.Trace, 32)

	var (
		transientsDir = filepath.Join(cfg.RootDir, "transients")
		datastoreDir  = filepath.Join(cfg.RootDir, "datastore")
		indexDir      = filepath.Join(cfg.RootDir, "index")
	)

	dstore, err := newDatastore(datastoreDir)
	if err != nil {
		return nil, nil, xerrors.Errorf("failed to create dagstore datastore in %s: %w", datastoreDir, err)
	}

	irepo, err := index.NewFSRepo(indexDir)
	if err != nil {
		return nil, nil, xerrors.Errorf("failed to initialise dagstore index repo")
	}

	dcfg := dagstore.Config{
		TransientsDir: transientsDir,
		IndexRepo:     irepo,
		Datastore:     dstore,
		MountRegistry: registry,
		FailureCh:     failureCh,
		TraceCh:       traceCh,
		// not limiting fetches globally, as the Lotus mount does
		// conditional throttling.
		MaxConcurrentIndex:        cfg.MaxConcurrentIndex,
		MaxConcurrentReadyFetches: cfg.MaxConcurrentReadyFetches,
		RecoverOnStart:            dagstore.RecoverOnAcquire,
	}

	dagst, err := dagstore.NewDAGStore(dcfg)
	if err != nil {
		return nil, nil, xerrors.Errorf("failed to create DAG store: %w", err)
	}

	w := &Wrapper{
		cfg:        cfg,
		dagst:      dagst,
		minerAPI:   minerApi,
		failureCh:  failureCh,
		traceCh:    traceCh,
		gcInterval: time.Duration(cfg.GCInterval),
	}

	return dagst, w, nil
}

// newDatastore creates a datastore under the given base directory
// for dagstore metadata.
func newDatastore(dir string) (ds.Batching, error) {
	// Create the datastore directory if it doesn't exist yet.
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, xerrors.Errorf("failed to create directory %s for DAG store datastore: %w", dir, err)
	}

	// Create a new LevelDB datastore
	dstore, err := levelds.NewDatastore(dir, &levelds.Options{
		Compression: ldbopts.NoCompression,
		NoSync:      false,
		Strict:      ldbopts.StrictAll,
		ReadOnly:    false,
	})
	if err != nil {
		return nil, xerrors.Errorf("failed to open datastore for DAG store: %w", err)
	}
	// Keep statistics about the datastore
	mds := measure.New("measure.", dstore)
	return mds, nil
}

func (w *Wrapper) Start(ctx context.Context) error {
	w.ctx, w.cancel = context.WithCancel(ctx)

	// Run a go-routine to do DagStore GC.
	w.backgroundWg.Add(1)
	go w.gcLoop()

	// run a go-routine to read the trace for debugging.
	w.backgroundWg.Add(1)
	go w.traceLoop()

	// Run a go-routine for shard recovery
	if dss, ok := w.dagst.(*dagstore.DAGStore); ok {
		w.backgroundWg.Add(1)
		go dagstore.RecoverImmediately(w.ctx, dss, w.failureCh, maxRecoverAttempts, w.backgroundWg.Done)
	}

	return w.dagst.Start(ctx)
}

func (w *Wrapper) traceLoop() {
	defer w.backgroundWg.Done()

	for w.ctx.Err() == nil {
		select {
		// Log trace events from the DAG store
		case tr := <-w.traceCh:
			log.Debugw("trace",
				"shard-key", tr.Key.String(),
				"op-type", tr.Op.String(),
				"after", tr.After.String())

		case <-w.ctx.Done():
			return
		}
	}
}

func (w *Wrapper) gcLoop() {
	defer w.backgroundWg.Done()

	ticker := time.NewTicker(w.gcInterval)
	defer ticker.Stop()

	for w.ctx.Err() == nil {
		select {
		// GC the DAG store on every tick
		case <-ticker.C:
			_, _ = w.dagst.GC(w.ctx)

		// Exit when the DAG store wrapper is shutdown
		case <-w.ctx.Done():
			return
		}
	}
}

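// LoadShard acquires the shard for the given piece CID and returns a
// closeable read-only blockstore over it. If the shard is not yet known to
// the dagstore, it is registered on the fly and the acquisition is retried.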
func (w *Wrapper) LoadShard(ctx context.Context, pieceCid cid.Cid) (stores.ClosableBlockstore, error) {
	log.Debugf("acquiring shard for piece CID %s", pieceCid)

	key := shard.KeyFromCID(pieceCid)
	resch := make(chan dagstore.ShardResult, 1)
	err := w.dagst.AcquireShard(ctx, key, resch, dagstore.AcquireOpts{})
	log.Debugf("sent message to acquire shard for piece CID %s", pieceCid)

	if err != nil {
		if !errors.Is(err, dagstore.ErrShardUnknown) {
			return nil, xerrors.Errorf("failed to schedule acquire shard for piece CID %s: %w", pieceCid, err)
		}

		// if the DAGStore does not know about the Shard -> register it and then try to acquire it again.
		log.Warnw("failed to load shard as shard is not registered, will re-register", "pieceCID", pieceCid)
		// The path of a transient file that we can ask the DAG Store to use
		// to perform the Indexing rather than fetching it via the Mount if
		// we already have a transient file. However, we don't have it here
		// and therefore we pass an empty file path.
		carPath := ""
		if err := stores.RegisterShardSync(ctx, w, pieceCid, carPath, false); err != nil {
			return nil, xerrors.Errorf("failed to re-register shard during loading piece CID %s: %w", pieceCid, err)
		}
		log.Warnw("successfully re-registered shard", "pieceCID", pieceCid)

		resch = make(chan dagstore.ShardResult, 1)
		if err := w.dagst.AcquireShard(ctx, key, resch, dagstore.AcquireOpts{}); err != nil {
			return nil, xerrors.Errorf("failed to acquire Shard for piece CID %s after re-registering: %w", pieceCid, err)
		}
	}

	// TODO: The context is not yet being actively monitored by the DAG store,
	// so we need to select against ctx.Done() until the following issue is
	// implemented:
	// https://github.com/filecoin-project/dagstore/issues/39
	var res dagstore.ShardResult
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case res = <-resch:
		if res.Error != nil {
			return nil, xerrors.Errorf("failed to acquire shard for piece CID %s: %w", pieceCid, res.Error)
		}
	}

	bs, err := res.Accessor.Blockstore()
	if err != nil {
		return nil, err
	}

	log.Debugf("successfully loaded blockstore for piece CID %s", pieceCid)
	return &Blockstore{ReadBlockstore: bs, Closer: res.Accessor}, nil
}

func (w *Wrapper) RegisterShard(ctx context.Context, pieceCid cid.Cid, carPath string, eagerInit bool, resch chan dagstore.ShardResult) error {
	// Create a lotus mount with the piece CID
	key := shard.KeyFromCID(pieceCid)
	mt, err := NewLotusMount(pieceCid, w.minerAPI)
	if err != nil {
		return xerrors.Errorf("failed to create lotus mount for piece CID %s: %w", pieceCid, err)
	}

	// Register the shard
	opts := dagstore.RegisterOpts{
		ExistingTransient:  carPath,
		LazyInitialization: !eagerInit,
	}
	err = w.dagst.RegisterShard(ctx, key, mt, resch, opts)
	if err != nil {
		return xerrors.Errorf("failed to schedule register shard for piece CID %s: %w", pieceCid, err)
	}
	log.Debugf("successfully submitted Register Shard request for piece CID %s with eagerInit=%t", pieceCid, eagerInit)

	return nil
}

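// MigrateDeals registers every deal already handed off to the sealing
// subsystem as a shard in the dagstore, using lazy initialization. It returns
// true if a migration was performed, and false if a previous run had already
// been marked complete via the marker file.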
func (w *Wrapper) MigrateDeals(ctx context.Context, deals []storagemarket.MinerDeal) (bool, error) {
	log := log.Named("migrator")

	// Check if all deals have already been registered as shards
	isComplete, err := w.registrationComplete()
	if err != nil {
		return false, xerrors.Errorf("failed to get dagstore migration status: %w", err)
	}
	if isComplete {
		// All deals have been registered as shards, bail out
		log.Info("no shard migration necessary; already marked complete")
		return false, nil
	}

	log.Infow("registering shards for all active deals in sealing subsystem", "count", len(deals))

	inSealingSubsystem := make(map[fsm.StateKey]struct{}, len(providerstates.StatesKnownBySealingSubsystem))
	for _, s := range providerstates.StatesKnownBySealingSubsystem {
		inSealingSubsystem[s] = struct{}{}
	}

	// channel where results will be received, and channel where the total
	// number of registered shards will be sent.
	resch := make(chan dagstore.ShardResult, 32)
	totalCh := make(chan int)
	doneCh := make(chan struct{})

	// Start making progress consuming results. We won't know how many to
	// actually consume until we register all shards.
	//
	// If there are any problems registering shards, just log an error
	go func() {
		defer close(doneCh)

		var total = math.MaxInt64
		var res dagstore.ShardResult
		for rcvd := 0; rcvd < total; {
			select {
			case total = <-totalCh:
				// we now know the total number of registered shards
				// nullify so that we no longer consume from it after closed.
				close(totalCh)
				totalCh = nil
			case res = <-resch:
				rcvd++
				if res.Error == nil {
					log.Infow("async shard registration completed successfully", "shard_key", res.Key)
				} else {
					log.Warnw("async shard registration failed", "shard_key", res.Key, "error", res.Error)
				}
			}
		}
	}()

	// Filter for deals that are handed off.
	//
	// If the deal has not yet been handed off to the sealing subsystem, we
	// don't need to call RegisterShard in this migration; RegisterShard will
	// be called in the new code once the deal reaches the state where it's
	// handed off to the sealing subsystem.
	var registered int
	for _, deal := range deals {
		pieceCid := deal.Proposal.PieceCID

		// enrich log statements in this iteration with deal ID and piece CID.
		log := log.With("deal_id", deal.DealID, "piece_cid", pieceCid)

		// Filter for deals that have been handed off to the sealing subsystem
		if _, ok := inSealingSubsystem[deal.State]; !ok {
			log.Infow("deal not ready; skipping")
			continue
		}

		log.Infow("registering deal in dagstore with lazy init")

		// Register the deal as a shard with the DAG store with lazy initialization.
		// The index will be populated the first time the deal is retrieved, or
		// through the bulk initialization script.
		err = w.RegisterShard(ctx, pieceCid, "", false, resch)
		if err != nil {
			log.Warnw("failed to register shard", "error", err)
			continue
		}
		registered++
	}

	log.Infow("finished registering all shards", "total", registered)
	totalCh <- registered
	<-doneCh

	log.Infow("confirmed registration of all shards")

	// Completed registering all shards, so mark the migration as complete
	err = w.markRegistrationComplete()
	if err != nil {
		log.Errorf("failed to mark shards as registered: %s", err)
	} else {
		log.Info("successfully marked migration as complete")
	}

	log.Infow("dagstore migration complete")

	return true, nil
}

// Check for the existence of a "marker" file indicating that the migration
// has completed
func (w *Wrapper) registrationComplete() (bool, error) {
	path := filepath.Join(w.cfg.RootDir, shardRegMarker)
	_, err := os.Stat(path)
	if os.IsNotExist(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}

// Create a "marker" file indicating that the migration has completed
func (w *Wrapper) markRegistrationComplete() error {
	path := filepath.Join(w.cfg.RootDir, shardRegMarker)
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	return file.Close()
}

func (w *Wrapper) Close() error {
	// Cancel the context
	w.cancel()

	// Close the DAG store
	log.Info("will close the dagstore")
	if err := w.dagst.Close(); err != nil {
		return xerrors.Errorf("failed to close dagstore: %w", err)
	}
	log.Info("dagstore closed")

	// Wait for the background go routine to exit
	log.Info("waiting for dagstore background wrapper goroutines to exit")
	w.backgroundWg.Wait()
	log.Info("exited dagstore background wrapper goroutines")

	return nil
}

121 markets/dagstore/wrapper_migration_test.go Normal file
@ -0,0 +1,121 @@
package dagstore

import (
	"context"
	"testing"

	"github.com/filecoin-project/dagstore"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes"
	tut "github.com/filecoin-project/go-fil-markets/shared_testutil"
	"github.com/filecoin-project/go-fil-markets/storagemarket"

	"github.com/filecoin-project/lotus/node/config"

	"github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
)

func TestShardRegistration(t *testing.T) {
	ps := tut.NewTestPieceStore()
	sa := testnodes.NewTestSectorAccessor()

	ctx := context.Background()
	cids := tut.GenerateCids(4)
	pieceCidUnsealed := cids[0]
	pieceCidSealed := cids[1]
	pieceCidUnsealed2 := cids[2]
	pieceCidUnsealed3 := cids[3]

	sealedSector := abi.SectorNumber(1)
	unsealedSector1 := abi.SectorNumber(2)
	unsealedSector2 := abi.SectorNumber(3)
	unsealedSector3 := abi.SectorNumber(4)

	// ps.ExpectPiece(pieceCidUnsealed, piecestore.PieceInfo{
	//	PieceCID: pieceCidUnsealed,
	//	Deals: []piecestore.DealInfo{
	//		{
	//			SectorID: unsealedSector1,
	//		},
	//	},
	// })
	//
	// ps.ExpectPiece(pieceCidSealed, piecestore.PieceInfo{
	//	PieceCID: pieceCidSealed,
	//	Deals: []piecestore.DealInfo{
	//		{
	//			SectorID: sealedSector,
	//		},
	//	},
	// })

	deals := []storagemarket.MinerDeal{{
		// Should be registered
		State:        storagemarket.StorageDealSealing,
		SectorNumber: unsealedSector1,
		ClientDealProposal: market.ClientDealProposal{
			Proposal: market.DealProposal{
				PieceCID: pieceCidUnsealed,
			},
		},
	}, {
		// Should be registered with lazy registration (because sector is sealed)
		State:        storagemarket.StorageDealSealing,
		SectorNumber: sealedSector,
		ClientDealProposal: market.ClientDealProposal{
			Proposal: market.DealProposal{
				PieceCID: pieceCidSealed,
			},
		},
	}, {
		// Should be ignored because deal is no longer active
		State:        storagemarket.StorageDealError,
		SectorNumber: unsealedSector2,
		ClientDealProposal: market.ClientDealProposal{
			Proposal: market.DealProposal{
				PieceCID: pieceCidUnsealed2,
			},
		},
	}, {
		// Should be ignored because deal is not yet sealing
		State:        storagemarket.StorageDealFundsReserved,
		SectorNumber: unsealedSector3,
		ClientDealProposal: market.ClientDealProposal{
			Proposal: market.DealProposal{
				PieceCID: pieceCidUnsealed3,
			},
		},
	}}

	cfg := config.DefaultStorageMiner().DAGStore
	cfg.RootDir = t.TempDir()

	mapi := NewMinerAPI(ps, sa, 10)
	dagst, w, err := NewDAGStore(cfg, mapi)
	require.NoError(t, err)
	require.NotNil(t, dagst)
	require.NotNil(t, w)

	err = dagst.Start(context.Background())
	require.NoError(t, err)

	migrated, err := w.MigrateDeals(ctx, deals)
	require.True(t, migrated)
	require.NoError(t, err)

	info := dagst.AllShardsInfo()
	require.Len(t, info, 2)
	for _, i := range info {
		require.Equal(t, dagstore.ShardStateNew, i.ShardState)
	}

	// Run register shard migration again
	migrated, err = w.MigrateDeals(ctx, deals)
	require.False(t, migrated)
	require.NoError(t, err)

	// ps.VerifyExpectations(t)
}

214 markets/dagstore/wrapper_test.go Normal file
@ -0,0 +1,214 @@
package dagstore

import (
	"bytes"
	"context"
	"io"
	"os"
	"testing"
	"time"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/node/config"

	"github.com/filecoin-project/dagstore"
	"github.com/filecoin-project/dagstore/mount"
	"github.com/filecoin-project/dagstore/shard"

	"github.com/ipfs/go-cid"
	"github.com/stretchr/testify/require"
)

// TestWrapperAcquireRecovery verifies that if acquire shard returns a "not found"
// error, the wrapper will attempt to register the shard then reacquire
func TestWrapperAcquireRecovery(t *testing.T) {
	ctx := context.Background()
	pieceCid, err := cid.Parse("bafkqaaa")
	require.NoError(t, err)

	// Create a DAG store wrapper
	dagst, w, err := NewDAGStore(config.DAGStoreConfig{
		RootDir:    t.TempDir(),
		GCInterval: config.Duration(1 * time.Millisecond),
	}, mockLotusMount{})
	require.NoError(t, err)

	defer dagst.Close() //nolint:errcheck

	// Return an error from acquire shard the first time
	acquireShardErr := make(chan error, 1)
	acquireShardErr <- xerrors.Errorf("unknown shard: %w", dagstore.ErrShardUnknown)

	// Create a mock DAG store in place of the real DAG store
	mock := &mockDagStore{
		acquireShardErr: acquireShardErr,
		acquireShardRes: dagstore.ShardResult{
			Accessor: getShardAccessor(t),
		},
		register: make(chan shard.Key, 1),
	}
	w.dagst = mock

	mybs, err := w.LoadShard(ctx, pieceCid)
	require.NoError(t, err)

	// Expect the wrapper to try to recover from the error returned from
	// acquire shard by calling register shard with the same key
	tctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	select {
	case <-tctx.Done():
		require.Fail(t, "failed to call register")
	case k := <-mock.register:
		require.Equal(t, k.String(), pieceCid.String())
	}

	// Verify that we can get things from the acquired blockstore
	var count int
	ch, err := mybs.AllKeysChan(ctx)
	require.NoError(t, err)
	for range ch {
		count++
	}
	require.Greater(t, count, 0)
}

// TestWrapperBackground verifies the behaviour of the background go routine
func TestWrapperBackground(t *testing.T) {
	ctx := context.Background()

	// Create a DAG store wrapper
	dagst, w, err := NewDAGStore(config.DAGStoreConfig{
		RootDir:    t.TempDir(),
		GCInterval: config.Duration(1 * time.Millisecond),
	}, mockLotusMount{})
	require.NoError(t, err)

	defer dagst.Close() //nolint:errcheck

	// Create a mock DAG store in place of the real DAG store
	mock := &mockDagStore{
		gc:      make(chan struct{}, 1),
		recover: make(chan shard.Key, 1),
		close:   make(chan struct{}, 1),
	}
	w.dagst = mock

	// Start up the wrapper
	err = w.Start(ctx)
	require.NoError(t, err)

	// Expect GC to be called automatically
	tctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	select {
	case <-tctx.Done():
		require.Fail(t, "failed to call GC")
	case <-mock.gc:
	}

	// Expect that when the wrapper is closed it will call close on the
	// DAG store
	err = w.Close()
	require.NoError(t, err)

	tctx, cancel3 := context.WithTimeout(ctx, time.Second)
	defer cancel3()
	select {
	case <-tctx.Done():
		require.Fail(t, "failed to call close")
	case <-mock.close:
	}
}

type mockDagStore struct {
	acquireShardErr chan error
	acquireShardRes dagstore.ShardResult
	register        chan shard.Key

	gc      chan struct{}
	recover chan shard.Key
	close   chan struct{}
}

func (m *mockDagStore) DestroyShard(ctx context.Context, key shard.Key, out chan dagstore.ShardResult, _ dagstore.DestroyOpts) error {
	panic("implement me")
}

func (m *mockDagStore) GetShardInfo(k shard.Key) (dagstore.ShardInfo, error) {
	panic("implement me")
}

func (m *mockDagStore) AllShardsInfo() dagstore.AllShardsInfo {
	panic("implement me")
}

func (m *mockDagStore) Start(_ context.Context) error {
	return nil
}

func (m *mockDagStore) RegisterShard(ctx context.Context, key shard.Key, mnt mount.Mount, out chan dagstore.ShardResult, opts dagstore.RegisterOpts) error {
	m.register <- key
	out <- dagstore.ShardResult{Key: key}
	return nil
}

func (m *mockDagStore) AcquireShard(ctx context.Context, key shard.Key, out chan dagstore.ShardResult, _ dagstore.AcquireOpts) error {
	select {
	case err := <-m.acquireShardErr:
		return err
	default:
	}

	out <- m.acquireShardRes
	return nil
}

func (m *mockDagStore) RecoverShard(ctx context.Context, key shard.Key, out chan dagstore.ShardResult, _ dagstore.RecoverOpts) error {
	m.recover <- key
	return nil
}

func (m *mockDagStore) GC(ctx context.Context) (*dagstore.GCResult, error) {
	select {
	case m.gc <- struct{}{}:
	default:
	}

	return nil, nil
}

func (m *mockDagStore) Close() error {
	m.close <- struct{}{}
	return nil
}

type mockLotusMount struct {
}

func (m mockLotusMount) Start(ctx context.Context) error {
	return nil
}

func (m mockLotusMount) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) {
	panic("implement me")
}

func (m mockLotusMount) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) {
	panic("implement me")
}

func (m mockLotusMount) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) {
	panic("implement me")
}

func getShardAccessor(t *testing.T) *dagstore.ShardAccessor {
	data, err := os.ReadFile("./fixtures/sample-rw-bs-v2.car")
	require.NoError(t, err)
	buff := bytes.NewReader(data)
	reader := &mount.NopCloser{Reader: buff, ReaderAt: buff, Seeker: buff}
	shardAccessor, err := dagstore.NewShardAccessor(reader, nil, nil)
	require.NoError(t, err)
	return shardAccessor
}

83 markets/retrievaladapter/client_blockstore.go Normal file
@ -0,0 +1,83 @@
package retrievaladapter

import (
	"fmt"
	"path/filepath"
	"sync"

	"github.com/ipfs/go-cid"
	bstore "github.com/ipfs/go-ipfs-blockstore"
	"github.com/ipld/go-car/v2/blockstore"

	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
)

// ProxyBlockstoreAccessor is an accessor that returns a fixed blockstore.
// To be used in combination with IPFS integration.
type ProxyBlockstoreAccessor struct {
	Blockstore bstore.Blockstore
}

var _ retrievalmarket.BlockstoreAccessor = (*ProxyBlockstoreAccessor)(nil)

func NewFixedBlockstoreAccessor(bs bstore.Blockstore) retrievalmarket.BlockstoreAccessor {
	return &ProxyBlockstoreAccessor{Blockstore: bs}
}

func (p *ProxyBlockstoreAccessor) Get(_ retrievalmarket.DealID, _ retrievalmarket.PayloadCID) (bstore.Blockstore, error) {
	return p.Blockstore, nil
}

func (p *ProxyBlockstoreAccessor) Done(_ retrievalmarket.DealID) error {
	return nil
}

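// CARBlockstoreAccessor is a retrievalmarket.BlockstoreAccessor that writes
// retrieved blocks into a CARv2 ReadWrite blockstore per deal, stored as
// <deal ID>.car under the configured root directory; Done finalizes the CAR
// for that deal.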
type CARBlockstoreAccessor struct {
	rootdir string
	lk      sync.Mutex
	open    map[retrievalmarket.DealID]*blockstore.ReadWrite
}

var _ retrievalmarket.BlockstoreAccessor = (*CARBlockstoreAccessor)(nil)

func NewCARBlockstoreAccessor(rootdir string) *CARBlockstoreAccessor {
	return &CARBlockstoreAccessor{
		rootdir: rootdir,
		open:    make(map[retrievalmarket.DealID]*blockstore.ReadWrite),
	}
}

func (c *CARBlockstoreAccessor) Get(id retrievalmarket.DealID, payloadCid retrievalmarket.PayloadCID) (bstore.Blockstore, error) {
	c.lk.Lock()
	defer c.lk.Unlock()

	bs, ok := c.open[id]
	if ok {
		return bs, nil
	}

	path := c.PathFor(id)
	bs, err := blockstore.OpenReadWrite(path, []cid.Cid{payloadCid}, blockstore.UseWholeCIDs(true))
	if err != nil {
		return nil, err
	}
	c.open[id] = bs
	return bs, nil
}

func (c *CARBlockstoreAccessor) Done(id retrievalmarket.DealID) error {
	c.lk.Lock()
	defer c.lk.Unlock()

	bs, ok := c.open[id]
	if !ok {
		return nil
	}

	delete(c.open, id)
	return bs.Finalize()
}

func (c *CARBlockstoreAccessor) PathFor(id retrievalmarket.DealID) string {
	return filepath.Join(c.rootdir, fmt.Sprintf("%d.car", id))
}

@ -2,44 +2,34 @@ package retrievaladapter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/api/v1api"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/storage/sectorblocks"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/shared"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
specstorage "github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/lotus/api/v1api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
)
|
||||
|
||||
var log = logging.Logger("retrievaladapter")
|
||||
|
||||
type retrievalProviderNode struct {
|
||||
maddr address.Address
|
||||
secb sectorblocks.SectorBuilder
|
||||
pp sectorstorage.PieceProvider
|
||||
full v1api.FullNode
|
||||
full v1api.FullNode
|
||||
}
|
||||
|
||||
var _ retrievalmarket.RetrievalProviderNode = (*retrievalProviderNode)(nil)
|
||||
|
||||
// NewRetrievalProviderNode returns a new node adapter for a retrieval provider that talks to the
|
||||
// Lotus Node
|
||||
func NewRetrievalProviderNode(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.RetrievalProviderNode {
|
||||
return &retrievalProviderNode{address.Address(maddr), secb, pp, full}
|
||||
func NewRetrievalProviderNode(full v1api.FullNode) retrievalmarket.RetrievalProviderNode {
|
||||
return &retrievalProviderNode{full: full}
|
||||
}
|
||||
|
||||
func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) {
|
||||
@ -52,42 +42,6 @@ func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, min
|
||||
return mi.Worker, err
|
||||
}
|
||||
|
||||
func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) {
|
||||
log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length)
|
||||
si, err := rpn.sectorsStatus(ctx, sectorID, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(rpn.maddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ref := specstorage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: sectorID,
|
||||
},
|
||||
ProofType: si.SealProof,
|
||||
}
|
||||
|
||||
var commD cid.Cid
|
||||
if si.CommD != nil {
|
||||
commD = *si.CommD
|
||||
}
|
||||
|
||||
// Get a reader for the piece, unsealing the piece if necessary
|
||||
log.Debugf("read piece in sector %d, offset %d, length %d from miner %d", sectorID, offset, length, mid)
|
||||
r, unsealed, err := rpn.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.Ticket.Value, commD)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err)
|
||||
}
|
||||
_ = unsealed // todo: use
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (rpn *retrievalProviderNode) SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, voucher *paych.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, tok shared.TipSetToken) (abi.TokenAmount, error) {
|
||||
// TODO: respect the provided TipSetToken (a serialized TipSetKey) when
|
||||
// querying the chain
|
||||
@ -104,29 +58,6 @@ func (rpn *retrievalProviderNode) GetChainHead(ctx context.Context) (shared.TipS
|
||||
return head.Key().Bytes(), head.Height(), nil
|
||||
}
|
||||
|
||||
func (rpn *retrievalProviderNode) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) {
|
||||
si, err := rpn.sectorsStatus(ctx, sectorID, true)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("failed to get sector info: %w", err)
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(rpn.maddr)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
ref := specstorage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: sectorID,
|
||||
},
|
||||
ProofType: si.SealProof,
|
||||
}
|
||||
|
||||
log.Debugf("will call IsUnsealed now sector=%+v, offset=%d, size=%d", sectorID, offset, length)
|
||||
return rpn.pp.IsUnsealed(ctx, ref, storiface.UnpaddedByteIndex(offset), length)
|
||||
}
|
||||
|
||||
// GetRetrievalPricingInput takes a set of candidate storage deals that can serve a retrieval request,
|
||||
// and returns a minimally populated PricingInput. This PricingInput should be enhanced
|
||||
// with more data, and passed to the pricing function to determine the final quoted price.
|
||||
@ -175,37 +106,3 @@ func (rpn *retrievalProviderNode) GetRetrievalPricingInput(ctx context.Context,
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (rpn *retrievalProviderNode) sectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) {
|
||||
sInfo, err := rpn.secb.SectorsStatus(ctx, sid, false)
|
||||
if err != nil {
|
||||
return api.SectorInfo{}, err
|
||||
}
|
||||
|
||||
if !showOnChainInfo {
|
||||
return sInfo, nil
|
||||
}
|
||||
|
||||
onChainInfo, err := rpn.full.StateSectorGetInfo(ctx, rpn.maddr, sid, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return sInfo, err
|
||||
}
|
||||
if onChainInfo == nil {
|
||||
return sInfo, nil
|
||||
}
|
||||
sInfo.SealProof = onChainInfo.SealProof
|
||||
sInfo.Activation = onChainInfo.Activation
|
||||
sInfo.Expiration = onChainInfo.Expiration
|
||||
sInfo.DealWeight = onChainInfo.DealWeight
|
||||
sInfo.VerifiedDealWeight = onChainInfo.VerifiedDealWeight
|
||||
sInfo.InitialPledge = onChainInfo.InitialPledge
|
||||
|
||||
ex, err := rpn.full.StateSectorExpiration(ctx, rpn.maddr, sid, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return sInfo, nil
|
||||
}
|
||||
sInfo.OnTime = ex.OnTime
|
||||
sInfo.Early = ex.Early
|
||||
|
||||
return sInfo, nil
|
||||
}
|
||||
|
132
markets/sectoraccessor/sectoraccessor.go
Normal file
@ -0,0 +1,132 @@
|
||||
package sectoraccessor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/api/v1api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/storage/sectorblocks"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
specstorage "github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
)
|
||||
|
||||
var log = logging.Logger("sectoraccessor")
|
||||
|
||||
type sectorAccessor struct {
|
||||
maddr address.Address
|
||||
secb sectorblocks.SectorBuilder
|
||||
pp sectorstorage.PieceProvider
|
||||
full v1api.FullNode
|
||||
}
|
||||
|
||||
var _ retrievalmarket.SectorAccessor = (*sectorAccessor)(nil)
|
||||
|
||||
func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.SectorAccessor {
|
||||
return &sectorAccessor{address.Address(maddr), secb, pp, full}
|
||||
}
|
||||
|
||||
func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) {
|
||||
log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length)
|
||||
si, err := sa.sectorsStatus(ctx, sectorID, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(sa.maddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ref := specstorage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: sectorID,
|
||||
},
|
||||
ProofType: si.SealProof,
|
||||
}
|
||||
|
||||
var commD cid.Cid
|
||||
if si.CommD != nil {
|
||||
commD = *si.CommD
|
||||
}
|
||||
|
||||
// Get a reader for the piece, unsealing the piece if necessary
|
||||
log.Debugf("read piece in sector %d, offset %d, length %d from miner %d", sectorID, offset, length, mid)
|
||||
r, unsealed, err := sa.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.Ticket.Value, commD)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err)
|
||||
}
|
||||
_ = unsealed // todo: use
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (sa *sectorAccessor) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) {
|
||||
si, err := sa.sectorsStatus(ctx, sectorID, true)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("failed to get sector info: %w", err)
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(sa.maddr)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
ref := specstorage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: sectorID,
|
||||
},
|
||||
ProofType: si.SealProof,
|
||||
}
|
||||
|
||||
log.Debugf("will call IsUnsealed now sector=%+v, offset=%d, size=%d", sectorID, offset, length)
|
||||
return sa.pp.IsUnsealed(ctx, ref, storiface.UnpaddedByteIndex(offset), length)
|
||||
}
|
||||
|
||||
func (sa *sectorAccessor) sectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) {
|
||||
sInfo, err := sa.secb.SectorsStatus(ctx, sid, false)
|
||||
if err != nil {
|
||||
return api.SectorInfo{}, err
|
||||
}
|
||||
|
||||
if !showOnChainInfo {
|
||||
return sInfo, nil
|
||||
}
|
||||
|
||||
onChainInfo, err := sa.full.StateSectorGetInfo(ctx, sa.maddr, sid, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return sInfo, err
|
||||
}
|
||||
if onChainInfo == nil {
|
||||
return sInfo, nil
|
||||
}
|
||||
sInfo.SealProof = onChainInfo.SealProof
|
||||
sInfo.Activation = onChainInfo.Activation
|
||||
sInfo.Expiration = onChainInfo.Expiration
|
||||
sInfo.DealWeight = onChainInfo.DealWeight
|
||||
sInfo.VerifiedDealWeight = onChainInfo.VerifiedDealWeight
|
||||
sInfo.InitialPledge = onChainInfo.InitialPledge
|
||||
|
||||
ex, err := sa.full.StateSectorExpiration(ctx, sa.maddr, sid, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return sInfo, nil
|
||||
}
|
||||
sInfo.OnTime = ex.OnTime
|
||||
sInfo.Early = ex.Early
|
||||
|
||||
return sInfo, nil
|
||||
}
|
101
markets/storageadapter/client_blockstore.go
Normal file
@ -0,0 +1,101 @@
|
||||
package storageadapter
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
blockstore "github.com/ipfs/go-ipfs-blockstore"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-fil-markets/stores"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
)
|
||||
|
||||
// ProxyBlockstoreAccessor is an accessor that returns a fixed blockstore.
|
||||
// To be used in combination with IPFS integration.
|
||||
type ProxyBlockstoreAccessor struct {
|
||||
Blockstore blockstore.Blockstore
|
||||
}
|
||||
|
||||
var _ storagemarket.BlockstoreAccessor = (*ProxyBlockstoreAccessor)(nil)
|
||||
|
||||
func NewFixedBlockstoreAccessor(bs blockstore.Blockstore) storagemarket.BlockstoreAccessor {
|
||||
return &ProxyBlockstoreAccessor{Blockstore: bs}
|
||||
}
|
||||
|
||||
func (p *ProxyBlockstoreAccessor) Get(cid storagemarket.PayloadCID) (blockstore.Blockstore, error) {
|
||||
return p.Blockstore, nil
|
||||
}
|
||||
|
||||
func (p *ProxyBlockstoreAccessor) Done(cid storagemarket.PayloadCID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ImportsBlockstoreAccessor is a blockstore accessor backed by the
|
||||
// imports.Manager.
|
||||
type ImportsBlockstoreAccessor struct {
|
||||
m *imports.Manager
|
||||
lk sync.Mutex
|
||||
open map[cid.Cid]struct {
|
||||
st stores.ClosableBlockstore
|
||||
refs int
|
||||
}
|
||||
}
|
||||
|
||||
var _ storagemarket.BlockstoreAccessor = (*ImportsBlockstoreAccessor)(nil)
|
||||
|
||||
func NewImportsBlockstoreAccessor(importmgr *imports.Manager) *ImportsBlockstoreAccessor {
|
||||
return &ImportsBlockstoreAccessor{
|
||||
m: importmgr,
|
||||
open: make(map[cid.Cid]struct {
|
||||
st stores.ClosableBlockstore
|
||||
refs int
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ImportsBlockstoreAccessor) Get(payloadCID storagemarket.PayloadCID) (blockstore.Blockstore, error) {
|
||||
s.lk.Lock()
|
||||
defer s.lk.Unlock()
|
||||
|
||||
e, ok := s.open[payloadCID]
|
||||
if ok {
|
||||
e.refs++
|
||||
return e.st, nil
|
||||
}
|
||||
|
||||
path, err := s.m.CARPathFor(payloadCID)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to get client blockstore for root %s: %w", payloadCID, err)
|
||||
}
|
||||
if path == "" {
|
||||
return nil, xerrors.Errorf("no client blockstore for root %s", payloadCID)
|
||||
}
|
||||
ret, err := stores.ReadOnlyFilestore(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
e.st = ret
|
||||
s.open[payloadCID] = e
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (s *ImportsBlockstoreAccessor) Done(payloadCID storagemarket.PayloadCID) error {
|
||||
s.lk.Lock()
|
||||
defer s.lk.Unlock()
|
||||
|
||||
e, ok := s.open[payloadCID]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
e.refs--
|
||||
if e.refs == 0 {
|
||||
if err := e.st.Close(); err != nil {
|
||||
log.Warnf("failed to close blockstore: %s", err)
|
||||
}
|
||||
delete(s.open, payloadCID)
|
||||
}
|
||||
return nil
|
||||
}
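
To make the reference counting above concrete, a minimal, hypothetical sketch (not part of this diff); the helper name is invented, and `blockstore` refers to the go-ipfs-blockstore package already imported in this file.

// withImportBlockstore illustrates the Get/Done contract of the accessor above:
// every Get must be paired with a Done, and the underlying filestore-backed
// blockstore is only closed once the last holder calls Done.
func withImportBlockstore(acc *ImportsBlockstoreAccessor, root storagemarket.PayloadCID, use func(blockstore.Blockstore) error) error {
	bs, err := acc.Get(root)
	if err != nil {
		return err
	}
	defer acc.Done(root) //nolint:errcheck
	return use(bs)
}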
|
@ -32,7 +32,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/lib/sigs"
|
||||
"github.com/filecoin-project/lotus/markets/utils"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/modules/helpers"
|
||||
"github.com/filecoin-project/lotus/storage/sectorblocks"
|
||||
)
|
||||
@ -45,9 +44,6 @@ var log = logging.Logger("storageadapter")
|
||||
type ProviderNodeAdapter struct {
|
||||
v1api.FullNode
|
||||
|
||||
// this goes away with the data transfer module
|
||||
dag dtypes.StagingDAG
|
||||
|
||||
secb *sectorblocks.SectorBlocks
|
||||
ev *events.Events
|
||||
|
||||
@ -59,15 +55,14 @@ type ProviderNodeAdapter struct {
|
||||
scMgr *SectorCommittedManager
|
||||
}
|
||||
|
||||
func NewProviderNodeAdapter(fc *config.MinerFeeConfig, dc *config.DealmakingConfig) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) storagemarket.StorageProviderNode {
|
||||
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) storagemarket.StorageProviderNode {
|
||||
func NewProviderNodeAdapter(fc *config.MinerFeeConfig, dc *config.DealmakingConfig) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) storagemarket.StorageProviderNode {
|
||||
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) storagemarket.StorageProviderNode {
|
||||
ctx := helpers.LifecycleCtx(mctx, lc)
|
||||
|
||||
ev := events.NewEvents(ctx, full)
|
||||
na := &ProviderNodeAdapter{
|
||||
FullNode: full,
|
||||
|
||||
dag: dag,
|
||||
secb: secb,
|
||||
ev: ev,
|
||||
dealPublisher: dealPublisher,
|
||||
|
@ -6,9 +6,10 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/lotus/node/impl/net"
|
||||
metricsi "github.com/ipfs/go-metrics-interface"
|
||||
|
||||
"github.com/filecoin-project/lotus/node/impl/net"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/system"
|
||||
|
||||
@ -66,6 +67,7 @@ var (
|
||||
AutoNATSvcKey = special{10} // Libp2p option
|
||||
BandwidthReporterKey = special{11} // Libp2p option
|
||||
ConnGaterKey = special{12} // libp2p option
|
||||
DAGStoreKey = special{13} // constructor returns multiple values
|
||||
)
|
||||
|
||||
type invoke int
|
||||
@ -332,10 +334,9 @@ func Repo(r repo.Repo) Option {
|
||||
),
|
||||
|
||||
Override(new(dtypes.ClientImportMgr), modules.ClientImportMgr),
|
||||
Override(new(dtypes.ClientMultiDstore), modules.ClientMultiDatastore),
|
||||
|
||||
Override(new(dtypes.ClientBlockstore), modules.ClientBlockstore),
|
||||
Override(new(dtypes.ClientRetrievalStoreManager), modules.ClientRetrievalStoreManager),
|
||||
|
||||
Override(new(ci.PrivKey), lp2p.PrivKey),
|
||||
Override(new(ci.PubKey), ci.PrivKey.GetPublic),
|
||||
Override(new(peer.ID), peer.IDFromPublicKey),
|
||||
|
@ -112,12 +112,14 @@ var ChainNode = Options(
|
||||
|
||||
// Markets (retrieval)
|
||||
Override(new(discovery.PeerResolver), modules.RetrievalResolver),
|
||||
Override(new(retrievalmarket.BlockstoreAccessor), modules.RetrievalBlockstoreAccessor),
|
||||
Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient),
|
||||
Override(new(dtypes.ClientDataTransfer), modules.NewClientGraphsyncDataTransfer),
|
||||
|
||||
// Markets (storage)
|
||||
Override(new(*market.FundManager), market.NewFundManager),
|
||||
Override(new(dtypes.ClientDatastore), modules.NewClientDatastore),
|
||||
Override(new(storagemarket.BlockstoreAccessor), modules.StorageBlockstoreAccessor),
|
||||
Override(new(storagemarket.StorageClient), modules.StorageClient),
|
||||
Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter),
|
||||
Override(HandleMigrateClientFundsKey, modules.HandleMigrateClientFunds),
|
||||
@ -167,8 +169,9 @@ func ConfigFullNode(c interface{}) Option {
|
||||
|
||||
If(cfg.Client.UseIpfs,
|
||||
Override(new(dtypes.ClientBlockstore), modules.IpfsClientBlockstore(ipfsMaddr, cfg.Client.IpfsOnlineMode)),
|
||||
Override(new(storagemarket.BlockstoreAccessor), modules.IpfsStorageBlockstoreAccessor),
|
||||
If(cfg.Client.IpfsUseForRetrieval,
|
||||
Override(new(dtypes.ClientRetrievalStoreManager), modules.ClientBlockstoreRetrievalStoreManager),
|
||||
Override(new(retrievalmarket.BlockstoreAccessor), modules.IpfsRetrievalBlockstoreAccessor),
|
||||
),
|
||||
),
|
||||
Override(new(dtypes.Graphsync), modules.Graphsync(cfg.Client.SimultaneousTransfers)),
|
||||
|
@ -12,7 +12,6 @@ import (
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/markets/retrievaladapter"
|
||||
storage2 "github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
@ -23,7 +22,10 @@ import (
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
"github.com/filecoin-project/lotus/markets/dagstore"
|
||||
"github.com/filecoin-project/lotus/markets/dealfilter"
|
||||
"github.com/filecoin-project/lotus/markets/retrievaladapter"
|
||||
"github.com/filecoin-project/lotus/markets/sectoraccessor"
|
||||
"github.com/filecoin-project/lotus/markets/storageadapter"
|
||||
"github.com/filecoin-project/lotus/miner"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
@ -130,9 +132,7 @@ func ConfigStorageMiner(c interface{}) Option {
|
||||
|
||||
If(cfg.Subsystems.EnableMarkets,
|
||||
// Markets
|
||||
Override(new(dtypes.StagingMultiDstore), modules.StagingMultiDatastore),
|
||||
Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore),
|
||||
Override(new(dtypes.StagingDAG), modules.StagingDAG),
|
||||
Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfers)),
|
||||
Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore),
|
||||
Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks),
|
||||
@ -147,7 +147,12 @@ func ConfigStorageMiner(c interface{}) Option {
|
||||
})),
|
||||
Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)),
|
||||
|
||||
// DAG Store
|
||||
Override(new(dagstore.MinerAPI), modules.NewMinerAPI),
|
||||
Override(DAGStoreKey, modules.DAGStore),
|
||||
|
||||
// Markets (retrieval)
|
||||
Override(new(retrievalmarket.SectorAccessor), sectoraccessor.NewSectorAccessor),
|
||||
Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode),
|
||||
Override(new(rmnet.RetrievalMarketNetwork), modules.RetrievalNetwork),
|
||||
Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider),
|
||||
|
@ -191,6 +191,12 @@ func DefaultStorageMiner() *StorageMiner {
|
||||
TerminateControl: []string{},
|
||||
DealPublishControl: []string{},
|
||||
},
|
||||
|
||||
DAGStore: DAGStoreConfig{
|
||||
MaxConcurrentIndex: 5,
|
||||
MaxConcurrencyStorageCalls: 100,
|
||||
GCInterval: Duration(1 * time.Minute),
|
||||
},
|
||||
}
|
||||
cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"
|
||||
cfg.Common.API.RemoteListenAddress = "127.0.0.1:2345"
|
||||
|
@ -125,6 +125,55 @@ and storage providers`,
|
||||
Comment: ``,
|
||||
},
|
||||
},
|
||||
"DAGStoreConfig": []DocField{
|
||||
{
|
||||
Name: "RootDir",
|
||||
Type: "string",
|
||||
|
||||
Comment: `Path to the dagstore root directory. This directory contains three
|
||||
subdirectories, which can be symlinked to alternative locations if
|
||||
need be:
|
||||
- ./transients: caches unsealed deals that have been fetched from the
|
||||
storage subsystem for serving retrievals.
|
||||
- ./indices: stores shard indices.
|
||||
- ./datastore: holds the KV store tracking the state of every shard
|
||||
known to the DAG store.
|
||||
Default value: <LOTUS_MARKETS_PATH>/dagstore (split deployment) or
|
||||
<LOTUS_MINER_PATH>/dagstore (monolith deployment)`,
|
||||
},
|
||||
{
|
||||
Name: "MaxConcurrentIndex",
|
||||
Type: "int",
|
||||
|
||||
Comment: `The maximum number of indexing jobs that can run simultaneously.
|
||||
0 means unlimited.
|
||||
Default value: 5.`,
|
||||
},
|
||||
{
|
||||
Name: "MaxConcurrentReadyFetches",
|
||||
Type: "int",
|
||||
|
||||
Comment: `The maximum number of unsealed deals that can be fetched simultaneously
|
||||
from the storage subsystem. 0 means unlimited.
|
||||
Default value: 0 (unlimited).`,
|
||||
},
|
||||
{
|
||||
Name: "MaxConcurrencyStorageCalls",
|
||||
Type: "int",
|
||||
|
||||
Comment: `The maximum number of simultaneous inflight API calls to the storage
|
||||
subsystem.
|
||||
Default value: 100.`,
|
||||
},
|
||||
{
|
||||
Name: "GCInterval",
|
||||
Type: "Duration",
|
||||
|
||||
Comment: `The time between calls to periodic dagstore GC, in time.Duration string
|
||||
representation, e.g. 1m, 5m, 1h.
|
||||
Default value: 1 minute.`,
|
||||
},
|
||||
},
|
||||
"DealmakingConfig": []DocField{
|
||||
{
|
||||
Name: "ConsiderOnlineStorageDeals",
|
||||
@ -741,6 +790,12 @@ Default is 20 (about once a week).`,
|
||||
Name: "Addresses",
|
||||
Type: "MinerAddressConfig",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "DAGStore",
|
||||
Type: "DAGStoreConfig",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
},
|
||||
|
@ -49,6 +49,41 @@ type StorageMiner struct {
|
||||
Storage sectorstorage.SealerConfig
|
||||
Fees MinerFeeConfig
|
||||
Addresses MinerAddressConfig
|
||||
DAGStore DAGStoreConfig
|
||||
}
|
||||
|
||||
type DAGStoreConfig struct {
|
||||
// Path to the dagstore root directory. This directory contains three
|
||||
// subdirectories, which can be symlinked to alternative locations if
|
||||
// need be:
|
||||
// - ./transients: caches unsealed deals that have been fetched from the
|
||||
// storage subsystem for serving retrievals.
|
||||
// - ./indices: stores shard indices.
|
||||
// - ./datastore: holds the KV store tracking the state of every shard
|
||||
// known to the DAG store.
|
||||
// Default value: <LOTUS_MARKETS_PATH>/dagstore (split deployment) or
|
||||
// <LOTUS_MINER_PATH>/dagstore (monolith deployment)
|
||||
RootDir string
|
||||
|
||||
// The maximum number of indexing jobs that can run simultaneously.
|
||||
// 0 means unlimited.
|
||||
// Default value: 5.
|
||||
MaxConcurrentIndex int
|
||||
|
||||
// The maximum number of unsealed deals that can be fetched simultaneously
|
||||
// from the storage subsystem. 0 means unlimited.
|
||||
// Default value: 0 (unlimited).
|
||||
MaxConcurrentReadyFetches int
|
||||
|
||||
// The maximum number of simultaneous inflight API calls to the storage
|
||||
// subsystem.
|
||||
// Default value: 100.
|
||||
MaxConcurrencyStorageCalls int
|
||||
|
||||
// The time between calls to periodic dagstore GC, in time.Duration string
|
||||
// representation, e.g. 1m, 5m, 1h.
|
||||
// Default value: 1 minute.
|
||||
GCInterval Duration
|
||||
}
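
A hypothetical illustration of these fields populated with the documented defaults (matching DefaultStorageMiner elsewhere in this change); it is a sketch only, the root directory path is invented, and it assumes the `time` package is imported.

// exampleDAGStoreConfig shows the documented defaults, overriding only RootDir.
var exampleDAGStoreConfig = DAGStoreConfig{
	RootDir:                    "/mnt/dagstore",       // hypothetical override; empty means the default location
	MaxConcurrentIndex:         5,                     // documented default
	MaxConcurrentReadyFetches:  0,                     // 0 = unlimited (documented default)
	MaxConcurrencyStorageCalls: 100,                   // documented default
	GCInterval:                 Duration(time.Minute), // documented default: 1 minute
}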
|
||||
|
||||
type MinerSubsystemConfig struct {
|
||||
|
@ -5,6 +5,7 @@ package hello
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
@ -15,6 +16,7 @@ import (
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
var _ = cid.Undef
|
||||
var _ = math.E
|
||||
var _ = sort.Sort
|
||||
|
||||
var lengthBufHelloMessage = []byte{132}
|
||||
|
91
node/impl/client/car_helpers.go
Normal file
@ -0,0 +1,91 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
"github.com/ipld/go-car/util"
|
||||
"github.com/multiformats/go-varint"
|
||||
)
|
||||
|
||||
// —————————————————————————————————————————————————————————
|
||||
//
|
||||
// This code is temporary, and should be deleted when
|
||||
// https://github.com/ipld/go-car/issues/196 is resolved.
|
||||
//
|
||||
// —————————————————————————————————————————————————————————
|
||||
|
||||
func init() {
|
||||
cbor.RegisterCborType(CarHeader{})
|
||||
}
|
||||
|
||||
type CarHeader struct {
|
||||
Roots []cid.Cid
|
||||
Version uint64
|
||||
}
|
||||
|
||||
func readHeader(r io.Reader) (*CarHeader, error) {
|
||||
hb, err := ldRead(r, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ch CarHeader
|
||||
if err := cbor.DecodeInto(hb, &ch); err != nil {
|
||||
return nil, fmt.Errorf("invalid header: %v", err)
|
||||
}
|
||||
|
||||
return &ch, nil
|
||||
}
|
||||
|
||||
func writeHeader(h *CarHeader, w io.Writer) error {
|
||||
hb, err := cbor.DumpObject(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return util.LdWrite(w, hb)
|
||||
}
|
||||
|
||||
func ldRead(r io.Reader, zeroLenAsEOF bool) ([]byte, error) {
|
||||
l, err := varint.ReadUvarint(toByteReader(r))
|
||||
if err != nil {
|
||||
// If the length of bytes read is non-zero when the error is EOF then signal an unclean EOF.
|
||||
if l > 0 && err == io.EOF {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil, err
|
||||
} else if l == 0 && zeroLenAsEOF {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
buf := make([]byte, l)
|
||||
if _, err := io.ReadFull(r, buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
type readerPlusByte struct {
|
||||
io.Reader
|
||||
}
|
||||
|
||||
func (rb readerPlusByte) ReadByte() (byte, error) {
|
||||
return readByte(rb)
|
||||
}
|
||||
|
||||
func readByte(r io.Reader) (byte, error) {
|
||||
var p [1]byte
|
||||
_, err := io.ReadFull(r, p[:])
|
||||
return p[0], err
|
||||
}
|
||||
|
||||
func toByteReader(r io.Reader) io.ByteReader {
|
||||
if br, ok := r.(io.ByteReader); ok {
|
||||
return br
|
||||
}
|
||||
return &readerPlusByte{r}
|
||||
}
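
As a usage illustration of the helpers above (not part of this change): reading the root of a plain CARv1 file. The function name is hypothetical, `os` would need to be added to this file's imports, and for a CARv2 file one would first obtain the inner data reader via carv2.OpenReader, as ClientImportLocal does further below.

// carV1Root reads the header of a CARv1 stream with readHeader and returns its
// single root, erroring if the file declares more or fewer roots.
func carV1Root(path string) (cid.Cid, error) {
	f, err := os.Open(path)
	if err != nil {
		return cid.Undef, err
	}
	defer f.Close() //nolint:errcheck

	hd, err := readHeader(f)
	if err != nil {
		return cid.Undef, err
	}
	if len(hd.Roots) != 1 {
		return cid.Undef, fmt.Errorf("expected exactly one root, found %d", len(hd.Roots))
	}
	return hd.Roots[0], nil
}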
|
@ -2,6 +2,7 @@ package client
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -9,8 +10,11 @@ import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
|
||||
bstore "github.com/ipfs/go-ipfs-blockstore"
|
||||
unixfile "github.com/ipfs/go-unixfs/file"
|
||||
"github.com/ipld/go-car"
|
||||
carv2 "github.com/ipld/go-car/v2"
|
||||
"github.com/ipld/go-car/v2/blockstore"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-padreader"
|
||||
@ -18,16 +22,9 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
"github.com/ipfs/go-blockservice"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-cidutil"
|
||||
chunker "github.com/ipfs/go-ipfs-chunker"
|
||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
||||
files "github.com/ipfs/go-ipfs-files"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
"github.com/ipfs/go-merkledag"
|
||||
unixfile "github.com/ipfs/go-unixfs/file"
|
||||
"github.com/ipfs/go-unixfs/importer/balanced"
|
||||
ihelper "github.com/ipfs/go-unixfs/importer/helpers"
|
||||
"github.com/ipld/go-car"
|
||||
basicnode "github.com/ipld/go-ipld-prime/node/basic"
|
||||
"github.com/ipld/go-ipld-prime/traversal/selector"
|
||||
"github.com/ipld/go-ipld-prime/traversal/selector/builder"
|
||||
@ -42,34 +39,41 @@ import (
|
||||
"github.com/filecoin-project/go-commp-utils/ffiwrapper"
|
||||
"github.com/filecoin-project/go-commp-utils/writer"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/discovery"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
rm "github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/shared"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket/network"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/filecoin-project/go-fil-markets/stores"
|
||||
|
||||
"github.com/filecoin-project/lotus/markets/retrievaladapter"
|
||||
"github.com/filecoin-project/lotus/markets/storageadapter"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/specs-actors/v3/actors/builtin/market"
|
||||
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/markets/utils"
|
||||
"github.com/filecoin-project/lotus/node/impl/full"
|
||||
"github.com/filecoin-project/lotus/node/impl/paych"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo/importmgr"
|
||||
"github.com/filecoin-project/lotus/node/repo/retrievalstoremgr"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31)
|
||||
|
||||
// 8 days ~= SealDuration + PreCommit + MaxProveCommitDuration + 8 hour buffer
|
||||
const dealStartBufferHours uint64 = 8 * 24
|
||||
const DefaultDAGStoreDir = "dagstore"
|
||||
|
||||
type API struct {
|
||||
fx.In
|
||||
@ -84,13 +88,15 @@ type API struct {
|
||||
Retrieval rm.RetrievalClient
|
||||
Chain *store.ChainStore
|
||||
|
||||
Imports dtypes.ClientImportMgr
|
||||
Mds dtypes.ClientMultiDstore
|
||||
// accessors for imports and retrievals.
|
||||
Imports dtypes.ClientImportMgr
|
||||
StorageBlockstoreAccessor storagemarket.BlockstoreAccessor
|
||||
RtvlBlockstoreAccessor retrievalmarket.BlockstoreAccessor
|
||||
|
||||
CombinedBstore dtypes.ClientBlockstore // TODO: try to remove
|
||||
RetrievalStoreMgr dtypes.ClientRetrievalStoreManager
|
||||
DataTransfer dtypes.ClientDataTransfer
|
||||
Host host.Host
|
||||
DataTransfer dtypes.ClientDataTransfer
|
||||
Host host.Host
|
||||
|
||||
Repo repo.LockedRepo
|
||||
}
|
||||
|
||||
func calcDealExpiration(minDuration uint64, md *dline.Info, startEpoch abi.ChainEpoch) abi.ChainEpoch {
|
||||
@ -107,7 +113,8 @@ func calcDealExpiration(minDuration uint64, md *dline.Info, startEpoch abi.Chain
|
||||
return exp
|
||||
}
|
||||
|
||||
func (a *API) imgr() *importmgr.Mgr {
|
||||
// importManager converts the injected type to the required type.
|
||||
func (a *API) importManager() *imports.Manager {
|
||||
return a.Imports
|
||||
}
|
||||
|
||||
@ -120,7 +127,6 @@ func (a *API) ClientStatelessDeal(ctx context.Context, params *api.StartDealPara
|
||||
}
|
||||
|
||||
func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isStateless bool) (*cid.Cid, error) {
|
||||
var storeID *multistore.StoreID
|
||||
if isStateless {
|
||||
if params.Data.TransferType != storagemarket.TTManual {
|
||||
return nil, xerrors.Errorf("invalid transfer type %s for stateless storage deal", params.Data.TransferType)
|
||||
@ -129,24 +135,16 @@ func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isSt
|
||||
return nil, xerrors.New("stateless storage deals can only be initiated with storage price of 0")
|
||||
}
|
||||
} else if params.Data.TransferType == storagemarket.TTGraphsync {
|
||||
importIDs := a.imgr().List()
|
||||
for _, importID := range importIDs {
|
||||
info, err := a.imgr().Info(importID)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if info.Labels[importmgr.LRootCid] == "" {
|
||||
continue
|
||||
}
|
||||
c, err := cid.Parse(info.Labels[importmgr.LRootCid])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if c.Equals(params.Data.Root) {
|
||||
storeID = &importID //nolint
|
||||
break
|
||||
}
|
||||
bs, onDone, err := a.dealBlockstore(params.Data.Root)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to find blockstore for root CID: %w", err)
|
||||
}
|
||||
if has, err := bs.Has(params.Data.Root); err != nil {
|
||||
return nil, xerrors.Errorf("failed to query blockstore for root CID: %w", err)
|
||||
} else if !has {
|
||||
return nil, xerrors.Errorf("failed to find root CID in blockstore: %w", err)
|
||||
}
|
||||
onDone()
|
||||
}
|
||||
|
||||
walletKey, err := a.StateAccountKey(ctx, params.Wallet, types.EmptyTSK)
|
||||
@ -212,7 +210,6 @@ func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isSt
|
||||
Rt: st,
|
||||
FastRetrieval: params.FastRetrieval,
|
||||
VerifiedDeal: params.VerifiedDeal,
|
||||
StoreID: storeID,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
@ -411,17 +408,12 @@ func (a *API) newDealInfoWithTransfer(transferCh *api.DataTransferChannel, v sto
|
||||
}
|
||||
}
|
||||
|
||||
func (a *API) ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) {
|
||||
// TODO: check if we have the ENTIRE dag
|
||||
|
||||
offExch := merkledag.NewDAGService(blockservice.New(a.Imports.Blockstore, offline.Exchange(a.Imports.Blockstore)))
|
||||
_, err := offExch.Get(ctx, root)
|
||||
if err == ipld.ErrNotFound {
|
||||
return false, nil
|
||||
}
|
||||
func (a *API) ClientHasLocal(_ context.Context, root cid.Cid) (bool, error) {
|
||||
_, onDone, err := a.dealBlockstore(root)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
onDone()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@ -495,88 +487,219 @@ func (a *API) makeRetrievalQuery(ctx context.Context, rp rm.RetrievalPeer, paylo
|
||||
}
|
||||
}
|
||||
|
||||
func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) {
|
||||
id, st, err := a.imgr().NewStore()
|
||||
func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (res *api.ImportRes, err error) {
|
||||
var (
|
||||
imgr = a.importManager()
|
||||
id imports.ID
|
||||
root cid.Cid
|
||||
carPath string
|
||||
)
|
||||
|
||||
id, err = imgr.CreateImport()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := a.imgr().AddLabel(id, importmgr.LSource, "import"); err != nil {
|
||||
return nil, err
|
||||
return nil, xerrors.Errorf("failed to create import: %w", err)
|
||||
}
|
||||
|
||||
if err := a.imgr().AddLabel(id, importmgr.LFileName, ref.Path); err != nil {
|
||||
return nil, err
|
||||
if ref.IsCAR {
|
||||
// user gave us a CAR file, use it as-is
|
||||
// validate that it's either a carv1 or carv2, and has one root.
|
||||
f, err := os.Open(ref.Path)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to open CAR file: %w", err)
|
||||
}
|
||||
defer f.Close() //nolint:errcheck
|
||||
|
||||
hd, _, err := car.ReadHeader(bufio.NewReader(f))
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to read CAR header: %w", err)
|
||||
}
|
||||
if len(hd.Roots) != 1 {
|
||||
return nil, xerrors.New("car file can have one and only one header")
|
||||
}
|
||||
if hd.Version != 1 && hd.Version != 2 {
|
||||
return nil, xerrors.Errorf("car version must be 1 or 2, is %d", hd.Version)
|
||||
}
|
||||
|
||||
carPath = ref.Path
|
||||
root = hd.Roots[0]
|
||||
} else {
|
||||
carPath, err = imgr.AllocateCAR(id)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to create car path for import: %w", err)
|
||||
}
|
||||
|
||||
// remove the import if something went wrong.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
_ = os.Remove(carPath)
|
||||
_ = imgr.Remove(id)
|
||||
}
|
||||
}()
|
||||
|
||||
// perform the unixfs chunking.
|
||||
root, err = a.createUnixFSFilestore(ctx, ref.Path, carPath)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to import file using unixfs: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
nd, err := a.clientImport(ctx, ref, st)
|
||||
if err != nil {
|
||||
if err = imgr.AddLabel(id, imports.LSource, "import"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := a.imgr().AddLabel(id, importmgr.LRootCid, nd.String()); err != nil {
|
||||
if err = imgr.AddLabel(id, imports.LFileName, ref.Path); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = imgr.AddLabel(id, imports.LCARPath, carPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = imgr.AddLabel(id, imports.LRootCid, root.String()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &api.ImportRes{
|
||||
Root: nd,
|
||||
Root: root,
|
||||
ImportID: id,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *API) ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error {
|
||||
return a.imgr().Remove(importID)
|
||||
func (a *API) ClientRemoveImport(ctx context.Context, id imports.ID) error {
|
||||
info, err := a.importManager().Info(id)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get import metadata: %w", err)
|
||||
}
|
||||
|
||||
owner := info.Labels[imports.LCAROwner]
|
||||
path := info.Labels[imports.LCARPath]
|
||||
|
||||
// CARv2 file was not provided by the user, delete it.
|
||||
if path != "" && owner == imports.CAROwnerImportMgr {
|
||||
_ = os.Remove(path)
|
||||
}
|
||||
|
||||
return a.importManager().Remove(id)
|
||||
}
|
||||
|
||||
func (a *API) ClientImportLocal(ctx context.Context, f io.Reader) (cid.Cid, error) {
|
||||
file := files.NewReaderFile(f)
|
||||
// ClientImportLocal imports a standard file into this node as a UnixFS payload,
|
||||
// storing it in a CARv2 file. Note that this method is NOT integrated with the
|
||||
// IPFS blockstore. That is, if client-side IPFS integration is enabled, this
|
||||
// method won't import the file into that blockstore.
|
||||
func (a *API) ClientImportLocal(ctx context.Context, r io.Reader) (cid.Cid, error) {
|
||||
file := files.NewReaderFile(r)
|
||||
|
||||
id, st, err := a.imgr().NewStore()
|
||||
// write payload to temp file
|
||||
id, err := a.importManager().CreateImport()
|
||||
if err != nil {
|
||||
return cid.Undef, err
|
||||
}
|
||||
if err := a.imgr().AddLabel(id, "source", "import-local"); err != nil {
|
||||
return cid.Cid{}, err
|
||||
if err := a.importManager().AddLabel(id, imports.LSource, "import-local"); err != nil {
|
||||
return cid.Undef, err
|
||||
}
|
||||
|
||||
bufferedDS := ipld.NewBufferedDAG(ctx, st.DAG)
|
||||
|
||||
prefix, err := merkledag.PrefixForCidVersion(1)
|
||||
path, err := a.importManager().AllocateCAR(id)
|
||||
if err != nil {
|
||||
return cid.Undef, err
|
||||
}
|
||||
prefix.MhType = DefaultHashFunction
|
||||
|
||||
params := ihelper.DagBuilderParams{
|
||||
Maxlinks: build.UnixfsLinksPerLevel,
|
||||
RawLeaves: true,
|
||||
CidBuilder: cidutil.InlineBuilder{
|
||||
Builder: prefix,
|
||||
Limit: 126,
|
||||
},
|
||||
Dagserv: bufferedDS,
|
||||
}
|
||||
// writing a carv2 requires knowing the root ahead of time, which makes
|
||||
// streaming cases impossible.
|
||||
// https://github.com/ipld/go-car/issues/196
|
||||
// we work around this limitation by writing a placeholder root CID of the
|
||||
// same length as the root that our unixfs chunking strategy will generate.
|
||||
// once the DAG is formed and the root is calculated, we overwrite the
|
||||
// inner carv1 header with the final root.
|
||||
|
||||
db, err := params.New(chunker.NewSizeSplitter(file, int64(build.UnixfsChunkSize)))
|
||||
b, err := unixFSCidBuilder()
|
||||
if err != nil {
|
||||
return cid.Undef, err
|
||||
}
|
||||
nd, err := balanced.Layout(db)
|
||||
|
||||
// placeholder payload needs to be larger than inline CID threshold; 256
|
||||
// bytes is a safe value.
|
||||
placeholderRoot, err := b.Sum(make([]byte, 256))
|
||||
if err != nil {
|
||||
return cid.Undef, err
|
||||
}
|
||||
if err := a.imgr().AddLabel(id, "root", nd.Cid().String()); err != nil {
|
||||
return cid.Cid{}, err
|
||||
return cid.Undef, xerrors.Errorf("failed to calculate placeholder root: %w", err)
|
||||
}
|
||||
|
||||
return nd.Cid(), bufferedDS.Commit()
|
||||
bs, err := blockstore.OpenReadWrite(path, []cid.Cid{placeholderRoot}, blockstore.UseWholeCIDs(true))
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to create carv2 read/write blockstore: %w", err)
|
||||
}
|
||||
|
||||
root, err := buildUnixFS(ctx, file, bs, false)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to build unixfs dag: %w", err)
|
||||
}
|
||||
|
||||
err = bs.Finalize()
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to finalize carv2 read/write blockstore: %w", err)
|
||||
}
|
||||
|
||||
// record the root in the import manager.
|
||||
if err := a.importManager().AddLabel(id, imports.LRootCid, root.String()); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to record root CID in import manager: %w", err)
|
||||
}
|
||||
|
||||
// now go ahead and overwrite the root in the carv1 header.
|
||||
reader, err := carv2.OpenReader(path)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to create car reader: %w", err)
|
||||
}
|
||||
|
||||
// save the header offset.
|
||||
headerOff := reader.Header.DataOffset
|
||||
|
||||
// read the old header.
|
||||
dr := reader.DataReader()
|
||||
header, err := readHeader(dr)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to read car reader: %w", err)
|
||||
}
|
||||
_ = reader.Close() // close the CAR reader.
|
||||
|
||||
// write the old header into a buffer.
|
||||
var oldBuf bytes.Buffer
|
||||
if err = writeHeader(header, &oldBuf); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to write header into buffer: %w", err)
|
||||
}
|
||||
|
||||
// replace the root.
|
||||
header.Roots = []cid.Cid{root}
|
||||
|
||||
// write the new header into a buffer.
|
||||
var newBuf bytes.Buffer
|
||||
err = writeHeader(header, &newBuf)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to write header into buffer: %w", err)
|
||||
}
|
||||
|
||||
// verify the length matches.
|
||||
if newBuf.Len() != oldBuf.Len() {
|
||||
return cid.Undef, xerrors.Errorf("failed to replace carv1 header; length mismatch (old: %d, new: %d)", oldBuf.Len(), newBuf.Len())
|
||||
}
|
||||
|
||||
// open the file again, seek to the header position, and write.
|
||||
f, err := os.OpenFile(path, os.O_WRONLY, 0755)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to open car: %w", err)
|
||||
}
|
||||
defer f.Close() //nolint:errcheck
|
||||
|
||||
n, err := f.WriteAt(newBuf.Bytes(), int64(headerOff))
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to write new header to car (bytes written: %d): %w", n, err)
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
|
||||
func (a *API) ClientListImports(ctx context.Context) ([]api.Import, error) {
|
||||
importIDs := a.imgr().List()
|
||||
func (a *API) ClientListImports(_ context.Context) ([]api.Import, error) {
|
||||
ids, err := a.importManager().List()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to fetch imports: %w", err)
|
||||
}
|
||||
|
||||
out := make([]api.Import, len(importIDs))
|
||||
for i, id := range importIDs {
|
||||
info, err := a.imgr().Info(id)
|
||||
out := make([]api.Import, len(ids))
|
||||
for i, id := range ids {
|
||||
info, err := a.importManager().Info(id)
|
||||
if err != nil {
|
||||
out[i] = api.Import{
|
||||
Key: id,
|
||||
@ -587,12 +710,13 @@ func (a *API) ClientListImports(ctx context.Context) ([]api.Import, error) {
|
||||
|
||||
ai := api.Import{
|
||||
Key: id,
|
||||
Source: info.Labels[importmgr.LSource],
|
||||
FilePath: info.Labels[importmgr.LFileName],
|
||||
Source: info.Labels[imports.LSource],
|
||||
FilePath: info.Labels[imports.LFileName],
|
||||
CARPath: info.Labels[imports.LCARPath],
|
||||
}
|
||||
|
||||
if info.Labels[importmgr.LRootCid] != "" {
|
||||
c, err := cid.Parse(info.Labels[importmgr.LRootCid])
|
||||
if info.Labels[imports.LRootCid] != "" {
|
||||
c, err := cid.Parse(info.Labels[imports.LRootCid])
|
||||
if err != nil {
|
||||
ai.Err = err.Error()
|
||||
} else {
|
||||
@ -660,7 +784,7 @@ type retrievalSubscribeEvent struct {
|
||||
state rm.ClientDealState
|
||||
}
|
||||
|
||||
func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, subscribeEvents chan retrievalSubscribeEvent, events chan marketevents.RetrievalEvent) error {
|
||||
func consumeAllEvents(ctx context.Context, dealID retrievalmarket.DealID, subscribeEvents chan retrievalSubscribeEvent, events chan marketevents.RetrievalEvent) error {
|
||||
for {
|
||||
var subscribeEvent retrievalSubscribeEvent
|
||||
select {
|
||||
@ -711,9 +835,28 @@ func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref
|
||||
}
|
||||
}
|
||||
|
||||
var store retrievalstoremgr.RetrievalStore
|
||||
// summary:
|
||||
// 1. if we're retrieving from an import, FromLocalCAR will be set.
|
||||
// Open as a Filestore and populate the target CAR or UnixFS export from it.
|
||||
// (cannot use ExtractV1File because user wants a dense CAR, not a ref CAR/filestore)
|
||||
// 2. if we're using an IPFS blockstore for retrieval, retrieve into it,
|
||||
// then extract the CAR or UnixFS export from it.
|
||||
// 3. if we have to retrieve, perform a CARv2 retrieval, then extract
|
||||
// the CARv1 (with ExtractV1File) or UnixFS export from it.
|
||||
|
||||
// this indicates we're proxying to IPFS.
|
||||
proxyBss, retrieveIntoIPFS := a.RtvlBlockstoreAccessor.(*retrievaladapter.ProxyBlockstoreAccessor)
|
||||
carBss, retrieveIntoCAR := a.RtvlBlockstoreAccessor.(*retrievaladapter.CARBlockstoreAccessor)
|
||||
|
||||
carPath := order.FromLocalCAR
|
||||
if carPath == "" {
|
||||
if !retrieveIntoIPFS && !retrieveIntoCAR {
|
||||
// we actually need to retrieve from the network, but we don't
|
||||
// recognize the blockstore accessor.
|
||||
finish(xerrors.Errorf("unsupported retrieval blockstore accessor"))
|
||||
return
|
||||
}
|
||||
|
||||
if order.LocalStore == nil {
|
||||
if order.MinerPeer == nil || order.MinerPeer.ID == "" {
|
||||
mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK)
|
||||
if err != nil {
|
||||
@ -737,14 +880,6 @@ func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref
|
||||
return
|
||||
}
|
||||
|
||||
/*id, st, err := a.imgr().NewStore()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := a.imgr().AddLabel(id, "source", "retrieval"); err != nil {
|
||||
return err
|
||||
}*/
|
||||
|
||||
ppb := types.BigDiv(order.Total, types.NewInt(order.Size))
|
||||
|
||||
params, err := rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, shared.AllSelector(), order.Piece, order.UnsealPrice)
|
||||
@ -753,22 +888,12 @@ func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref
|
||||
return
|
||||
}
|
||||
|
||||
store, err = a.RetrievalStoreMgr.NewStore()
|
||||
if err != nil {
|
||||
finish(xerrors.Errorf("Error setting up new store: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = a.RetrievalStoreMgr.ReleaseStore(store)
|
||||
}()
|
||||
|
||||
// Subscribe to events before retrieving to avoid losing events.
|
||||
subscribeEvents := make(chan retrievalSubscribeEvent, 1)
|
||||
subscribeCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) {
|
||||
// We'll check the deal IDs inside readSubscribeEvents.
|
||||
// We'll check the deal IDs inside consumeAllEvents.
|
||||
if state.PayloadCID.Equals(order.Root) {
|
||||
select {
|
||||
case <-subscribeCtx.Done():
|
||||
@ -777,15 +902,17 @@ func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref
|
||||
}
|
||||
})
|
||||
|
||||
dealID, err := a.Retrieval.Retrieve(
|
||||
id := a.Retrieval.NextID()
|
||||
id, err = a.Retrieval.Retrieve(
|
||||
ctx,
|
||||
id,
|
||||
order.Root,
|
||||
params,
|
||||
order.Total,
|
||||
*order.MinerPeer,
|
||||
order.Client,
|
||||
order.Miner,
|
||||
store.StoreID())
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
unsubscribe()
|
||||
@ -793,62 +920,82 @@ func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref
|
||||
return
|
||||
}
|
||||
|
||||
err = readSubscribeEvents(ctx, dealID, subscribeEvents, events)
|
||||
err = consumeAllEvents(ctx, id, subscribeEvents, events)
|
||||
|
||||
unsubscribe()
|
||||
if err != nil {
|
||||
finish(xerrors.Errorf("Retrieve: %w", err))
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// local retrieval
|
||||
st, err := ((*multistore.MultiStore)(a.Mds)).Get(*order.LocalStore)
|
||||
if err != nil {
|
||||
finish(xerrors.Errorf("Retrieve: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
store = &multiStoreRetrievalStore{
|
||||
storeID: *order.LocalStore,
|
||||
store: st,
|
||||
if retrieveIntoCAR {
|
||||
carPath = carBss.PathFor(id)
|
||||
}
|
||||
}
|
||||
|
||||
// If ref is nil, it only fetches the data into the configured blockstore.
|
||||
if ref == nil {
|
||||
// If ref is nil, it only fetches the data into the configured blockstore
|
||||
// (if fetching from network).
|
||||
finish(nil)
|
||||
return
|
||||
}
|
||||
|
||||
rdag := store.DAGService()
|
||||
|
||||
// Are we outputting a CAR?
|
||||
if ref.IsCAR {
|
||||
f, err := os.OpenFile(ref.Path, os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
finish(err)
|
||||
if retrieveIntoIPFS {
|
||||
// generating a CARv1 from IPFS.
|
||||
f, err := os.OpenFile(ref.Path, os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
finish(err)
|
||||
return
|
||||
}
|
||||
|
||||
bs := proxyBss.Blockstore
|
||||
dags := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
|
||||
err = car.WriteCar(ctx, dags, []cid.Cid{order.Root}, f)
|
||||
if err != nil {
|
||||
finish(err)
|
||||
return
|
||||
}
|
||||
finish(f.Close())
|
||||
return
|
||||
}
|
||||
err = car.WriteCar(ctx, rdag, []cid.Cid{order.Root}, f)
|
||||
if err != nil {
|
||||
finish(err)
|
||||
return
|
||||
}
|
||||
finish(f.Close())
|
||||
|
||||
// generating a CARv1 from the CARv2 where we stored the retrieval.
|
||||
err := carv2.ExtractV1File(carPath, ref.Path)
|
||||
finish(err)
|
||||
return
|
||||
}
|
||||
|
||||
nd, err := rdag.Get(ctx, order.Root)
|
||||
// we are extracting a UnixFS file.
|
||||
var bs bstore.Blockstore
|
||||
if retrieveIntoIPFS {
|
||||
bs = proxyBss.Blockstore
|
||||
} else {
|
||||
cbs, err := stores.ReadOnlyFilestore(carPath)
|
||||
if err != nil {
|
||||
finish(err)
|
||||
return
|
||||
}
|
||||
defer cbs.Close() //nolint:errcheck
|
||||
bs = cbs
|
||||
}
|
||||
|
||||
bsvc := blockservice.New(bs, offline.Exchange(bs))
|
||||
dag := merkledag.NewDAGService(bsvc)
|
||||
|
||||
nd, err := dag.Get(ctx, order.Root)
|
||||
if err != nil {
|
||||
finish(xerrors.Errorf("ClientRetrieve: %w", err))
|
||||
return
|
||||
}
|
||||
file, err := unixfile.NewUnixfsFile(ctx, rdag, nd)
|
||||
file, err := unixfile.NewUnixfsFile(ctx, dag, nd)
|
||||
if err != nil {
|
||||
finish(xerrors.Errorf("ClientRetrieve: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
finish(files.WriteTo(file, ref.Path))
|
||||
return
|
||||
}
|
||||
|
||||
func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) {
|
||||
@ -928,19 +1075,6 @@ func (a *API) newRetrievalInfo(ctx context.Context, v rm.ClientDealState) api.Re
|
||||
return a.newRetrievalInfoWithTransfer(transferCh, v)
|
||||
}
|
||||
|
||||
type multiStoreRetrievalStore struct {
|
||||
storeID multistore.StoreID
|
||||
store *multistore.Store
|
||||
}
|
||||
|
||||
func (mrs *multiStoreRetrievalStore) StoreID() *multistore.StoreID {
|
||||
return &mrs.storeID
|
||||
}
|
||||
|
||||
func (mrs *multiStoreRetrievalStore) DAGService() ipld.DAGService {
|
||||
return mrs.store.DAG
|
||||
}
|
||||
|
||||
func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) {
|
||||
mi, err := a.StateMinerInfo(ctx, miner, types.EmptyTSK)
|
||||
if err != nil {
|
||||
@ -1009,11 +1143,16 @@ func (w *lenWriter) Write(p []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
func (a *API) ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) {
|
||||
dag := merkledag.NewDAGService(blockservice.New(a.CombinedBstore, offline.Exchange(a.CombinedBstore)))
|
||||
bs, onDone, err := a.dealBlockstore(root)
|
||||
if err != nil {
|
||||
return api.DataSize{}, err
|
||||
}
|
||||
defer onDone()
|
||||
|
||||
w := lenWriter(0)
|
||||
dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
|
||||
|
||||
err := car.WriteCar(ctx, dag, []cid.Cid{root}, &w)
|
||||
var w lenWriter
|
||||
err = car.WriteCar(ctx, dag, []cid.Cid{root}, &w)
|
||||
if err != nil {
|
||||
return api.DataSize{}, err
|
||||
}
|
||||
@ -1027,12 +1166,17 @@ func (a *API) ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, e
|
||||
}
|
||||
|
||||
func (a *API) ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) {
|
||||
dag := merkledag.NewDAGService(blockservice.New(a.CombinedBstore, offline.Exchange(a.CombinedBstore)))
|
||||
bs, onDone, err := a.dealBlockstore(root)
|
||||
if err != nil {
|
||||
return api.DataCIDSize{}, err
|
||||
}
|
||||
defer onDone()
|
||||
|
||||
dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
|
||||
w := &writer.Writer{}
|
||||
bw := bufio.NewWriterSize(w, int(writer.CommPBuf))
|
||||
|
||||
err := car.WriteCar(ctx, dag, []cid.Cid{root}, w)
|
||||
err = car.WriteCar(ctx, dag, []cid.Cid{root}, w)
|
||||
if err != nil {
|
||||
return api.DataCIDSize{}, err
|
||||
}
|
||||
@ -1046,113 +1190,49 @@ func (a *API) ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCID
|
||||
}
|
||||

func (a *API) ClientGenCar(ctx context.Context, ref api.FileRef, outputPath string) error {
	id, st, err := a.imgr().NewStore()
	// create a temporary import to represent this job and obtain a staging CAR.
	id, err := a.importManager().CreateImport()
	if err != nil {
		return err
	}
	if err := a.imgr().AddLabel(id, "source", "gen-car"); err != nil {
		return err
		return xerrors.Errorf("failed to create temporary import: %w", err)
	}
	defer a.importManager().Remove(id) //nolint:errcheck

	bufferedDS := ipld.NewBufferedDAG(ctx, st.DAG)
	c, err := a.clientImport(ctx, ref, st)

	tmp, err := a.importManager().AllocateCAR(id)
	if err != nil {
		return err
		return xerrors.Errorf("failed to allocate temporary CAR: %w", err)
	}
	defer os.Remove(tmp) //nolint:errcheck

	// generate and import the UnixFS DAG into a filestore (positional reference) CAR.
	root, err := a.createUnixFSFilestore(ctx, ref.Path, tmp)
	if err != nil {
		return xerrors.Errorf("failed to import file using unixfs: %w", err)
	}

	// TODO: does that defer mean to remove the whole blockstore?
	defer bufferedDS.Remove(ctx, c) //nolint:errcheck
	// open the positional reference CAR as a filestore.
	fs, err := stores.ReadOnlyFilestore(tmp)
	if err != nil {
		return xerrors.Errorf("failed to open filestore from carv2 in path %s: %w", tmp, err)
	}
	defer fs.Close() //nolint:errcheck

	// build a dense deterministic CAR (dense = containing filled leaves)
	ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)

	// entire DAG selector
	allSelector := ssb.ExploreRecursive(selector.RecursionLimitNone(),
	allSelector := ssb.ExploreRecursive(
		selector.RecursionLimitNone(),
		ssb.ExploreAll(ssb.ExploreRecursiveEdge())).Node()

	sc := car.NewSelectiveCar(ctx, fs, []car.Dag{{Root: root, Selector: allSelector}})
	f, err := os.Create(outputPath)
	if err != nil {
		return err
	}

	sc := car.NewSelectiveCar(ctx, st.Bstore, []car.Dag{{Root: c, Selector: allSelector}})
	if err = sc.Write(f); err != nil {
		return err
		return xerrors.Errorf("failed to write CAR to output file: %w", err)
	}

	return f.Close()
}
func (a *API) clientImport(ctx context.Context, ref api.FileRef, store *multistore.Store) (cid.Cid, error) {
	f, err := os.Open(ref.Path)
	if err != nil {
		return cid.Undef, err
	}
	defer f.Close() //nolint:errcheck

	stat, err := f.Stat()
	if err != nil {
		return cid.Undef, err
	}

	file, err := files.NewReaderPathFile(ref.Path, f, stat)
	if err != nil {
		return cid.Undef, err
	}

	if ref.IsCAR {
		var st car.Store
		if store.Fstore == nil {
			st = store.Bstore
		} else {
			st = store.Fstore
		}
		result, err := car.LoadCar(st, file)
		if err != nil {
			return cid.Undef, err
		}

		if len(result.Roots) != 1 {
			return cid.Undef, xerrors.New("cannot import car with more than one root")
		}

		return result.Roots[0], nil
	}

	bufDs := ipld.NewBufferedDAG(ctx, store.DAG)

	prefix, err := merkledag.PrefixForCidVersion(1)
	if err != nil {
		return cid.Undef, err
	}
	prefix.MhType = DefaultHashFunction

	params := ihelper.DagBuilderParams{
		Maxlinks:  build.UnixfsLinksPerLevel,
		RawLeaves: true,
		CidBuilder: cidutil.InlineBuilder{
			Builder: prefix,
			Limit:   126,
		},
		Dagserv: bufDs,
		NoCopy:  true,
	}

	db, err := params.New(chunker.NewSizeSplitter(file, int64(build.UnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}
	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	if err := bufDs.Commit(); err != nil {
		return cid.Undef, err
	}

	return nd.Cid(), nil
}
func (a *API) ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) {
	inProgressChannels, err := a.DataTransfer.InProgressChannels(ctx)
	if err != nil {
@ -1214,3 +1294,27 @@ func (a *API) ClientGetDealStatus(ctx context.Context, statusCode uint64) (strin

	return ststr, nil
}
// dealBlockstore picks the source blockstore for a storage deal; either the
// IPFS blockstore, or an import CARv2 file. It also returns a function that
// must be called when done.
func (a *API) dealBlockstore(root cid.Cid) (bstore.Blockstore, func(), error) {
	switch acc := a.StorageBlockstoreAccessor.(type) {
	case *storageadapter.ImportsBlockstoreAccessor:
		bs, err := acc.Get(root)
		if err != nil {
			return nil, nil, xerrors.Errorf("no import found for root %s: %w", root, err)
		}

		doneFn := func() {
			_ = acc.Done(root) //nolint:errcheck
		}
		return bs, doneFn, nil

	case *storageadapter.ProxyBlockstoreAccessor:
		return acc.Blockstore, func() {}, nil

	default:
		return nil, nil, xerrors.Errorf("unsupported blockstore accessor type: %T", acc)
	}
}
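Callers in this file all follow the same acquire/release pattern around dealBlockstore; a minimal usage sketch, mirroring what ClientDealSize does above (the trailing comment is illustrative, not code from this commit):

	bs, onDone, err := a.dealBlockstore(root)
	if err != nil {
		return err
	}
	// release the import-backed CARv2 read-only blockstore once done reading
	defer onDone()
	// ... traverse the DAG rooted at `root` against bs ...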
@ -1 +1,130 @@
package client

import (
	"bytes"
	"context"
	"embed"
	"io/ioutil"
	"path/filepath"
	"strings"
	"testing"

	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	files "github.com/ipfs/go-ipfs-files"
	"github.com/ipfs/go-merkledag"
	unixfile "github.com/ipfs/go-unixfs/file"
	"github.com/ipld/go-car"
	carv2 "github.com/ipld/go-car/v2"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/markets/storageadapter"
	"github.com/filecoin-project/lotus/node/repo/imports"
)

//go:embed testdata/*
var testdata embed.FS

func TestImportLocal(t *testing.T) {
	ds := dssync.MutexWrap(datastore.NewMapDatastore())
	dir := t.TempDir()
	im := imports.NewManager(ds, dir)
	ctx := context.Background()

	a := &API{
		Imports:                   im,
		StorageBlockstoreAccessor: storageadapter.NewImportsBlockstoreAccessor(im),
	}

	b, err := testdata.ReadFile("testdata/payload.txt")
	require.NoError(t, err)

	root, err := a.ClientImportLocal(ctx, bytes.NewReader(b))
	require.NoError(t, err)
	require.NotEqual(t, cid.Undef, root)

	list, err := a.ClientListImports(ctx)
	require.NoError(t, err)
	require.Len(t, list, 1)

	it := list[0]
	require.Equal(t, root, *it.Root)
	require.True(t, strings.HasPrefix(it.CARPath, dir))

	local, err := a.ClientHasLocal(ctx, root)
	require.NoError(t, err)
	require.True(t, local)

	order := api.RetrievalOrder{
		Root:         root,
		FromLocalCAR: it.CARPath,
	}

	// retrieve as UnixFS.
	out1 := filepath.Join(dir, "retrieval1.data") // as unixfs
	out2 := filepath.Join(dir, "retrieval2.data") // as car
	err = a.ClientRetrieve(ctx, order, &api.FileRef{
		Path: out1,
	})
	require.NoError(t, err)

	outBytes, err := ioutil.ReadFile(out1)
	require.NoError(t, err)
	require.Equal(t, b, outBytes)

	err = a.ClientRetrieve(ctx, order, &api.FileRef{
		Path:  out2,
		IsCAR: true,
	})
	require.NoError(t, err)

	// open the CARv2 being custodied by the import manager
	orig, err := carv2.OpenReader(it.CARPath)
	require.NoError(t, err)

	// open the CARv1 we just exported
	exported, err := carv2.OpenReader(out2)
	require.NoError(t, err)

	require.EqualValues(t, 1, exported.Version)
	require.EqualValues(t, 2, orig.Version)

	origRoots, err := orig.Roots()
	require.NoError(t, err)
	require.Len(t, origRoots, 1)

	exportedRoots, err := exported.Roots()
	require.NoError(t, err)
	require.Len(t, exportedRoots, 1)

	require.EqualValues(t, origRoots, exportedRoots)

	// recreate the unixfs dag, and see if it matches the original file byte by byte
	// import the car into a memory blockstore, then export the unixfs file.
	bs := blockstore.NewBlockstore(datastore.NewMapDatastore())
	_, err = car.LoadCar(bs, exported.DataReader())
	require.NoError(t, err)

	dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))

	nd, err := dag.Get(ctx, exportedRoots[0])
	require.NoError(t, err)

	file, err := unixfile.NewUnixfsFile(ctx, dag, nd)
	require.NoError(t, err)

	exportedPath := filepath.Join(dir, "exported.data")
	err = files.WriteTo(file, exportedPath)
	require.NoError(t, err)

	exportedBytes, err := ioutil.ReadFile(exportedPath)
	require.NoError(t, err)

	// compare original file to recreated unixfs file.
	require.Equal(t, b, exportedBytes)
}
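As the test above demonstrates, the import manager custodies a CARv2 (version 2), while ClientRetrieve with IsCAR exports a CARv1 (version 1). If only the raw CARv1 payload inside a CARv2 is needed, a minimal sketch looks like the following (carPath and outPath are hypothetical placeholders):

	r, err := carv2.OpenReader(carPath)
	if err != nil {
		return err
	}
	defer r.Close() //nolint:errcheck

	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	// DataReader exposes the inner CARv1 data payload of the CARv2.
	if _, err := io.Copy(out, r.DataReader()); err != nil {
		return err
	}
	return out.Close()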
156  node/impl/client/import.go  Normal file
@ -0,0 +1,156 @@
package client

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/filecoin-project/go-fil-markets/stores"
	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-cidutil"
	bstore "github.com/ipfs/go-ipfs-blockstore"
	chunker "github.com/ipfs/go-ipfs-chunker"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	files "github.com/ipfs/go-ipfs-files"
	ipld "github.com/ipfs/go-ipld-format"
	"github.com/ipfs/go-merkledag"
	"github.com/ipfs/go-unixfs/importer/balanced"
	ihelper "github.com/ipfs/go-unixfs/importer/helpers"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/build"
)

func unixFSCidBuilder() (cid.Builder, error) {
	prefix, err := merkledag.PrefixForCidVersion(1)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize UnixFS CID Builder: %w", err)
	}
	prefix.MhType = DefaultHashFunction
	b := cidutil.InlineBuilder{
		Builder: prefix,
		Limit:   126,
	}
	return b, nil
}

// createUnixFSFilestore takes a standard file whose path is src, forms a UnixFS DAG, and
// writes a CARv2 file with positional mapping (backed by the go-filestore library).
func (a *API) createUnixFSFilestore(ctx context.Context, srcPath string, dstPath string) (cid.Cid, error) {
	// This method uses a two-phase approach with a staging CAR blockstore and
	// a final CAR blockstore.
	//
	// This is necessary because of https://github.com/ipld/go-car/issues/196
	//
	// TODO: do we need to chunk twice? Isn't the first output already in the
	// right order? Can't we just copy the CAR file and replace the header?

	src, err := os.Open(srcPath)
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to open input file: %w", err)
	}
	defer src.Close() //nolint:errcheck

	stat, err := src.Stat()
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to stat file: %w", err)
	}

	file, err := files.NewReaderPathFile(srcPath, src, stat)
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to create reader path file: %w", err)
	}

	f, err := ioutil.TempFile("", "")
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to create temp file: %w", err)
	}
	_ = f.Close() // close; we only want the path.

	tmp := f.Name()
	defer os.Remove(tmp) //nolint:errcheck

	// Step 1. Compute the UnixFS DAG and write it to a CARv2 file to get
	// the root CID of the DAG.
	fstore, err := stores.ReadWriteFilestore(tmp)
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to create temporary filestore: %w", err)
	}

	finalRoot1, err := buildUnixFS(ctx, file, fstore, true)
	if err != nil {
		_ = fstore.Close()
		return cid.Undef, xerrors.Errorf("failed to import file to store to compute root: %w", err)
	}

	if err := fstore.Close(); err != nil {
		return cid.Undef, xerrors.Errorf("failed to finalize car filestore: %w", err)
	}

	// Step 2. We now have the root of the UnixFS DAG, and we can write the
	// final CAR for real under `dst`.
	bs, err := stores.ReadWriteFilestore(dstPath, finalRoot1)
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to create a carv2 read/write filestore: %w", err)
	}

	// rewind file to the beginning.
	if _, err := src.Seek(0, 0); err != nil {
		return cid.Undef, xerrors.Errorf("failed to rewind file: %w", err)
	}

	finalRoot2, err := buildUnixFS(ctx, file, bs, true)
	if err != nil {
		_ = bs.Close()
		return cid.Undef, xerrors.Errorf("failed to create UnixFS DAG with carv2 blockstore: %w", err)
	}

	if err := bs.Close(); err != nil {
		return cid.Undef, xerrors.Errorf("failed to finalize car blockstore: %w", err)
	}

	if finalRoot1 != finalRoot2 {
		return cid.Undef, xerrors.New("roots do not match")
	}

	return finalRoot1, nil
}

// buildUnixFS builds a UnixFS DAG out of the supplied reader,
// and writes the DAG into the supplied blockstore.
func buildUnixFS(ctx context.Context, reader io.Reader, into bstore.Blockstore, filestore bool) (cid.Cid, error) {
	b, err := unixFSCidBuilder()
	if err != nil {
		return cid.Undef, err
	}

	bsvc := blockservice.New(into, offline.Exchange(into))
	dags := merkledag.NewDAGService(bsvc)
	bufdag := ipld.NewBufferedDAG(ctx, dags)

	params := ihelper.DagBuilderParams{
		Maxlinks:   build.UnixfsLinksPerLevel,
		RawLeaves:  true,
		CidBuilder: b,
		Dagserv:    bufdag,
		NoCopy:     filestore,
	}

	db, err := params.New(chunker.NewSizeSplitter(reader, int64(build.UnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}
	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	if err := bufdag.Commit(); err != nil {
		return cid.Undef, err
	}

	return nd.Cid(), nil
}
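For the dense (non-filestore) path, buildUnixFS can also be pointed at a go-car/v2 read/write blockstore directly. The sketch below mirrors TestRoundtripUnixFS_Dense in the test file that follows (path and input are placeholders); the important detail is that the blockstore must be finalized before the resulting CARv2 is readable:

	bs, err := blockstore.OpenReadWrite(path, nil,
		carv2.ZeroLengthSectionAsEOF(true),
		blockstore.UseWholeCIDs(true))
	if err != nil {
		return cid.Undef, err
	}
	// filestore=false: leaves are stored inline, producing a dense CARv2.
	root, err := buildUnixFS(ctx, input, bs, false)
	if err != nil {
		return cid.Undef, err
	}
	// Finalize writes the CARv2 index and header; required before reading.
	if err := bs.Finalize(); err != nil {
		return cid.Undef, err
	}
	return root, nil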
133  node/impl/client/import_test.go  Normal file
@ -0,0 +1,133 @@
package client

import (
	"bytes"
	"context"
	"io"
	"io/ioutil"
	"os"
	"strings"
	"testing"

	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-cid"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	files "github.com/ipfs/go-ipfs-files"
	"github.com/ipfs/go-merkledag"
	unixfile "github.com/ipfs/go-unixfs/file"
	carv2 "github.com/ipld/go-car/v2"
	"github.com/ipld/go-car/v2/blockstore"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-fil-markets/stores"

	"github.com/filecoin-project/lotus/node/repo/imports"
)

// This test uses a full "dense" CARv2, and not a filestore (positional mapping).
func TestRoundtripUnixFS_Dense(t *testing.T) {
	ctx := context.Background()

	inputPath, inputContents := genInputFile(t)
	defer os.Remove(inputPath) //nolint:errcheck

	carv2File := newTmpFile(t)
	defer os.Remove(carv2File) //nolint:errcheck

	// import a file to a Unixfs DAG using a CARv2 read/write blockstore.
	bs, err := blockstore.OpenReadWrite(carv2File, nil,
		carv2.ZeroLengthSectionAsEOF(true),
		blockstore.UseWholeCIDs(true))
	require.NoError(t, err)

	root, err := buildUnixFS(ctx, bytes.NewBuffer(inputContents), bs, false)
	require.NoError(t, err)
	require.NotEqual(t, cid.Undef, root)
	require.NoError(t, bs.Finalize())

	// reconstruct the file.
	readOnly, err := blockstore.OpenReadOnly(carv2File,
		carv2.ZeroLengthSectionAsEOF(true),
		blockstore.UseWholeCIDs(true))
	require.NoError(t, err)
	defer readOnly.Close() //nolint:errcheck

	dags := merkledag.NewDAGService(blockservice.New(readOnly, offline.Exchange(readOnly)))

	nd, err := dags.Get(ctx, root)
	require.NoError(t, err)

	file, err := unixfile.NewUnixfsFile(ctx, dags, nd)
	require.NoError(t, err)

	tmpOutput := newTmpFile(t)
	defer os.Remove(tmpOutput) //nolint:errcheck
	require.NoError(t, files.WriteTo(file, tmpOutput))

	// ensure contents of the initial input file and the output file are identical.
	fo, err := os.Open(tmpOutput)
	require.NoError(t, err)
	bz2, err := ioutil.ReadAll(fo)
	require.NoError(t, err)
	require.NoError(t, fo.Close())
	require.Equal(t, inputContents, bz2)
}

func TestRoundtripUnixFS_Filestore(t *testing.T) {
	ctx := context.Background()
	a := &API{
		Imports: &imports.Manager{},
	}

	inputPath, inputContents := genInputFile(t)
	defer os.Remove(inputPath) //nolint:errcheck

	dst := newTmpFile(t)
	defer os.Remove(dst) //nolint:errcheck

	root, err := a.createUnixFSFilestore(ctx, inputPath, dst)
	require.NoError(t, err)
	require.NotEqual(t, cid.Undef, root)

	// convert the CARv2 to a normal file again and ensure the contents match
	fs, err := stores.ReadOnlyFilestore(dst)
	require.NoError(t, err)
	defer fs.Close() //nolint:errcheck

	dags := merkledag.NewDAGService(blockservice.New(fs, offline.Exchange(fs)))

	nd, err := dags.Get(ctx, root)
	require.NoError(t, err)

	file, err := unixfile.NewUnixfsFile(ctx, dags, nd)
	require.NoError(t, err)

	tmpOutput := newTmpFile(t)
	defer os.Remove(tmpOutput) //nolint:errcheck
	require.NoError(t, files.WriteTo(file, tmpOutput))

	// ensure contents of the initial input file and the output file are identical.
	fo, err := os.Open(tmpOutput)
	require.NoError(t, err)
	bz2, err := ioutil.ReadAll(fo)
	require.NoError(t, err)
	require.NoError(t, fo.Close())
	require.Equal(t, inputContents, bz2)
}

func newTmpFile(t *testing.T) string {
	f, err := os.CreateTemp("", "")
	require.NoError(t, err)
	require.NoError(t, f.Close())
	return f.Name()
}

func genInputFile(t *testing.T) (filepath string, contents []byte) {
	s := strings.Repeat("abcde", 100)
	tmp, err := os.CreateTemp("", "")
	require.NoError(t, err)
	_, err = io.Copy(tmp, strings.NewReader(s))
	require.NoError(t, err)
	require.NoError(t, tmp.Close())
	return tmp.Name(), []byte(s)
}
1  node/impl/client/testdata/duplicate_blocks.txt  vendored  Normal file
File diff suppressed because one or more lines are too long
49  node/impl/client/testdata/payload.txt  vendored  Normal file
@ -0,0 +1,49 @@
[Test fixture contents elided: 49 lines of Lorem Ipsum placeholder text used as the import/retrieval payload.]
49  node/impl/client/testdata/payload2.txt  vendored  Normal file
@ -0,0 +1,49 @@
[Test fixture contents elided: 49 lines of Lorem Ipsum placeholder text, nearly identical to payload.txt; the final paragraph additionally contains the sentence "Aarsh shah is simply an amazing person."]
@ -3,16 +3,20 @@ package impl
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/dagstore"
|
||||
"github.com/filecoin-project/dagstore/shard"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/gen"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/google/uuid"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p-core/host"
|
||||
@ -20,6 +24,8 @@ import (
|
||||
"go.uber.org/fx"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-fil-markets/piecestore"
|
||||
@ -34,6 +40,8 @@ import (
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
|
||||
|
||||
sto "github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
apitypes "github.com/filecoin-project/lotus/api/types"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
@ -42,7 +50,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/storage"
|
||||
"github.com/filecoin-project/lotus/storage/sectorblocks"
|
||||
sto "github.com/filecoin-project/specs-storage/storage"
|
||||
)
|
||||
|
||||
type StorageMinerAPI struct {
|
||||
@ -61,10 +68,12 @@ type StorageMinerAPI struct {
|
||||
PieceStore dtypes.ProviderPieceStore `optional:"true"`
|
||||
StorageProvider storagemarket.StorageProvider `optional:"true"`
|
||||
RetrievalProvider retrievalmarket.RetrievalProvider `optional:"true"`
|
||||
SectorAccessor retrievalmarket.SectorAccessor `optional:"true"`
|
||||
DataTransfer dtypes.ProviderDataTransfer `optional:"true"`
|
||||
DealPublisher *storageadapter.DealPublisher `optional:"true"`
|
||||
SectorBlocks *sectorblocks.SectorBlocks `optional:"true"`
|
||||
Host host.Host `optional:"true"`
|
||||
DAGStore *dagstore.DAGStore `optional:"true"`
|
||||
|
||||
// Miner / storage
|
||||
Miner *storage.Miner `optional:"true"`
|
||||
@ -98,6 +107,8 @@ type StorageMinerAPI struct {
|
||||
SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc `optional:"true"`
|
||||
}
|
||||
|
||||
var _ api.StorageMiner = &StorageMinerAPI{}
|
||||
|
||||
func (sm *StorageMinerAPI) ServeRemote(perm bool) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if perm == true {
|
||||
@ -545,6 +556,267 @@ func (sm *StorageMinerAPI) MarketPublishPendingDeals(ctx context.Context) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sm *StorageMinerAPI) DagstoreListShards(ctx context.Context) ([]api.DagstoreShardInfo, error) {
|
||||
if sm.DAGStore == nil {
|
||||
return nil, fmt.Errorf("dagstore not available on this node")
|
||||
}
|
||||
|
||||
info := sm.DAGStore.AllShardsInfo()
|
||||
ret := make([]api.DagstoreShardInfo, 0, len(info))
|
||||
for k, i := range info {
|
||||
ret = append(ret, api.DagstoreShardInfo{
|
||||
Key: k.String(),
|
||||
State: i.ShardState.String(),
|
||||
Error: func() string {
|
||||
if i.Error == nil {
|
||||
return ""
|
||||
}
|
||||
return i.Error.Error()
|
||||
}(),
|
||||
})
|
||||
}
|
||||
|
||||
// order by key.
|
||||
sort.SliceStable(ret, func(i, j int) bool {
|
||||
return ret[i].Key < ret[j].Key
|
||||
})
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (sm *StorageMinerAPI) DagstoreInitializeShard(ctx context.Context, key string) error {
|
||||
if sm.DAGStore == nil {
|
||||
return fmt.Errorf("dagstore not available on this node")
|
||||
}
|
||||
|
||||
k := shard.KeyFromString(key)
|
||||
|
||||
info, err := sm.DAGStore.GetShardInfo(k)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get shard info: %w", err)
|
||||
}
|
||||
if st := info.ShardState; st != dagstore.ShardStateNew {
|
||||
return fmt.Errorf("cannot initialize shard; expected state ShardStateNew, was: %s", st.String())
|
||||
}
|
||||
|
||||
ch := make(chan dagstore.ShardResult, 1)
|
||||
if err = sm.DAGStore.AcquireShard(ctx, k, ch, dagstore.AcquireOpts{}); err != nil {
|
||||
return fmt.Errorf("failed to acquire shard: %w", err)
|
||||
}
|
||||
|
||||
var res dagstore.ShardResult
|
||||
select {
|
||||
case res = <-ch:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
if err := res.Error; err != nil {
|
||||
return fmt.Errorf("failed to acquire shard: %w", err)
|
||||
}
|
||||
|
||||
if res.Accessor != nil {
|
||||
err = res.Accessor.Close()
|
||||
if err != nil {
|
||||
log.Warnw("failed to close shard accessor; continuing", "shard_key", k, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sm *StorageMinerAPI) DagstoreInitializeAll(ctx context.Context, params api.DagstoreInitializeAllParams) (<-chan api.DagstoreInitializeAllEvent, error) {
|
||||
if sm.DAGStore == nil {
|
||||
return nil, fmt.Errorf("dagstore not available on this node")
|
||||
}
|
||||
|
||||
if sm.SectorAccessor == nil {
|
||||
return nil, fmt.Errorf("sector accessor not available on this node")
|
||||
}
|
||||
|
||||
// prepare the throttler tokens.
|
||||
var throttle chan struct{}
|
||||
if c := params.MaxConcurrency; c > 0 {
|
||||
throttle = make(chan struct{}, c)
|
||||
for i := 0; i < c; i++ {
|
||||
throttle <- struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// are we initializing only unsealed pieces?
|
||||
onlyUnsealed := !params.IncludeSealed
|
||||
|
||||
info := sm.DAGStore.AllShardsInfo()
|
||||
var toInitialize []string
|
||||
for k, i := range info {
|
||||
if i.ShardState != dagstore.ShardStateNew {
|
||||
continue
|
||||
}
|
||||
|
||||
// if we're initializing only unsealed pieces, check if there's an
|
||||
// unsealed deal for this piece available.
|
||||
if onlyUnsealed {
|
||||
pieceCid, err := cid.Decode(k.String())
|
||||
if err != nil {
|
||||
log.Warnw("DagstoreInitializeAll: failed to decode shard key as piece CID; skipping", "shard_key", k.String(), "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
pi, err := sm.PieceStore.GetPieceInfo(pieceCid)
|
||||
if err != nil {
|
||||
log.Warnw("DagstoreInitializeAll: failed to get piece info; skipping", "piece_cid", pieceCid, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
var isUnsealed bool
|
||||
for _, d := range pi.Deals {
|
||||
isUnsealed, err = sm.SectorAccessor.IsUnsealed(ctx, d.SectorID, d.Offset.Unpadded(), d.Length.Unpadded())
|
||||
if err != nil {
|
||||
log.Warnw("DagstoreInitializeAll: failed to get unsealed status; skipping deal", "deal_id", d.DealID, "error", err)
|
||||
continue
|
||||
}
|
||||
if isUnsealed {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !isUnsealed {
|
||||
log.Infow("DagstoreInitializeAll: skipping piece because it's sealed", "piece_cid", pieceCid, "error", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// yes, we're initializing this shard.
|
||||
toInitialize = append(toInitialize, k.String())
|
||||
}
|
||||
|
||||
total := len(toInitialize)
|
||||
if total == 0 {
|
||||
out := make(chan api.DagstoreInitializeAllEvent)
|
||||
close(out)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// response channel must be closed when we're done, or the context is cancelled.
|
||||
// this buffering is necessary to prevent inflight children goroutines from
|
||||
// publishing to a closed channel (res) when the context is cancelled.
|
||||
out := make(chan api.DagstoreInitializeAllEvent, 32) // internal buffer.
|
||||
res := make(chan api.DagstoreInitializeAllEvent, 32) // returned to caller.
|
||||
|
||||
// pump events back to caller.
|
||||
// two events per shard.
|
||||
go func() {
|
||||
defer close(res)
|
||||
|
||||
for i := 0; i < total*2; i++ {
|
||||
select {
|
||||
case res <- <-out:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for i, k := range toInitialize {
|
||||
select {
|
||||
case <-throttle:
|
||||
// acquired a throttle token, proceed.
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
go func(k string, i int) {
|
||||
r := api.DagstoreInitializeAllEvent{
|
||||
Key: k,
|
||||
Event: "start",
|
||||
Total: total,
|
||||
Current: i + 1, // start with 1
|
||||
}
|
||||
select {
|
||||
case out <- r:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
err := sm.DagstoreInitializeShard(ctx, k)
|
||||
throttle <- struct{}{}
|
||||
|
||||
r.Event = "end"
|
||||
if err == nil {
|
||||
r.Success = true
|
||||
} else {
|
||||
r.Success = false
|
||||
r.Error = err.Error()
|
||||
}
|
||||
|
||||
select {
|
||||
case out <- r:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}(k, i)
|
||||
}
|
||||
}()
|
||||
|
||||
return res, nil
|
||||
|
||||
}
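A minimal sketch (illustrative, not part of the diff) of how a caller might drive this method and drain its event stream; the `sm` handle, its api.StorageMiner type and the concurrency value are assumptions, while the parameter and event fields are the ones defined above:

func initializeAllShards(ctx context.Context, sm api.StorageMiner) error {
	events, err := sm.DagstoreInitializeAll(ctx, api.DagstoreInitializeAllParams{
		MaxConcurrency: 4,     // illustrative throttle size
		IncludeSealed:  false, // only initialize shards backed by an unsealed copy
	})
	if err != nil {
		return err
	}
	// two events arrive per shard: "start" and "end".
	for evt := range events {
		fmt.Printf("shard %s: %s (%d/%d) success=%v error=%q\n",
			evt.Key, evt.Event, evt.Current, evt.Total, evt.Success, evt.Error)
	}
	return nil
}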
|
||||
|
||||
func (sm *StorageMinerAPI) DagstoreRecoverShard(ctx context.Context, key string) error {
|
||||
if sm.DAGStore == nil {
|
||||
return fmt.Errorf("dagstore not available on this node")
|
||||
}
|
||||
|
||||
k := shard.KeyFromString(key)
|
||||
|
||||
info, err := sm.DAGStore.GetShardInfo(k)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get shard info: %w", err)
|
||||
}
|
||||
if st := info.ShardState; st != dagstore.ShardStateErrored {
|
||||
return fmt.Errorf("cannot recover shard; expected state ShardStateErrored, was: %s", st.String())
|
||||
}
|
||||
|
||||
ch := make(chan dagstore.ShardResult, 1)
|
||||
if err = sm.DAGStore.RecoverShard(ctx, k, ch, dagstore.RecoverOpts{}); err != nil {
|
||||
return fmt.Errorf("failed to recover shard: %w", err)
|
||||
}
|
||||
|
||||
var res dagstore.ShardResult
|
||||
select {
|
||||
case res = <-ch:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
return res.Error
|
||||
}
|
||||
|
||||
func (sm *StorageMinerAPI) DagstoreGC(ctx context.Context) ([]api.DagstoreShardResult, error) {
|
||||
if sm.DAGStore == nil {
|
||||
return nil, fmt.Errorf("dagstore not available on this node")
|
||||
}
|
||||
|
||||
res, err := sm.DAGStore.GC(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to gc: %w", err)
|
||||
}
|
||||
|
||||
ret := make([]api.DagstoreShardResult, 0, len(res.Shards))
|
||||
for k, err := range res.Shards {
|
||||
r := api.DagstoreShardResult{Key: k.String()}
|
||||
if err == nil {
|
||||
r.Success = true
|
||||
} else {
|
||||
r.Success = false
|
||||
r.Error = err.Error()
|
||||
}
|
||||
ret = append(ret, r)
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
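In the same spirit, a sketch (not part of the patch) of a maintenance loop that lists shards and re-registers the errored ones through DagstoreRecoverShard; the literal state string compared against is an assumption about dagstore.ShardState.String(), and DagstoreRecoverShard re-checks the state itself anyway:

func recoverErroredShards(ctx context.Context, sm api.StorageMiner) error {
	shards, err := sm.DagstoreListShards(ctx)
	if err != nil {
		return err
	}
	for _, sh := range shards {
		if sh.State != "ShardStateErrored" { // assumed string form of the errored state
			continue
		}
		if err := sm.DagstoreRecoverShard(ctx, sh.Key); err != nil {
			fmt.Printf("failed to recover shard %s: %s\n", sh.Key, err)
		}
	}
	return nil
}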
|
||||
|
||||
func (sm *StorageMinerAPI) DealsList(ctx context.Context) ([]api.MarketDeal, error) {
|
||||
return sm.listDeals(ctx)
|
||||
}
|
||||
@ -708,5 +980,3 @@ func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.Secto
|
||||
func (sm *StorageMinerAPI) RuntimeSubsystems(context.Context) (res api.MinerSubsystems, err error) {
|
||||
return sm.EnabledSubsystems, nil
|
||||
}
|
||||
|
||||
var _ api.StorageMiner = &StorageMinerAPI{}
|
||||
|
@ -14,6 +14,11 @@ import (
|
||||
dtimpl "github.com/filecoin-project/go-data-transfer/impl"
|
||||
dtnet "github.com/filecoin-project/go-data-transfer/network"
|
||||
dtgstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/namespace"
|
||||
"github.com/libp2p/go-libp2p-core/host"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/discovery"
|
||||
discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
@ -23,13 +28,9 @@ import (
|
||||
storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation"
|
||||
smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/namespace"
|
||||
"github.com/libp2p/go-libp2p-core/host"
|
||||
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/markets/storageadapter"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/market"
|
||||
"github.com/filecoin-project/lotus/journal"
|
||||
"github.com/filecoin-project/lotus/markets"
|
||||
@ -38,10 +39,8 @@ import (
|
||||
"github.com/filecoin-project/lotus/node/impl/full"
|
||||
payapi "github.com/filecoin-project/lotus/node/impl/paych"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/modules/helpers"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
"github.com/filecoin-project/lotus/node/repo/importmgr"
|
||||
"github.com/filecoin-project/lotus/node/repo/retrievalstoremgr"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
)
|
||||
|
||||
func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full.WalletAPI, fundMgr *market.FundManager) {
|
||||
@ -78,34 +77,21 @@ func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full
|
||||
})
|
||||
}
|
||||
|
||||
func ClientMultiDatastore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.ClientMultiDstore, error) {
|
||||
ctx := helpers.LifecycleCtx(mctx, lc)
|
||||
ds, err := r.Datastore(ctx, "/client")
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting datastore out of repo: %w", err)
|
||||
func ClientImportMgr(ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ClientImportMgr, error) {
|
||||
// store the imports under the repo's `imports` subdirectory.
|
||||
dir := filepath.Join(r.Path(), "imports")
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return nil, xerrors.Errorf("failed to create directory %s: %w", dir, err)
|
||||
}
|
||||
|
||||
mds, err := multistore.NewMultiDstore(ds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lc.Append(fx.Hook{
|
||||
OnStop: func(ctx context.Context) error {
|
||||
return mds.Close()
|
||||
},
|
||||
})
|
||||
|
||||
return mds, nil
|
||||
ns := namespace.Wrap(ds, datastore.NewKey("/client"))
|
||||
return imports.NewManager(ns, dir), nil
|
||||
}
|
||||
|
||||
func ClientImportMgr(mds dtypes.ClientMultiDstore, ds dtypes.MetadataDS) dtypes.ClientImportMgr {
|
||||
return importmgr.New(mds, namespace.Wrap(ds, datastore.NewKey("/client")))
|
||||
}
|
||||
|
||||
func ClientBlockstore(imgr dtypes.ClientImportMgr) dtypes.ClientBlockstore {
|
||||
// TODO this should be removed.
|
||||
func ClientBlockstore() dtypes.ClientBlockstore {
|
||||
// in most cases this is now unused in normal operations -- however, it's important to preserve for the IPFS use case
|
||||
return blockstore.WrapIDStore(imgr.Blockstore)
|
||||
return blockstore.WrapIDStore(blockstore.FromDatastore(datastore.NewMapDatastore()))
|
||||
}
|
||||
|
||||
// RegisterClientValidator is an initialization hook that registers the client
|
||||
@ -173,13 +159,30 @@ func NewClientDatastore(ds dtypes.MetadataDS) dtypes.ClientDatastore {
|
||||
return namespace.Wrap(ds, datastore.NewKey("/deals/client"))
|
||||
}
|
||||
|
||||
func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, mds dtypes.ClientMultiDstore, r repo.LockedRepo, dataTransfer dtypes.ClientDataTransfer, discovery *discoveryimpl.Local, deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, j journal.Journal) (storagemarket.StorageClient, error) {
|
||||
// StorageBlockstoreAccessor returns the default storage blockstore accessor
|
||||
// from the import manager.
|
||||
func StorageBlockstoreAccessor(importmgr dtypes.ClientImportMgr) storagemarket.BlockstoreAccessor {
|
||||
return storageadapter.NewImportsBlockstoreAccessor(importmgr)
|
||||
}
|
||||
|
||||
// RetrievalBlockstoreAccessor returns the default retrieval blockstore accessor
|
||||
// using the subdirectory `retrievals` under the repo.
|
||||
func RetrievalBlockstoreAccessor(r repo.LockedRepo) (retrievalmarket.BlockstoreAccessor, error) {
|
||||
dir := filepath.Join(r.Path(), "retrievals")
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return nil, xerrors.Errorf("failed to create directory %s: %w", dir, err)
|
||||
}
|
||||
return retrievaladapter.NewCARBlockstoreAccessor(dir), nil
|
||||
}
|
||||
|
||||
func StorageClient(lc fx.Lifecycle, h host.Host, dataTransfer dtypes.ClientDataTransfer, discovery *discoveryimpl.Local,
|
||||
deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, accessor storagemarket.BlockstoreAccessor, j journal.Journal) (storagemarket.StorageClient, error) {
|
||||
// go-fil-markets protocol retries:
|
||||
// 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour
|
||||
marketsRetryParams := smnet.RetryParameters(time.Second, 5*time.Minute, 15, 5)
|
||||
net := smnet.NewFromLibp2pHost(h, marketsRetryParams)
|
||||
|
||||
c, err := storageimpl.NewClient(net, ibs, mds, dataTransfer, discovery, deals, scn, storageimpl.DealPollingInterval(time.Second))
|
||||
c, err := storageimpl.NewClient(net, dataTransfer, discovery, deals, scn, accessor, storageimpl.DealPollingInterval(time.Second))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -201,10 +204,13 @@ func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, md
|
||||
}
|
||||
|
||||
// RetrievalClient creates a new retrieval client attached to the client blockstore
|
||||
func RetrievalClient(lc fx.Lifecycle, h host.Host, mds dtypes.ClientMultiDstore, dt dtypes.ClientDataTransfer, payAPI payapi.PaychAPI, resolver discovery.PeerResolver, ds dtypes.MetadataDS, chainAPI full.ChainAPI, stateAPI full.StateAPI, j journal.Journal) (retrievalmarket.RetrievalClient, error) {
|
||||
func RetrievalClient(lc fx.Lifecycle, h host.Host, r repo.LockedRepo, dt dtypes.ClientDataTransfer, payAPI payapi.PaychAPI, resolver discovery.PeerResolver,
|
||||
ds dtypes.MetadataDS, chainAPI full.ChainAPI, stateAPI full.StateAPI, accessor retrievalmarket.BlockstoreAccessor, j journal.Journal) (retrievalmarket.RetrievalClient, error) {
|
||||
|
||||
adapter := retrievaladapter.NewRetrievalClientNode(payAPI, chainAPI, stateAPI)
|
||||
network := rmnet.NewFromLibp2pHost(h)
|
||||
client, err := retrievalimpl.NewClient(network, mds, dt, adapter, resolver, namespace.Wrap(ds, datastore.NewKey("/retrievals/client")))
|
||||
ds = namespace.Wrap(ds, datastore.NewKey("/retrievals/client"))
|
||||
client, err := retrievalimpl.NewClient(network, dt, adapter, resolver, ds, accessor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -221,13 +227,3 @@ func RetrievalClient(lc fx.Lifecycle, h host.Host, mds dtypes.ClientMultiDstore,
|
||||
})
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// ClientRetrievalStoreManager is the default version of the RetrievalStoreManager that runs on multistore
|
||||
func ClientRetrievalStoreManager(imgr dtypes.ClientImportMgr) dtypes.ClientRetrievalStoreManager {
|
||||
return retrievalstoremgr.NewMultiStoreRetrievalStoreManager(imgr)
|
||||
}
|
||||
|
||||
// ClientBlockstoreRetrievalStoreManager is the default version of the RetrievalStoreManager that runs on multistore
|
||||
func ClientBlockstoreRetrievalStoreManager(bs dtypes.ClientBlockstore) dtypes.ClientRetrievalStoreManager {
|
||||
return retrievalstoremgr.NewBlockstoreRetrievalStoreManager(bs)
|
||||
}
|
||||
|
@ -1,22 +1,18 @@
|
||||
package dtypes
|
||||
|
||||
import (
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-statestore"
|
||||
bserv "github.com/ipfs/go-blockservice"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-graphsync"
|
||||
exchange "github.com/ipfs/go-ipfs-exchange-interface"
|
||||
format "github.com/ipfs/go-ipld-format"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-fil-markets/piecestore"
|
||||
"github.com/filecoin-project/go-statestore"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation"
|
||||
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/node/repo/importmgr"
|
||||
"github.com/filecoin-project/lotus/node/repo/retrievalstoremgr"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
)
|
||||
|
||||
// MetadataDS stores metadata. By default it's namespaced under /metadata in
|
||||
@ -71,13 +67,11 @@ type (
|
||||
type ChainBitswap exchange.Interface
|
||||
type ChainBlockService bserv.BlockService
|
||||
|
||||
type ClientMultiDstore *multistore.MultiStore
|
||||
type ClientImportMgr *importmgr.Mgr
|
||||
type ClientImportMgr *imports.Manager
|
||||
type ClientBlockstore blockstore.BasicBlockstore
|
||||
type ClientDealStore *statestore.StateStore
|
||||
type ClientRequestValidator *requestvalidation.UnifiedRequestValidator
|
||||
type ClientDatastore datastore.Batching
|
||||
type ClientRetrievalStoreManager retrievalstoremgr.RetrievalStoreManager
|
||||
|
||||
type Graphsync graphsync.GraphExchange
|
||||
|
||||
@ -92,7 +86,5 @@ type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator
|
||||
// ProviderDataTransfer is a data transfer manager for the provider
|
||||
type ProviderDataTransfer datatransfer.Manager
|
||||
|
||||
type StagingDAG format.DAGService
|
||||
type StagingBlockstore blockstore.BasicBlockstore
|
||||
type StagingGraphsync graphsync.GraphExchange
|
||||
type StagingMultiDstore *multistore.MultiStore
|
||||
|
@ -1,9 +1,6 @@
|
||||
package modules
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/modules/helpers"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
"github.com/ipfs/go-graphsync"
|
||||
graphsyncimpl "github.com/ipfs/go-graphsync/impl"
|
||||
gsnet "github.com/ipfs/go-graphsync/network"
|
||||
@ -11,6 +8,10 @@ import (
|
||||
"github.com/libp2p/go-libp2p-core/host"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
"go.uber.org/fx"
|
||||
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/modules/helpers"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
// Graphsync creates a graphsync instance from the given loader and storer
|
||||
|
@ -1,16 +1,29 @@
|
||||
package modules
|
||||
|
||||
import (
|
||||
bstore "github.com/ipfs/go-ipfs-blockstore"
|
||||
"go.uber.org/fx"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/markets/retrievaladapter"
|
||||
"github.com/filecoin-project/lotus/markets/storageadapter"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/modules/helpers"
|
||||
)
|
||||
|
||||
func IpfsStorageBlockstoreAccessor(ipfsBlockstore dtypes.ClientBlockstore) storagemarket.BlockstoreAccessor {
|
||||
return storageadapter.NewFixedBlockstoreAccessor(bstore.Blockstore(ipfsBlockstore))
|
||||
}
|
||||
|
||||
func IpfsRetrievalBlockstoreAccessor(ipfsBlockstore dtypes.ClientBlockstore) retrievalmarket.BlockstoreAccessor {
|
||||
return retrievaladapter.NewFixedBlockstoreAccessor(bstore.Blockstore(ipfsBlockstore))
|
||||
}
|
||||
|
||||
// IpfsClientBlockstore returns a ClientBlockstore implementation backed by an IPFS node.
|
||||
// If ipfsMaddr is empty, a local IPFS node is assumed considering IPFS_PATH configuration.
|
||||
// If ipfsMaddr is not empty, it will connect to the remote IPFS node with the provided multiaddress.
|
@ -11,24 +11,10 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/lotus/markets/pricing"
|
||||
"go.uber.org/fx"
|
||||
"go.uber.org/multierr"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/ipfs/go-bitswap"
|
||||
"github.com/ipfs/go-bitswap/network"
|
||||
"github.com/ipfs/go-blockservice"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/namespace"
|
||||
graphsync "github.com/ipfs/go-graphsync/impl"
|
||||
gsnet "github.com/ipfs/go-graphsync/network"
|
||||
"github.com/ipfs/go-graphsync/storeutil"
|
||||
"github.com/ipfs/go-merkledag"
|
||||
"github.com/libp2p/go-libp2p-core/host"
|
||||
"github.com/libp2p/go-libp2p-core/routing"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
dtimpl "github.com/filecoin-project/go-data-transfer/impl"
|
||||
dtnet "github.com/filecoin-project/go-data-transfer/network"
|
||||
@ -44,19 +30,25 @@ import (
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask"
|
||||
smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/filecoin-project/go-paramfetch"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-statestore"
|
||||
"github.com/filecoin-project/go-storedcounter"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/namespace"
|
||||
graphsync "github.com/ipfs/go-graphsync/impl"
|
||||
gsnet "github.com/ipfs/go-graphsync/network"
|
||||
"github.com/ipfs/go-graphsync/storeutil"
|
||||
"github.com/libp2p/go-libp2p-core/host"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/api/v0api"
|
||||
"github.com/filecoin-project/lotus/api/v1api"
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
@ -67,7 +59,9 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/journal"
|
||||
"github.com/filecoin-project/lotus/markets"
|
||||
"github.com/filecoin-project/lotus/markets/dagstore"
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
"github.com/filecoin-project/lotus/markets/pricing"
|
||||
lotusminer "github.com/filecoin-project/lotus/miner"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
@ -383,27 +377,6 @@ func NewProviderPieceStore(lc fx.Lifecycle, ds dtypes.MetadataDS) (dtypes.Provid
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
func StagingMultiDatastore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.StagingMultiDstore, error) {
|
||||
ctx := helpers.LifecycleCtx(mctx, lc)
|
||||
ds, err := r.Datastore(ctx, "/staging")
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting datastore out of reop: %w", err)
|
||||
}
|
||||
|
||||
mds, err := multistore.NewMultiDstore(ds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lc.Append(fx.Hook{
|
||||
OnStop: func(ctx context.Context) error {
|
||||
return mds.Close()
|
||||
},
|
||||
})
|
||||
|
||||
return mds, nil
|
||||
}
|
||||
|
||||
// StagingBlockstore creates a blockstore for staging blocks for a miner
|
||||
// in a storage deal, prior to sealing
|
||||
func StagingBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.StagingBlockstore, error) {
|
||||
@ -416,26 +389,6 @@ func StagingBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRe
|
||||
return blockstore.FromDatastore(stagingds), nil
|
||||
}
|
||||
|
||||
// StagingDAG is a DAGService for the StagingBlockstore
|
||||
func StagingDAG(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, rt routing.Routing, h host.Host) (dtypes.StagingDAG, error) {
|
||||
|
||||
bitswapNetwork := network.NewFromIpfsHost(h, rt)
|
||||
bitswapOptions := []bitswap.Option{bitswap.ProvideEnabled(false)}
|
||||
exch := bitswap.New(mctx, bitswapNetwork, ibs, bitswapOptions...)
|
||||
|
||||
bsvc := blockservice.New(ibs, exch)
|
||||
dag := merkledag.NewDAGService(bsvc)
|
||||
|
||||
lc.Append(fx.Hook{
|
||||
OnStop: func(_ context.Context) error {
|
||||
// blockservice closes the exchange
|
||||
return bsvc.Close()
|
||||
},
|
||||
})
|
||||
|
||||
return dag, nil
|
||||
}
|
||||
|
||||
// StagingGraphsync creates a graphsync instance which reads and writes blocks
|
||||
// to the StagingBlockstore
|
||||
func StagingGraphsync(parallelTransfers uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync {
|
||||
@ -599,22 +552,34 @@ func BasicDealFilter(user dtypes.StorageDealFilter) func(onlineOk dtypes.Conside
|
||||
func StorageProvider(minerAddress dtypes.MinerAddress,
|
||||
storedAsk *storedask.StoredAsk,
|
||||
h host.Host, ds dtypes.MetadataDS,
|
||||
mds dtypes.StagingMultiDstore,
|
||||
r repo.LockedRepo,
|
||||
pieceStore dtypes.ProviderPieceStore,
|
||||
dataTransfer dtypes.ProviderDataTransfer,
|
||||
spn storagemarket.StorageProviderNode,
|
||||
df dtypes.StorageDealFilter,
|
||||
dsw *dagstore.Wrapper,
|
||||
) (storagemarket.StorageProvider, error) {
|
||||
net := smnet.NewFromLibp2pHost(h)
|
||||
store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(filepath.Join(r.Path(), "deal-staging")))
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opt := storageimpl.CustomDealDecisionLogic(storageimpl.DealDeciderFunc(df))
|
||||
|
||||
return storageimpl.NewProvider(net, namespace.Wrap(ds, datastore.NewKey("/deals/provider")), store, mds, pieceStore, dataTransfer, spn, address.Address(minerAddress), storedAsk, opt)
|
||||
return storageimpl.NewProvider(
|
||||
net,
|
||||
namespace.Wrap(ds, datastore.NewKey("/deals/provider")),
|
||||
store,
|
||||
dsw,
|
||||
pieceStore,
|
||||
dataTransfer,
|
||||
spn,
|
||||
address.Address(minerAddress),
|
||||
storedAsk,
|
||||
opt,
|
||||
)
|
||||
}
|
||||
|
||||
func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
|
||||
@ -672,17 +637,28 @@ func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnl
|
||||
func RetrievalProvider(
|
||||
maddr dtypes.MinerAddress,
|
||||
adapter retrievalmarket.RetrievalProviderNode,
|
||||
sa retrievalmarket.SectorAccessor,
|
||||
netwk rmnet.RetrievalMarketNetwork,
|
||||
ds dtypes.MetadataDS,
|
||||
pieceStore dtypes.ProviderPieceStore,
|
||||
mds dtypes.StagingMultiDstore,
|
||||
dt dtypes.ProviderDataTransfer,
|
||||
pricingFnc dtypes.RetrievalPricingFunc,
|
||||
userFilter dtypes.RetrievalDealFilter,
|
||||
dagStore *dagstore.Wrapper,
|
||||
) (retrievalmarket.RetrievalProvider, error) {
|
||||
opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter))
|
||||
return retrievalimpl.NewProvider(address.Address(maddr), adapter, netwk, pieceStore, mds, dt, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")),
|
||||
retrievalimpl.RetrievalPricingFunc(pricingFnc), opt)
|
||||
return retrievalimpl.NewProvider(
|
||||
address.Address(maddr),
|
||||
adapter,
|
||||
sa,
|
||||
netwk,
|
||||
pieceStore,
|
||||
dagStore,
|
||||
dt,
|
||||
namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")),
|
||||
retrievalimpl.RetrievalPricingFunc(pricingFnc),
|
||||
opt,
|
||||
)
|
||||
}
|
||||
|
||||
var WorkerCallsPrefix = datastore.NewKey("/worker/calls")
|
||||
|
112
node/modules/storageminer_dagstore.go
Normal file
@ -0,0 +1,112 @@
|
||||
package modules
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"go.uber.org/fx"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/dagstore"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
|
||||
mdagstore "github.com/filecoin-project/lotus/markets/dagstore"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
const (
|
||||
EnvDAGStoreCopyConcurrency = "LOTUS_DAGSTORE_COPY_CONCURRENCY"
|
||||
DefaultDAGStoreDir = "dagstore"
|
||||
)
|
||||
|
||||
// NewMinerAPI creates a new MinerAPI adaptor for the dagstore mounts.
|
||||
func NewMinerAPI(lc fx.Lifecycle, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, sa retrievalmarket.SectorAccessor) (mdagstore.MinerAPI, error) {
|
||||
cfg, err := extractDAGStoreConfig(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// caps the number of concurrent calls to the storage, so that we don't
|
||||
// spam it during heavy processes like bulk migration.
|
||||
if v, ok := os.LookupEnv("LOTUS_DAGSTORE_MOUNT_CONCURRENCY"); ok {
|
||||
concurrency, err := strconv.Atoi(v)
|
||||
if err == nil {
|
||||
cfg.MaxConcurrencyStorageCalls = concurrency
|
||||
}
|
||||
}
|
||||
|
||||
mountApi := mdagstore.NewMinerAPI(pieceStore, sa, cfg.MaxConcurrencyStorageCalls)
|
||||
ready := make(chan error, 1)
|
||||
pieceStore.OnReady(func(err error) {
|
||||
ready <- err
|
||||
})
|
||||
lc.Append(fx.Hook{
|
||||
OnStart: func(ctx context.Context) error {
|
||||
if err := <-ready; err != nil {
|
||||
return fmt.Errorf("aborting dagstore start; piecestore failed to start: %s", err)
|
||||
}
|
||||
return mountApi.Start(ctx)
|
||||
},
|
||||
OnStop: func(context.Context) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
|
||||
return mountApi, nil
|
||||
}
|
||||
|
||||
// DAGStore constructs a DAG store using the supplied minerAPI, and the
|
||||
// user configuration. It returns both the DAGStore and the Wrapper suitable for
|
||||
// passing to markets.
|
||||
func DAGStore(lc fx.Lifecycle, r repo.LockedRepo, minerAPI mdagstore.MinerAPI) (*dagstore.DAGStore, *mdagstore.Wrapper, error) {
|
||||
cfg, err := extractDAGStoreConfig(r)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// fall back to default root directory if not explicitly set in the config.
|
||||
if cfg.RootDir == "" {
|
||||
cfg.RootDir = filepath.Join(r.Path(), DefaultDAGStoreDir)
|
||||
}
|
||||
|
||||
v, ok := os.LookupEnv(EnvDAGStoreCopyConcurrency)
|
||||
if ok {
|
||||
concurrency, err := strconv.Atoi(v)
|
||||
if err == nil {
|
||||
cfg.MaxConcurrentReadyFetches = concurrency
|
||||
}
|
||||
}
|
||||
|
||||
dagst, w, err := mdagstore.NewDAGStore(cfg, minerAPI)
|
||||
if err != nil {
|
||||
return nil, nil, xerrors.Errorf("failed to create DAG store: %w", err)
|
||||
}
|
||||
|
||||
lc.Append(fx.Hook{
|
||||
OnStart: func(ctx context.Context) error {
|
||||
return w.Start(ctx)
|
||||
},
|
||||
OnStop: func(context.Context) error {
|
||||
return w.Close()
|
||||
},
|
||||
})
|
||||
|
||||
return dagst, w, nil
|
||||
}
|
||||
|
||||
func extractDAGStoreConfig(r repo.LockedRepo) (config.DAGStoreConfig, error) {
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return config.DAGStoreConfig{}, xerrors.Errorf("could not load config: %w", err)
|
||||
}
|
||||
mcfg, ok := cfg.(*config.StorageMiner)
|
||||
if !ok {
|
||||
return config.DAGStoreConfig{}, xerrors.Errorf("config not expected type; expected config.StorageMiner, got: %T", cfg)
|
||||
}
|
||||
return mcfg.DAGStore, nil
|
||||
}
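A simplified sketch of how these constructors could be grouped as fx providers (illustrative only; the real node builder wires them through its own option/override machinery, so this stand-in merely shows the dependency shape):

// dagstoreOptions groups the providers defined in this file. NewMinerAPI
// consumes the locked repo, the piece store and the sector accessor, while
// DAGStore yields both the *dagstore.DAGStore and the markets-facing wrapper.
func dagstoreOptions() fx.Option {
	return fx.Options(
		fx.Provide(NewMinerAPI),
		fx.Provide(DAGStore),
	)
}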
|
@ -1,111 +0,0 @@
|
||||
package importmgr
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/namespace"
|
||||
)
|
||||
|
||||
type Mgr struct {
|
||||
mds *multistore.MultiStore
|
||||
ds datastore.Batching
|
||||
|
||||
Blockstore blockstore.BasicBlockstore
|
||||
}
|
||||
|
||||
type Label string
|
||||
|
||||
const (
|
||||
LSource = "source" // Function which created the import
|
||||
LRootCid = "root" // Root CID
|
||||
LFileName = "filename" // Local file path
|
||||
LMTime = "mtime" // File modification timestamp
|
||||
)
|
||||
|
||||
func New(mds *multistore.MultiStore, ds datastore.Batching) *Mgr {
|
||||
return &Mgr{
|
||||
mds: mds,
|
||||
Blockstore: blockstore.Adapt(mds.MultiReadBlockstore()),
|
||||
|
||||
ds: datastore.NewLogDatastore(namespace.Wrap(ds, datastore.NewKey("/stores")), "storess"),
|
||||
}
|
||||
}
|
||||
|
||||
type StoreMeta struct {
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
func (m *Mgr) NewStore() (multistore.StoreID, *multistore.Store, error) {
|
||||
id := m.mds.Next()
|
||||
st, err := m.mds.Get(id)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
meta, err := json.Marshal(&StoreMeta{Labels: map[string]string{
|
||||
"source": "unknown",
|
||||
}})
|
||||
if err != nil {
|
||||
return 0, nil, xerrors.Errorf("marshaling empty store metadata: %w", err)
|
||||
}
|
||||
|
||||
err = m.ds.Put(datastore.NewKey(fmt.Sprintf("%d", id)), meta)
|
||||
return id, st, err
|
||||
}
|
||||
|
||||
func (m *Mgr) AddLabel(id multistore.StoreID, key, value string) error { // source, file path, data CID..
|
||||
meta, err := m.ds.Get(datastore.NewKey(fmt.Sprintf("%d", id)))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting metadata form datastore: %w", err)
|
||||
}
|
||||
|
||||
var sm StoreMeta
|
||||
if err := json.Unmarshal(meta, &sm); err != nil {
|
||||
return xerrors.Errorf("unmarshaling store meta: %w", err)
|
||||
}
|
||||
|
||||
sm.Labels[key] = value
|
||||
|
||||
meta, err = json.Marshal(&sm)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("marshaling store meta: %w", err)
|
||||
}
|
||||
|
||||
return m.ds.Put(datastore.NewKey(fmt.Sprintf("%d", id)), meta)
|
||||
}
|
||||
|
||||
func (m *Mgr) List() []multistore.StoreID {
|
||||
return m.mds.List()
|
||||
}
|
||||
|
||||
func (m *Mgr) Info(id multistore.StoreID) (*StoreMeta, error) {
|
||||
meta, err := m.ds.Get(datastore.NewKey(fmt.Sprintf("%d", id)))
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting metadata form datastore: %w", err)
|
||||
}
|
||||
|
||||
var sm StoreMeta
|
||||
if err := json.Unmarshal(meta, &sm); err != nil {
|
||||
return nil, xerrors.Errorf("unmarshaling store meta: %w", err)
|
||||
}
|
||||
|
||||
return &sm, nil
|
||||
}
|
||||
|
||||
func (m *Mgr) Remove(id multistore.StoreID) error {
|
||||
if err := m.mds.Delete(id); err != nil {
|
||||
return xerrors.Errorf("removing import: %w", err)
|
||||
}
|
||||
|
||||
if err := m.ds.Delete(datastore.NewKey(fmt.Sprintf("%d", id))); err != nil {
|
||||
return xerrors.Errorf("removing import metadata: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
268
node/repo/imports/manager.go
Normal file
@ -0,0 +1,268 @@
|
||||
package imports
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-datastore/query"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/namespace"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/shared"
|
||||
)
|
||||
|
||||
var log = logging.Logger("importmgr")
|
||||
|
||||
type ID uint64
|
||||
|
||||
func (id ID) dsKey() datastore.Key {
|
||||
return datastore.NewKey(fmt.Sprintf("%d", id))
|
||||
}
|
||||
|
||||
type Manager struct {
|
||||
ds datastore.Batching
|
||||
rootDir string
|
||||
counter *shared.TimeCounter
|
||||
}
|
||||
|
||||
type LabelKey = string
|
||||
type LabelValue = string
|
||||
|
||||
const (
|
||||
CAROwnerImportMgr = "importmgr"
|
||||
CAROwnerUser = "user"
|
||||
)
|
||||
|
||||
const (
|
||||
LSource = LabelKey("source") // Function which created the import
|
||||
LRootCid = LabelKey("root") // Root CID
|
||||
LFileName = LabelKey("filename") // Local file path of the source file.
|
||||
LCARPath = LabelKey("car_path") // Path of the CARv2 file containing the imported data.
|
||||
LCAROwner = LabelKey("car_owner") // Owner of the CAR; "importmgr" is us; "user" or empty is them.
|
||||
)
|
||||
|
||||
func NewManager(ds datastore.Batching, rootDir string) *Manager {
|
||||
ds = namespace.Wrap(ds, datastore.NewKey("/stores"))
|
||||
ds = datastore.NewLogDatastore(ds, "storess")
|
||||
|
||||
m := &Manager{
|
||||
ds: ds,
|
||||
rootDir: rootDir,
|
||||
counter: shared.NewTimeCounter(),
|
||||
}
|
||||
|
||||
log.Info("sanity checking imports")
|
||||
|
||||
ids, err := m.List()
|
||||
if err != nil {
|
||||
log.Warnw("failed to enumerate imports on initialization", "error", err)
|
||||
return m
|
||||
}
|
||||
|
||||
var broken int
|
||||
for _, id := range ids {
|
||||
log := log.With("id", id)
|
||||
|
||||
info, err := m.Info(id)
|
||||
if err != nil {
|
||||
log.Warnw("failed to query metadata for import; skipping", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
log = log.With("source", info.Labels[LSource], "root", info.Labels[LRootCid], "original", info.Labels[LFileName])
|
||||
|
||||
path, ok := info.Labels[LCARPath]
|
||||
if !ok {
|
||||
broken++
|
||||
log.Warnw("import lacks carv2 path; import will not work; please reimport")
|
||||
continue
|
||||
}
|
||||
|
||||
stat, err := os.Stat(path)
|
||||
if err != nil {
|
||||
broken++
|
||||
log.Warnw("import has missing/broken carv2; please reimport", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Infow("import ok", "size", stat.Size())
|
||||
}
|
||||
|
||||
log.Infow("sanity check completed", "broken", broken, "total", len(ids))
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
type Meta struct {
|
||||
Labels map[LabelKey]LabelValue
|
||||
}
|
||||
|
||||
// CreateImport initializes a new import and returns its ID; a CARv2 file for
// the import is allocated separately via AllocateCAR.
|
||||
func (m *Manager) CreateImport() (id ID, err error) {
|
||||
id = ID(m.counter.Next())
|
||||
|
||||
meta := &Meta{Labels: map[LabelKey]LabelValue{
|
||||
LSource: "unknown",
|
||||
}}
|
||||
|
||||
metajson, err := json.Marshal(meta)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("marshaling store metadata: %w", err)
|
||||
}
|
||||
|
||||
err = m.ds.Put(id.dsKey(), metajson)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("failed to insert import metadata: %w", err)
|
||||
}
|
||||
|
||||
return id, err
|
||||
}
|
||||
|
||||
// AllocateCAR creates a new CAR allocated to the supplied import under the
|
||||
// root directory.
|
||||
func (m *Manager) AllocateCAR(id ID) (path string, err error) {
|
||||
meta, err := m.ds.Get(id.dsKey())
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("getting metadata form datastore: %w", err)
|
||||
}
|
||||
|
||||
var sm Meta
|
||||
if err := json.Unmarshal(meta, &sm); err != nil {
|
||||
return "", xerrors.Errorf("unmarshaling store meta: %w", err)
|
||||
}
|
||||
|
||||
// refuse if a CAR path already exists.
|
||||
if curr := sm.Labels[LCARPath]; curr != "" {
|
||||
return "", xerrors.Errorf("import CAR already exists at %s: %w", curr, err)
|
||||
}
|
||||
|
||||
path = filepath.Join(m.rootDir, fmt.Sprintf("%d.car", id))
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("failed to create car file for import: %w", err)
|
||||
}
|
||||
|
||||
// close the file before returning the path.
|
||||
if err := file.Close(); err != nil {
|
||||
return "", xerrors.Errorf("failed to close temp file: %w", err)
|
||||
}
|
||||
|
||||
// record the path and ownership.
|
||||
sm.Labels[LCARPath] = path
|
||||
sm.Labels[LCAROwner] = CAROwnerImportMgr
|
||||
|
||||
if meta, err = json.Marshal(sm); err != nil {
|
||||
return "", xerrors.Errorf("marshaling store metadata: %w", err)
|
||||
}
|
||||
|
||||
err = m.ds.Put(id.dsKey(), meta)
|
||||
return path, err
|
||||
}
|
||||
|
||||
// AddLabel adds a label associated with an import, such as the source,
|
||||
// car path, CID, etc.
|
||||
func (m *Manager) AddLabel(id ID, key LabelKey, value LabelValue) error {
|
||||
meta, err := m.ds.Get(id.dsKey())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting metadata form datastore: %w", err)
|
||||
}
|
||||
|
||||
var sm Meta
|
||||
if err := json.Unmarshal(meta, &sm); err != nil {
|
||||
return xerrors.Errorf("unmarshaling store meta: %w", err)
|
||||
}
|
||||
|
||||
sm.Labels[key] = value
|
||||
|
||||
meta, err = json.Marshal(&sm)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("marshaling store meta: %w", err)
|
||||
}
|
||||
|
||||
return m.ds.Put(id.dsKey(), meta)
|
||||
}
|
||||
|
||||
// List returns all import IDs known by this Manager.
|
||||
func (m *Manager) List() ([]ID, error) {
|
||||
var keys []ID
|
||||
|
||||
qres, err := m.ds.Query(query.Query{KeysOnly: true})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("query error: %w", err)
|
||||
}
|
||||
defer qres.Close() //nolint:errcheck
|
||||
|
||||
for r := range qres.Next() {
|
||||
k := r.Key
|
||||
if string(k[0]) == "/" {
|
||||
k = k[1:]
|
||||
}
|
||||
|
||||
id, err := strconv.ParseUint(k, 10, 64)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to parse key %s to uint64, err=%w", r.Key, err)
|
||||
}
|
||||
keys = append(keys, ID(id))
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// Info returns the metadata known to this store for the specified import ID.
|
||||
func (m *Manager) Info(id ID) (*Meta, error) {
|
||||
meta, err := m.ds.Get(id.dsKey())
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting metadata form datastore: %w", err)
|
||||
}
|
||||
|
||||
var sm Meta
|
||||
if err := json.Unmarshal(meta, &sm); err != nil {
|
||||
return nil, xerrors.Errorf("unmarshaling store meta: %w", err)
|
||||
}
|
||||
|
||||
return &sm, nil
|
||||
}
|
||||
|
||||
// Remove drops all data associated with the supplied import ID.
|
||||
func (m *Manager) Remove(id ID) error {
|
||||
if err := m.ds.Delete(id.dsKey()); err != nil {
|
||||
return xerrors.Errorf("removing import metadata: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Manager) CARPathFor(dagRoot cid.Cid) (string, error) {
|
||||
ids, err := m.List()
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("failed to fetch import IDs: %w", err)
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
info, err := m.Info(id)
|
||||
if err != nil {
|
||||
log.Errorf("failed to fetch info, importID=%d: %s", id, err)
|
||||
continue
|
||||
}
|
||||
if info.Labels[LRootCid] == "" {
|
||||
continue
|
||||
}
|
||||
c, err := cid.Parse(info.Labels[LRootCid])
|
||||
if err != nil {
|
||||
log.Errorf("failed to parse root cid %s: %s", info.Labels[LRootCid], err)
|
||||
continue
|
||||
}
|
||||
if c.Equals(dagRoot) {
|
||||
return info.Labels[LCARPath], nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
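To illustrate the shape of the new manager (not part of the patch), a minimal flow; the in-memory datastore and the temporary directory are stand-ins for the repo-backed ones wired up in node/modules:

import (
	"fmt"
	"os"

	"github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"

	"github.com/filecoin-project/lotus/node/repo/imports"
)

func exampleImportFlow() error {
	dir, err := os.MkdirTemp("", "lotus-imports-example")
	if err != nil {
		return err
	}
	// same MutexWrap(MapDatastore) pattern the tests in this repo use for a Batching datastore.
	mgr := imports.NewManager(dssync.MutexWrap(datastore.NewMapDatastore()), dir)

	id, err := mgr.CreateImport() // register a new import record
	if err != nil {
		return err
	}
	carPath, err := mgr.AllocateCAR(id) // a ref-CARv2 file under dir, owned by the import manager
	if err != nil {
		return err
	}
	if err := mgr.AddLabel(id, imports.LSource, "example"); err != nil {
		return err
	}
	fmt.Println("import", id, "will be materialized at", carPath)
	return nil
}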
|
@ -1,110 +0,0 @@
|
||||
package retrievalstoremgr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/node/repo/importmgr"
|
||||
"github.com/ipfs/go-blockservice"
|
||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
||||
ipldformat "github.com/ipfs/go-ipld-format"
|
||||
"github.com/ipfs/go-merkledag"
|
||||
)
|
||||
|
||||
// RetrievalStore references a store for a retrieval deal
|
||||
// which may or may not have a multistore ID associated with it
|
||||
type RetrievalStore interface {
|
||||
StoreID() *multistore.StoreID
|
||||
DAGService() ipldformat.DAGService
|
||||
}
|
||||
|
||||
// RetrievalStoreManager manages stores for retrieval deals, abstracting
|
||||
// the underlying storage mechanism
|
||||
type RetrievalStoreManager interface {
|
||||
NewStore() (RetrievalStore, error)
|
||||
ReleaseStore(RetrievalStore) error
|
||||
}
|
||||
|
||||
// MultiStoreRetrievalStoreManager manages stores on top of the import manager
|
||||
type MultiStoreRetrievalStoreManager struct {
|
||||
imgr *importmgr.Mgr
|
||||
}
|
||||
|
||||
var _ RetrievalStoreManager = &MultiStoreRetrievalStoreManager{}
|
||||
|
||||
// NewMultiStoreRetrievalStoreManager returns a new multstore based RetrievalStoreManager
|
||||
func NewMultiStoreRetrievalStoreManager(imgr *importmgr.Mgr) RetrievalStoreManager {
|
||||
return &MultiStoreRetrievalStoreManager{
|
||||
imgr: imgr,
|
||||
}
|
||||
}
|
||||
|
||||
// NewStore creates a new store (uses multistore)
|
||||
func (mrsm *MultiStoreRetrievalStoreManager) NewStore() (RetrievalStore, error) {
|
||||
storeID, store, err := mrsm.imgr.NewStore()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &multiStoreRetrievalStore{storeID, store}, nil
|
||||
}
|
||||
|
||||
// ReleaseStore releases a store (uses multistore remove)
|
||||
func (mrsm *MultiStoreRetrievalStoreManager) ReleaseStore(retrievalStore RetrievalStore) error {
|
||||
mrs, ok := retrievalStore.(*multiStoreRetrievalStore)
|
||||
if !ok {
|
||||
return errors.New("Cannot release this store type")
|
||||
}
|
||||
return mrsm.imgr.Remove(mrs.storeID)
|
||||
}
|
||||
|
||||
type multiStoreRetrievalStore struct {
|
||||
storeID multistore.StoreID
|
||||
store *multistore.Store
|
||||
}
|
||||
|
||||
func (mrs *multiStoreRetrievalStore) StoreID() *multistore.StoreID {
|
||||
return &mrs.storeID
|
||||
}
|
||||
|
||||
func (mrs *multiStoreRetrievalStore) DAGService() ipldformat.DAGService {
|
||||
return mrs.store.DAG
|
||||
}
|
||||
|
||||
// BlockstoreRetrievalStoreManager manages a single blockstore as if it were multiple stores
|
||||
type BlockstoreRetrievalStoreManager struct {
|
||||
bs blockstore.BasicBlockstore
|
||||
}
|
||||
|
||||
var _ RetrievalStoreManager = &BlockstoreRetrievalStoreManager{}
|
||||
|
||||
// NewBlockstoreRetrievalStoreManager returns a new blockstore based RetrievalStoreManager
|
||||
func NewBlockstoreRetrievalStoreManager(bs blockstore.BasicBlockstore) RetrievalStoreManager {
|
||||
return &BlockstoreRetrievalStoreManager{
|
||||
bs: bs,
|
||||
}
|
||||
}
|
||||
|
||||
// NewStore creates a new store (just uses underlying blockstore)
|
||||
func (brsm *BlockstoreRetrievalStoreManager) NewStore() (RetrievalStore, error) {
|
||||
return &blockstoreRetrievalStore{
|
||||
dagService: merkledag.NewDAGService(blockservice.New(brsm.bs, offline.Exchange(brsm.bs))),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ReleaseStore for this implementation does nothing
|
||||
func (brsm *BlockstoreRetrievalStoreManager) ReleaseStore(RetrievalStore) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type blockstoreRetrievalStore struct {
|
||||
dagService ipldformat.DAGService
|
||||
}
|
||||
|
||||
func (brs *blockstoreRetrievalStore) StoreID() *multistore.StoreID {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (brs *blockstoreRetrievalStore) DAGService() ipldformat.DAGService {
|
||||
return brs.dagService
|
||||
}
|
@ -1,137 +0,0 @@
|
||||
package retrievalstoremgr_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/query"
|
||||
dss "github.com/ipfs/go-datastore/sync"
|
||||
format "github.com/ipfs/go-ipld-format"
|
||||
dag "github.com/ipfs/go-merkledag"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/node/repo/importmgr"
|
||||
"github.com/filecoin-project/lotus/node/repo/retrievalstoremgr"
|
||||
)
|
||||
|
||||
func TestMultistoreRetrievalStoreManager(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ds := dss.MutexWrap(datastore.NewMapDatastore())
|
||||
multiDS, err := multistore.NewMultiDstore(ds)
|
||||
require.NoError(t, err)
|
||||
imgr := importmgr.New(multiDS, ds)
|
||||
retrievalStoreMgr := retrievalstoremgr.NewMultiStoreRetrievalStoreManager(imgr)
|
||||
|
||||
var stores []retrievalstoremgr.RetrievalStore
|
||||
for i := 0; i < 5; i++ {
|
||||
store, err := retrievalStoreMgr.NewStore()
|
||||
require.NoError(t, err)
|
||||
stores = append(stores, store)
|
||||
nds := generateNodesOfSize(5, 100)
|
||||
err = store.DAGService().AddMany(ctx, nds)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
t.Run("creates all keys", func(t *testing.T) {
|
||||
qres, err := ds.Query(query.Query{KeysOnly: true})
|
||||
require.NoError(t, err)
|
||||
all, err := qres.Rest()
|
||||
require.NoError(t, err)
|
||||
require.Len(t, all, 31)
|
||||
})
|
||||
|
||||
t.Run("loads DAG services", func(t *testing.T) {
|
||||
for _, store := range stores {
|
||||
mstore, err := multiDS.Get(*store.StoreID())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, mstore.DAG, store.DAGService())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("delete stores", func(t *testing.T) {
|
||||
err := retrievalStoreMgr.ReleaseStore(stores[4])
|
||||
require.NoError(t, err)
|
||||
storeIndexes := multiDS.List()
|
||||
require.Len(t, storeIndexes, 4)
|
||||
|
||||
qres, err := ds.Query(query.Query{KeysOnly: true})
|
||||
require.NoError(t, err)
|
||||
all, err := qres.Rest()
|
||||
require.NoError(t, err)
|
||||
require.Len(t, all, 25)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBlockstoreRetrievalStoreManager(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ds := dss.MutexWrap(datastore.NewMapDatastore())
|
||||
bs := blockstore.FromDatastore(ds)
|
||||
retrievalStoreMgr := retrievalstoremgr.NewBlockstoreRetrievalStoreManager(bs)
|
||||
var stores []retrievalstoremgr.RetrievalStore
|
||||
var cids []cid.Cid
|
||||
for i := 0; i < 5; i++ {
|
||||
store, err := retrievalStoreMgr.NewStore()
|
||||
require.NoError(t, err)
|
||||
stores = append(stores, store)
|
||||
nds := generateNodesOfSize(5, 100)
|
||||
err = store.DAGService().AddMany(ctx, nds)
|
||||
require.NoError(t, err)
|
||||
for _, nd := range nds {
|
||||
cids = append(cids, nd.Cid())
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("creates all keys", func(t *testing.T) {
|
||||
qres, err := ds.Query(query.Query{KeysOnly: true})
|
||||
require.NoError(t, err)
|
||||
all, err := qres.Rest()
|
||||
require.NoError(t, err)
|
||||
require.Len(t, all, 25)
|
||||
})
|
||||
|
||||
t.Run("loads DAG services, all DAG has all nodes", func(t *testing.T) {
|
||||
for _, store := range stores {
|
||||
dagService := store.DAGService()
|
||||
for _, cid := range cids {
|
||||
_, err := dagService.Get(ctx, cid)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("release store has no effect", func(t *testing.T) {
|
||||
err := retrievalStoreMgr.ReleaseStore(stores[4])
|
||||
require.NoError(t, err)
|
||||
qres, err := ds.Query(query.Query{KeysOnly: true})
|
||||
require.NoError(t, err)
|
||||
all, err := qres.Rest()
|
||||
require.NoError(t, err)
|
||||
require.Len(t, all, 25)
|
||||
})
|
||||
}
|
||||
|
||||
var seedSeq int64 = 0
|
||||
|
||||
func randomBytes(n int64) []byte {
|
||||
randBytes := make([]byte, n)
|
||||
r := rand.New(rand.NewSource(seedSeq))
|
||||
_, _ = r.Read(randBytes)
|
||||
seedSeq++
|
||||
return randBytes
|
||||
}
|
||||
|
||||
func generateNodesOfSize(n int, size int64) []format.Node {
|
||||
generatedNodes := make([]format.Node, 0, n)
|
||||
for i := 0; i < n; i++ {
|
||||
b := dag.NewRawNode(randomBytes(size))
|
||||
generatedNodes = append(generatedNodes, b)
|
||||
|
||||
}
|
||||
return generatedNodes
|
||||
}
@@ -5,6 +5,7 @@ package paychmgr
import (
	"fmt"
	"io"
	"math"
	"sort"

	address "github.com/filecoin-project/go-address"
@@ -16,6 +17,7 @@ import (

var _ = xerrors.Errorf
var _ = cid.Undef
var _ = math.E
var _ = sort.Sort

func (t *VoucherInfo) MarshalCBOR(w io.Writer) error {
@@ -8,25 +8,24 @@ require (
	github.com/davecgh/go-spew v1.1.1
	github.com/drand/drand v1.2.1
	github.com/filecoin-project/go-address v0.0.5
	github.com/filecoin-project/go-data-transfer v1.6.0
	github.com/filecoin-project/go-fil-markets v1.5.0
	github.com/filecoin-project/go-data-transfer v1.7.0
	github.com/filecoin-project/go-fil-markets v1.6.0-rc1.0.20210715124641-2412ccd89114
	github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec
	github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48
	github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
	github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4
	github.com/filecoin-project/lotus v1.10.1-0.20210715125135-ed058ae1936d
	github.com/filecoin-project/specs-actors v0.9.14
	github.com/google/uuid v1.1.2
	github.com/gorilla/mux v1.7.4
	github.com/hashicorp/go-multierror v1.1.0
	github.com/influxdata/influxdb v1.8.3 // indirect
	github.com/ipfs/go-cid v0.0.7
	github.com/ipfs/go-cid v0.0.8-0.20210702173502-41f2377d9672
	github.com/ipfs/go-datastore v0.4.5
	github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17 // indirect
	github.com/ipfs/go-ipfs-files v0.0.8
	github.com/ipfs/go-ipld-format v0.2.0
	github.com/ipfs/go-log/v2 v2.1.3
	github.com/ipfs/go-log/v2 v2.3.0
	github.com/ipfs/go-merkledag v0.3.2
	github.com/ipfs/go-unixfs v0.2.4
	github.com/ipfs/go-unixfs v0.2.6
	github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d
	github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c
	github.com/libp2p/go-libp2p v0.14.2
@ -25,6 +25,7 @@ contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A=
|
||||
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||
@ -175,6 +176,7 @@ github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmf
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg=
|
||||
@ -263,6 +265,8 @@ github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGj
|
||||
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E=
|
||||
github.com/filecoin-project/dagstore v0.1.0 h1:lENA+8LlO2TtGBTP2MzZGF3kmjmzE9hB7hZ+bDGsnPY=
|
||||
github.com/filecoin-project/dagstore v0.1.0/go.mod h1:cqqORk5fbkKVwwZkFk3D7XfeLpsTbWkX5Uj1GrsBOmM=
|
||||
github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
|
||||
github.com/filecoin-project/go-address v0.0.5 h1:SSaFT/5aLfPXycUlFyemoHYhRgdyXClXCyDdNJKPlDM=
|
||||
github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
|
||||
@ -278,22 +282,22 @@ github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW
|
||||
github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
|
||||
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8=
|
||||
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg=
|
||||
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U=
|
||||
github.com/filecoin-project/go-commp-utils v0.1.0/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U=
|
||||
github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 h1:U9Z+76pHCKBmtdxFV7JFZJj7OVm12I6dEKwtMVbq5p0=
|
||||
github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U=
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus=
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
|
||||
github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo=
|
||||
github.com/filecoin-project/go-data-transfer v1.6.0 h1:DHIzEc23ydRCCBwtFet3MfgO8gMpZEnw60Y+s71oX6o=
|
||||
github.com/filecoin-project/go-data-transfer v1.6.0/go.mod h1:E3WW4mCEYwU2y65swPEajSZoFWFmfXt7uwGduoACZQc=
|
||||
github.com/filecoin-project/go-data-transfer v1.7.0 h1:mFRn+UuTdPROmhplLSekzd4rAs9ug8ubtSY4nw9wYkU=
|
||||
github.com/filecoin-project/go-data-transfer v1.7.0/go.mod h1:GLRr5BmLEqsLwXfiRDG7uJvph22KGL2M4iOuF8EINaU=
|
||||
github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ=
|
||||
github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
|
||||
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
|
||||
github.com/filecoin-project/go-fil-markets v1.5.0 h1:3KEs01L8XFCEgujZ6ggFjr1XWjpjTQcmSSeo3I99I0k=
|
||||
github.com/filecoin-project/go-fil-markets v1.5.0/go.mod h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk=
|
||||
github.com/filecoin-project/go-fil-markets v1.6.0-rc1.0.20210715124641-2412ccd89114 h1:uEJghQAwCTCPpR/aQLGvnqahWPDOLYL4jnYsdeItsKc=
|
||||
github.com/filecoin-project/go-fil-markets v1.6.0-rc1.0.20210715124641-2412ccd89114/go.mod h1:iegdHk34YkHHpgGVB/dKij1emfhoTb2lat80WMWw3Ag=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
|
||||
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
|
||||
@ -323,15 +327,14 @@ github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/
|
||||
github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI=
|
||||
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg=
|
||||
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8=
|
||||
github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4 h1:u5/uky+PdeaGuEGsExtVP8UUB8No/e873xjqcb7h3CM=
|
||||
github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4/go.mod h1:8ooe5Rzw80rJL0br81A8NNiwZ4BUVzPRwAnDxUG4E7g=
|
||||
github.com/filecoin-project/lotus v1.10.1-0.20210715125135-ed058ae1936d h1:hGVeAKlfdyk6cjiU/vK8pl9+Oj8OKM4PLt3j6cAGMvg=
|
||||
github.com/filecoin-project/lotus v1.10.1-0.20210715125135-ed058ae1936d/go.mod h1:ZU8NxaMnfUp5uPL+8sYwBsL2qQX6ZxeIEzVz76soAPE=
|
||||
github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4=
|
||||
github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
|
||||
github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
|
||||
github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY=
|
||||
github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc=
|
||||
@ -384,6 +387,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
|
||||
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
@ -468,8 +472,9 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@ -608,8 +613,9 @@ github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj
|
||||
github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
|
||||
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
|
||||
github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
|
||||
github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY=
|
||||
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
|
||||
github.com/ipfs/go-cid v0.0.8-0.20210702173502-41f2377d9672 h1:PabVicIEIt7qUwx5gu80wZsALHUZ4Zux37M+x0n/Erk=
|
||||
github.com/ipfs/go-cid v0.0.8-0.20210702173502-41f2377d9672/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o=
|
||||
github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo=
|
||||
github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s=
|
||||
github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
|
||||
@ -650,9 +656,8 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28
|
||||
github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE=
|
||||
github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0=
|
||||
github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY=
|
||||
github.com/ipfs/go-graphsync v0.6.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk=
|
||||
github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17 h1:rOoF88dVuDGbIx7idSdimN7JvXriyOIT96WD3eX9sHA=
|
||||
github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17/go.mod h1:5WyaeigpNdpiYQuW2vwpuecOoEfB4h747ZGEOKmAGTg=
|
||||
github.com/ipfs/go-graphsync v0.6.4 h1:g6wFRK2BkLPnx8nfoSdnokp5gtpuGyWZjbqI6q3NGb8=
|
||||
github.com/ipfs/go-graphsync v0.6.4/go.mod h1:5WyaeigpNdpiYQuW2vwpuecOoEfB4h747ZGEOKmAGTg=
|
||||
github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
|
||||
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
|
||||
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
|
||||
@ -718,8 +723,9 @@ github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSI
|
||||
github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I=
|
||||
github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk=
|
||||
github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A=
|
||||
github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY=
|
||||
github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs=
|
||||
github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8=
|
||||
github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo=
|
||||
github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
|
||||
github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
|
||||
github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
|
||||
@ -728,8 +734,9 @@ github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscw
|
||||
github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
|
||||
github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
|
||||
github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
|
||||
github.com/ipfs/go-log/v2 v2.1.3 h1:1iS3IU7aXRlbgUpN8yTTpJ53NXYjAe37vcI5+5nYrzk=
|
||||
github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
|
||||
github.com/ipfs/go-log/v2 v2.3.0 h1:31Re/cPqFHpsRHgyVwjWADPoF0otB1WrjTy8ZFYwEZU=
|
||||
github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g=
|
||||
github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA=
|
||||
github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto=
|
||||
github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
|
||||
@ -752,8 +759,9 @@ github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC
|
||||
github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8=
|
||||
github.com/ipfs/go-unixfs v0.2.1/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k=
|
||||
github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k=
|
||||
github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo=
|
||||
github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw=
|
||||
github.com/ipfs/go-unixfs v0.2.6 h1:gq3U3T2vh8x6tXhfo3uSO3n+2z4yW0tYtNgVP/3sIyA=
|
||||
github.com/ipfs/go-unixfs v0.2.6/go.mod h1:GTTzQvaZsTZARdNkkdjDKFFnBhmO3e5mIM1PkH/x4p0=
|
||||
github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E=
|
||||
github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0=
|
||||
github.com/ipfs/interface-go-ipfs-core v0.2.3 h1:E6uQ+1fJjkxJWlL9lAE72a5FWeyeeNL3GitLy8+jq3Y=
|
||||
@ -766,6 +774,9 @@ github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBH
|
||||
github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw=
|
||||
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d h1:iphSzTuPqyDgH7WUVZsdqUnQNzYgIblsVr1zhVNA33U=
|
||||
github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ=
|
||||
github.com/ipld/go-car/v2 v2.0.0-20210708104948-d79de78d9567/go.mod h1:Ueq4zx/SNx7yHwmfr9xKlKpXxRCMM6wyqC8B0rv9oig=
|
||||
github.com/ipld/go-car/v2 v2.0.0-20210715123707-a315bb047f6b h1:jr7cFCEeu+rDhkivLTI5BX1JrPNLvzYtsOpIHAcfdR8=
|
||||
github.com/ipld/go-car/v2 v2.0.0-20210715123707-a315bb047f6b/go.mod h1:0nAH3QhJOua+Dz6SkD6zOYtoZMNCJSDHY4IrbYe3AQs=
|
||||
github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w=
|
||||
github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8=
|
||||
github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM=
|
||||
@ -842,7 +853,10 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4 h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
@ -1017,6 +1031,7 @@ github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFx
|
||||
github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ=
|
||||
github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
|
||||
github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM=
|
||||
github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE=
|
||||
github.com/libp2p/go-libp2p-noise v0.2.0 h1:wmk5nhB9a2w2RxMOyvsoKjizgJOEaJdfAakr0jN8gds=
|
||||
github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q=
|
||||
github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo=
|
||||
@ -1111,6 +1126,7 @@ github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU=
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4=
|
||||
github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30=
|
||||
github.com/libp2p/go-libp2p-yamux v0.4.1/go.mod h1:FA/NjRYRVNjqOzpGuGqcruH7jAU2mYIjtKBicVOL3dc=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.4 h1:/UOPtT/6DHPtr3TtKXBHa6g0Le0szYuI33Xc/Xpd7fQ=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE=
|
||||
@ -1227,15 +1243,16 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=
|
||||
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
@ -1267,8 +1284,9 @@ github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+
|
||||
github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
|
||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
@ -1327,6 +1345,8 @@ github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/g
|
||||
github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
|
||||
github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
|
||||
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
|
||||
github.com/multiformats/go-multicodec v0.2.1-0.20210713081508-b421db6850ae h1:wfljHPpiR0UDOjeqld9ds0Zxl3Nt/j+0wnvyBc01JgY=
|
||||
github.com/multiformats/go-multicodec v0.2.1-0.20210713081508-b421db6850ae/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ=
|
||||
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
|
||||
github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po=
|
||||
github.com/multiformats/go-multihash v0.0.7/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM=
|
||||
@ -1334,8 +1354,9 @@ github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa
|
||||
github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
|
||||
github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I=
|
||||
github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
|
||||
github.com/multiformats/go-multihash v0.0.15 h1:hWOPdrNqDjwHDx82vsYGSDZNyktOJJ2dzZJzFkOV1jM=
|
||||
github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
|
||||
github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
@ -1360,6 +1381,7 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA=
|
||||
github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28=
|
||||
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg=
|
||||
@ -1415,6 +1437,8 @@ github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChl
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk=
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw=
|
||||
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
|
||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
@ -1490,6 +1514,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI=
|
||||
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
|
||||
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
||||
@ -1534,8 +1560,9 @@ github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81a
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
|
||||
@ -1572,7 +1599,6 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
|
||||
github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
@ -1628,6 +1654,8 @@ github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMU
|
||||
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM=
|
||||
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY=
|
||||
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM=
|
||||
github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0=
|
||||
github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
|
||||
@ -1708,8 +1736,14 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
|
||||
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
|
||||
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
|
||||
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
|
||||
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
@ -1774,8 +1808,10 @@ golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf h1:B2n+Zi5QeYRDAEodEu72OS36gmTWjgpXr2+cWcBW90o=
|
||||
golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@ -1783,13 +1819,15 @@ golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc=
|
||||
golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
|
||||
golang.org/x/exp v0.0.0-20210615023648-acb5c1269671 h1:ddvpKwqE7dm58PoWjRCmaCiA3DANEW0zWGfNYQD212Y=
|
||||
golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc=
|
||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
@ -1807,13 +1845,16 @@ golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -1881,6 +1922,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -1953,10 +1995,14 @@ golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210426080607-c94f62235c83 h1:kHSDPqCtsHZOg0nVylfTo20DDhE9gG4Y0jn7hKQ0QAM=
|
||||
golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M=
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
@ -2002,12 +2048,12 @@ golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtn
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
@ -2016,8 +2062,9 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
||||
golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@ -2110,6 +2157,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
@ -2145,8 +2193,8 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=
|
||||
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
|
||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
|
||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
|
||||
modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ=
|
||||
|