Annotate client storage deals feature

parent 2f1f35cc71
commit 3e32aa896c
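Every hunk below applies the same pattern: a `//stm:` comment naming the scenario IDs a test covers is inserted at the top of the test body (and one retrieval test file gains a file-level `//stm: #integration` tag). As a rough illustration of that pattern, assuming the usual Go test layout, the placeholder below reuses scenario IDs copied from the first hunk; the test name and body are illustrative only and are not part of this commit:

package itests

import (
	"context"
	"testing"
)

// Illustration only: //stm: comments tie a test body to the scenario IDs it
// exercises, mirroring the annotations added by the hunks below.
func TestAnnotationPatternExample(t *testing.T) {
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
	ctx := context.Background()
	_ = ctx // the real tests go on to run storage deal flows with this context
}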
@@ -19,6 +19,7 @@ func TestStorageDealMissingBlock(t *testing.T) {
 	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 
 	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
 	ctx := context.Background()
 
 	// enable 512MiB proofs so we can conduct larger transfers.
@@ -19,6 +19,7 @@ func TestMaxStagingDeals(t *testing.T) {
 	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 
 	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
 	ctx := context.Background()
 
 	// enable 512MiB proofs so we can conduct larger transfers.
@@ -23,6 +23,7 @@ func TestOfflineDealFlow(t *testing.T) {
 	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 
 	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_DATA_CALCULATE_COMMP_001, @CLIENT_DATA_GENERATE_CAR_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001
 	runTest := func(t *testing.T, fastRet bool, upscale abi.PaddedPieceSize) {
 		ctx := context.Background()
 		client, miner, ens := kit.EnsembleMinimal(t, kit.WithAllSubsystems()) // no mock proofs
@@ -66,6 +67,7 @@ func TestOfflineDealFlow(t *testing.T) {
 
 		proposalCid := dh.StartDeal(ctx, dp)
 
+		//stm: @CLIENT_STORAGE_DEALS_GET_001
 		// Wait for the deal to reach StorageDealCheckForAcceptance on the client
 		cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
 		require.NoError(t, err)
@@ -21,6 +21,7 @@ func TestDealPadding(t *testing.T) {
 	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 
 	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_DATA_GET_DEAL_PIECE_CID_001
 	kit.QuietMiningLogs()
 
 	var blockTime = 250 * time.Millisecond
@@ -64,6 +65,7 @@ func TestDealPadding(t *testing.T) {
 	// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
 	time.Sleep(time.Second)
 
+	//stm: @CLIENT_STORAGE_DEALS_GET_001
 	di, err := client.ClientGetDealInfo(ctx, *proposalCid)
 	require.NoError(t, err)
 	require.True(t, di.PieceCID.Equals(pcid))
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -42,6 +43,7 @@ func TestPartialRetrieval(t *testing.T) {
 	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 
 	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_RETRIEVAL_RETRIEVE_001
 	ctx := context.Background()
 
 	policy.SetPreCommitChallengeDelay(2)
@@ -79,6 +81,7 @@ func TestPartialRetrieval(t *testing.T) {
 	}
 	proposalCid := dh.StartDeal(ctx, dp)
 
+	//stm: @CLIENT_STORAGE_DEALS_GET_001
 	// Wait for the deal to reach StorageDealCheckForAcceptance on the client
 	cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
 	require.NoError(t, err)
@@ -87,12 +90,14 @@ func TestPartialRetrieval(t *testing.T) {
 		return cd.State == storagemarket.StorageDealCheckForAcceptance
 	}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
 
+	//stm: @MINER_IMPORT_DEAL_DATA_001
 	err = miner.DealsImportData(ctx, *proposalCid, sourceCar)
 	require.NoError(t, err)
 
 	// Wait for the deal to be published, we should be able to start retrieval right away
 	dh.WaitDealPublished(ctx, proposalCid)
 
+	//stm: @CLIENT_RETRIEVAL_FIND_001
 	offers, err := client.ClientFindData(ctx, carRoot, nil)
 	require.NoError(t, err)
 	require.NotEmpty(t, offers, "no offers")
@@ -50,10 +50,12 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
 	_, res2, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})
 	require.Equal(t, res1.Root, res2.Root)
 
+	//stm: @CLIENT_STORAGE_DEALS_GET_001
 	// Retrieval
 	dealInfo, err := client.ClientGetDealInfo(ctx, *deal1)
 	require.NoError(t, err)
 
+	//stm: @CLIENT_RETRIEVAL_FIND_001
 	// fetch quote -> zero for unsealed price since unsealed file already exists.
 	offers, err := client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
 	require.NoError(t, err)
@@ -79,6 +81,7 @@ iLoop:
 		}
 	}
 
+	//stm: @CLIENT_RETRIEVAL_FIND_001
 	// get retrieval quote -> zero for unsealed price as unsealed file exists.
 	offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
 	require.NoError(t, err)
@@ -98,6 +101,7 @@ iLoop:
 		}
 	}
 
+	//stm: @CLIENT_RETRIEVAL_FIND_001
 	// fetch quote -> non-zero for unseal price as we no more unsealed files.
 	offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
 	require.NoError(t, err)
@@ -33,6 +33,7 @@ func TestDealsRetryLackOfFunds(t *testing.T) {
 	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 
 	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
 	ctx := context.Background()
 	oldDelay := policy.GetPreCommitChallengeDelay()
 	policy.SetPreCommitChallengeDelay(5)
@@ -116,6 +117,7 @@ func TestDealsRetryLackOfFunds_blockInPublishDeal(t *testing.T) {
 	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
 	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
 	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
 	ctx := context.Background()
 	oldDelay := policy.GetPreCommitChallengeDelay()
 	policy.SetPreCommitChallengeDelay(5)
@@ -196,6 +198,7 @@ func TestDealsRetryLackOfFunds_belowLimit(t *testing.T) {
 	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
 	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
 	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
 	ctx := context.Background()
 	oldDelay := policy.GetPreCommitChallengeDelay()
 	policy.SetPreCommitChallengeDelay(5)
@@ -32,6 +32,7 @@ import (
 var testdata embed.FS
 
 func TestImportLocal(t *testing.T) {
+	//stm: @CLIENT_STORAGE_DEALS_IMPORT_LOCAL_001, @CLIENT_RETRIEVAL_FIND_001
 	ds := dssync.MutexWrap(datastore.NewMapDatastore())
 	dir := t.TempDir()
 	im := imports.NewManager(ds, dir)
@@ -45,6 +46,7 @@ func TestImportLocal(t *testing.T) {
 	b, err := testdata.ReadFile("testdata/payload.txt")
 	require.NoError(t, err)
 
+	//stm @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
 	root, err := a.ClientImportLocal(ctx, bytes.NewReader(b))
 	require.NoError(t, err)
 	require.NotEqual(t, cid.Undef, root)
@@ -57,6 +59,7 @@ func TestImportLocal(t *testing.T) {
 	require.Equal(t, root, *it.Root)
 	require.True(t, strings.HasPrefix(it.CARPath, dir))
 
+	//stm @CLIENT_DATA_HAS_LOCAL_001
 	local, err := a.ClientHasLocal(ctx, root)
 	require.NoError(t, err)
 	require.True(t, local)
@@ -69,6 +72,7 @@ func TestImportLocal(t *testing.T) {
 	// retrieve as UnixFS.
 	out1 := filepath.Join(dir, "retrieval1.data") // as unixfs
 	out2 := filepath.Join(dir, "retrieval2.data") // as car
+	//stm: @CLIENT_RETRIEVAL_RETRIEVE_001
 	err = a.ClientRetrieve(ctx, order, &api.FileRef{
 		Path: out1,
 	})