Merge pull request #8053 from filecoin-project/jen/v15
chore: build: v1.14.0 -> master
commit e435b42426
@@ -113,6 +113,8 @@ type StorageMiner interface {
 	// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
 	SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
 	SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin
+	// SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort it
+	SectorAbortUpgrade(context.Context, abi.SectorNumber) error //perm:admin
 
 	// WorkerConnect tells the node to connect to workers RPC
 	WorkerConnect(context.Context, string) error //perm:admin retry:true
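The new interface method is admin-only and takes just the sector number of the in-flight SnapDeals upgrade to roll back. A minimal sketch of a caller, assuming the lotus module is available on the import path and the `api.StorageMiner` client was opened with an admin token; the wrapper name `AbortSectorUpgrade` is illustrative, not part of lotus:

```go
package snapabort

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api"
)

// AbortSectorUpgrade asks the miner to abort an in-flight SnapDeals upgrade of
// one sector. The method is annotated //perm:admin, so the client must carry
// an admin token.
func AbortSectorUpgrade(ctx context.Context, miner api.StorageMiner, num abi.SectorNumber) error {
	if err := miner.SectorAbortUpgrade(ctx, num); err != nil {
		return fmt.Errorf("aborting upgrade of sector %d: %w", num, err)
	}
	return nil
}
```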
@@ -757,6 +757,8 @@ type StorageMinerStruct struct {
 
 		SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"`
 
+		SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
+
 		SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"`
 
 		SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"`
@@ -4471,6 +4473,17 @@ func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interf
 	return nil, ErrNotSupported
 }
 
+func (s *StorageMinerStruct) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+	if s.Internal.SectorAbortUpgrade == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.SectorAbortUpgrade(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) {
 	if s.Internal.SectorAddPieceToAny == nil {
 		return *new(SectorOffset), ErrNotSupported
@@ -1,2 +1,2 @@
-/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBdRCBLUeKvoy22u5DcXs61adFn31v8WWCZgmBjDCjbsC
-/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWDUQJBA18njjXnG9RtLxoN3muvdU7PEy55QorUEsdAqdy
+/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWFHDtFx7CVTy4xoCDutVo1cScvSnQjDeaM8UzwVS1qwkh
+/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWKt8cwpkiumkT8x32c3YFxsPRwhV5J8hCYPn9mhUmcAXt
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -54,8 +54,8 @@ const UpgradeHyperdriveHeight = 420
 
 const UpgradeChocolateHeight = 312746
 
-// 2022-02-08T19:23:00Z
-const UpgradeOhSnapHeight = 676246
+// 2022-02-10T19:23:00Z
+const UpgradeOhSnapHeight = 682006
 
 func init() {
 	policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))
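A quick sanity check on the rescheduled upgrade epoch, using the fact that Filecoin epochs are 30 seconds apart (a network constant, not shown in this diff): the height moves by 5,760 epochs, which is exactly two days, matching the comment change from 2022-02-08T19:23:00Z to 2022-02-10T19:23:00Z. A small standalone sketch that verifies the arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamp of the old upgrade epoch, taken from the removed comment.
	old, _ := time.Parse(time.RFC3339, "2022-02-08T19:23:00Z")
	// 682006 - 676246 = 5760 epochs; at 30s per epoch that is 48 hours.
	shift := time.Duration((682006-676246)*30) * time.Second
	fmt.Println(old.Add(shift).Format(time.RFC3339)) // 2022-02-10T19:23:00Z
}
```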
@@ -55,6 +55,7 @@ var sectorsCmd = &cli.Command{
 		sectorsTerminateCmd,
 		sectorsRemoveCmd,
 		sectorsSnapUpCmd,
+		sectorsSnapAbortCmd,
 		sectorsMarkForUpgradeCmd,
 		sectorsStartSealCmd,
 		sectorsSealDelayCmd,
@@ -1520,6 +1521,31 @@ var sectorsSnapUpCmd = &cli.Command{
 	},
 }
 
+var sectorsSnapAbortCmd = &cli.Command{
+	Name:      "abort-upgrade",
+	Usage:     "Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to as before",
+	ArgsUsage: "<sectorNum>",
+	Action: func(cctx *cli.Context) error {
+		if cctx.Args().Len() != 1 {
+			return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number"))
+		}
+
+		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+		ctx := lcli.ReqContext(cctx)
+
+		id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
+		if err != nil {
+			return xerrors.Errorf("could not parse sector number: %w", err)
+		}
+
+		return nodeApi.SectorAbortUpgrade(ctx, abi.SectorNumber(id))
+	},
+}
+
 var sectorsMarkForUpgradeCmd = &cli.Command{
 	Name:  "mark-for-upgrade",
 	Usage: "Mark a committed capacity sector for replacement by a sector with deals",
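With the command registered in sectorsCmd above, the operator-facing entry point is, for example, `lotus-miner sectors abort-upgrade 1234` (the single argument is the sector number; 1234 is illustrative): the action only parses that number and forwards it to the new SectorAbortUpgrade API shown earlier.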
@@ -117,6 +117,7 @@
 * [SealingAbort](#SealingAbort)
 * [SealingSchedDiag](#SealingSchedDiag)
 * [Sector](#Sector)
+* [SectorAbortUpgrade](#SectorAbortUpgrade)
 * [SectorAddPieceToAny](#SectorAddPieceToAny)
 * [SectorCommitFlush](#SectorCommitFlush)
 * [SectorCommitPending](#SectorCommitPending)
@@ -2499,6 +2500,21 @@ Response: `{}`
 
 ## Sector
 
+### SectorAbortUpgrade
+SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort it
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  9
+]
+```
+
+Response: `{}`
+
 ### SectorAddPieceToAny
 Add piece to an open sector. If no sectors with enough space are open,
 either a new sector will be created, or this call will block until more
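For completeness, the same call can be made against the miner's JSON-RPC endpoint directly; the params array mirrors the `[9]` example from the docs above. This is a hedged sketch only: the default endpoint `127.0.0.1:2345/rpc/v0`, the `Filecoin.` method namespace, and the `MINER_API_TOKEN` environment variable are assumptions about a typical lotus-miner deployment and should be adjusted to yours.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Abort the upgrade of sector 9 (the sector number from the docs example).
	payload := []byte(`{"jsonrpc":"2.0","id":1,"method":"Filecoin.SectorAbortUpgrade","params":[9]}`)

	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:2345/rpc/v0", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// The method is admin-only, so an admin token is required.
	req.Header.Set("Authorization", "Bearer "+os.Getenv("MINER_API_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```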
@@ -1580,6 +1580,7 @@ COMMANDS:
      terminate         Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)
      remove            Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty))
      snap-up           Mark a committed capacity sector to be filled with deals
+     abort-upgrade     Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to as before
      mark-for-upgrade  Mark a committed capacity sector for replacement by a sector with deals
      seal              Manually start sealing a sector (filling any unused space with junk)
      set-seal-delay    Set the time, in minutes, that a new sector waits for deals before sealing starts
@@ -1815,6 +1816,19 @@ OPTIONS:
 
 ```
 
+### lotus-miner sectors abort-upgrade
+```
+NAME:
+   lotus-miner sectors abort-upgrade - Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to as before
+
+USAGE:
+   lotus-miner sectors abort-upgrade [command options] <sectorNum>
+
+OPTIONS:
+   --help, -h  show help (default: false)
+
+```
+
 ### lotus-miner sectors mark-for-upgrade
 ```
 NAME:
extern/storage-sealing/checks.go (vendored, 17 changes)
@@ -20,6 +20,7 @@ import (
 // We should implement some wait-for-api logic
 type ErrApi struct{ error }
 
+type ErrNoDeals struct{ error }
 type ErrInvalidDeals struct{ error }
 type ErrInvalidPiece struct{ error }
 type ErrExpiredDeals struct{ error }
@@ -38,12 +39,14 @@ type ErrCommitWaitFailed struct{ error }
 type ErrBadRU struct{ error }
 type ErrBadPR struct{ error }
 
-func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) error {
+func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI, mustHaveDeals bool) error {
 	tok, height, err := api.ChainHead(ctx)
 	if err != nil {
 		return &ErrApi{xerrors.Errorf("getting chain head: %w", err)}
 	}
 
+	dealCount := 0
+
 	for i, p := range si.Pieces {
 		// if no deal is associated with the piece, ensure that we added it as
 		// filler (i.e. ensure that it has a zero PieceCID)
@@ -55,6 +58,8 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api
 			continue
 		}
 
+		dealCount++
+
 		proposal, err := api.StateMarketStorageDealProposal(ctx, p.DealInfo.DealID, tok)
 		if err != nil {
 			return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)}
@@ -77,13 +82,17 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api
 		}
 	}
 
+	if mustHaveDeals && dealCount <= 0 {
+		return &ErrNoDeals{(xerrors.Errorf("sector %d must have deals, but does not", si.SectorNumber))}
+	}
+
 	return nil
 }
 
 // checkPrecommit checks that data commitment generated in the sealing process
 // matches pieces, and that the seal ticket isn't expired
 func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, height abi.ChainEpoch, api SealingAPI) (err error) {
-	if err := checkPieces(ctx, maddr, si, api); err != nil {
+	if err := checkPieces(ctx, maddr, si, api, false); err != nil {
 		return err
 	}
 
@@ -184,7 +193,7 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte,
 		return &ErrInvalidProof{xerrors.New("invalid proof (compute error?)")}
 	}
 
-	if err := checkPieces(ctx, m.maddr, si, m.Api); err != nil {
+	if err := checkPieces(ctx, m.maddr, si, m.Api, false); err != nil {
 		return err
 	}
 
@@ -194,7 +203,7 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte,
 
 // check that sector info is good after running a replica update
 func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, api SealingAPI) error {
 
-	if err := checkPieces(ctx, maddr, si, api); err != nil {
+	if err := checkPieces(ctx, maddr, si, api, true); err != nil {
 		return err
 	}
 	if !si.CCUpdate {
extern/storage-sealing/fsm.go (vendored, 7 changes)
@@ -137,27 +137,32 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
 	SnapDealsWaitDeals: planOne(
 		on(SectorAddPiece{}, SnapDealsAddPiece),
 		on(SectorStartPacking{}, SnapDealsPacking),
+		on(SectorAbortUpgrade{}, AbortUpgrade),
 	),
 	SnapDealsAddPiece: planOne(
 		on(SectorPieceAdded{}, SnapDealsWaitDeals),
 		apply(SectorStartPacking{}),
 		apply(SectorAddPiece{}),
 		on(SectorAddPieceFailed{}, SnapDealsAddPieceFailed),
+		on(SectorAbortUpgrade{}, AbortUpgrade),
 	),
 	SnapDealsPacking: planOne(
 		on(SectorPacked{}, UpdateReplica),
+		on(SectorAbortUpgrade{}, AbortUpgrade),
 	),
 	UpdateReplica: planOne(
 		on(SectorReplicaUpdate{}, ProveReplicaUpdate),
 		on(SectorUpdateReplicaFailed{}, ReplicaUpdateFailed),
 		on(SectorDealsExpired{}, SnapDealsDealsExpired),
 		on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
+		on(SectorAbortUpgrade{}, AbortUpgrade),
 	),
 	ProveReplicaUpdate: planOne(
 		on(SectorProveReplicaUpdate{}, SubmitReplicaUpdate),
 		on(SectorProveReplicaUpdateFailed{}, ReplicaUpdateFailed),
 		on(SectorDealsExpired{}, SnapDealsDealsExpired),
 		on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
+		on(SectorAbortUpgrade{}, AbortUpgrade),
 	),
 	SubmitReplicaUpdate: planOne(
 		on(SectorReplicaUpdateSubmitted{}, ReplicaUpdateWait),
@@ -238,6 +243,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
 		on(SectorRetryWaitDeals{}, SnapDealsWaitDeals),
 		apply(SectorStartPacking{}),
 		apply(SectorAddPiece{}),
+		on(SectorAbortUpgrade{}, AbortUpgrade),
 	),
 	SnapDealsDealsExpired: planOne(
 		on(SectorAbortUpgrade{}, AbortUpgrade),
@@ -256,6 +262,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
 		on(SectorRetryProveReplicaUpdate{}, ProveReplicaUpdate),
 		on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
 		on(SectorDealsExpired{}, SnapDealsDealsExpired),
+		on(SectorAbortUpgrade{}, AbortUpgrade),
 	),
 	ReleaseSectorKeyFailed: planOne(
 		on(SectorUpdateActive{}, ReleaseSectorKey),
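The hunks above register the SectorAbortUpgrade event in every in-flight snap-deal state, so a user-triggered abort can move a sector into AbortUpgrade from any of them. To make the shape of those planner tables concrete, here is a deliberately tiny, self-contained sketch of the same dispatch idea; it is not the lotus go-statemachine API, and the names `planners`/`plan` are illustrative only:

```go
package main

import "fmt"

type SectorState string
type Event interface{}

// Toy event types mirroring the ones used in the real planners.
type SectorPacked struct{}
type SectorReplicaUpdate struct{}
type SectorAbortUpgrade struct{ Reason error }

// state -> event type name -> next state (the planOne/on table, flattened)
var planners = map[SectorState]map[string]SectorState{
	"SnapDealsPacking": {
		"main.SectorPacked":       "UpdateReplica",
		"main.SectorAbortUpgrade": "AbortUpgrade",
	},
	"UpdateReplica": {
		"main.SectorReplicaUpdate": "ProveReplicaUpdate",
		"main.SectorAbortUpgrade":  "AbortUpgrade",
	},
}

func plan(cur SectorState, ev Event) SectorState {
	if next, ok := planners[cur][fmt.Sprintf("%T", ev)]; ok {
		return next
	}
	return cur // unknown events leave the state unchanged in this toy
}

func main() {
	fmt.Println(plan("SnapDealsPacking", SectorAbortUpgrade{})) // AbortUpgrade
	fmt.Println(plan("UpdateReplica", SectorAbortUpgrade{}))    // AbortUpgrade
}
```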
extern/storage-sealing/input.go (vendored, 7 changes)
@@ -545,6 +545,13 @@ func (m *Sealing) StartPacking(sid abi.SectorNumber) error {
 	return m.sectors.Send(uint64(sid), SectorStartPacking{})
 }
 
+func (m *Sealing) AbortUpgrade(sid abi.SectorNumber) error {
+	m.startupWait.Wait()
+
+	log.Infow("aborting upgrade of sector", "sector", sid, "trigger", "user")
+	return m.sectors.Send(uint64(sid), SectorAbortUpgrade{xerrors.New("triggered by user")})
+}
+
 func proposalCID(deal api.PieceDealInfo) cid.Cid {
 	pc, err := deal.DealProposal.Cid()
 	if err != nil {
extern/storage-sealing/states_failed.go (vendored, 2 changes)
@@ -499,7 +499,7 @@ func (m *Sealing) HandleRecoverDealIDs(ctx statemachine.Context, sector SectorIn
 }
 
 func (m *Sealing) handleSnapDealsRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error {
-	return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{})
+	return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{xerrors.New("failed recovering deal ids")})
 }
 
 func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, maddr address.Address) ([]int, int, error) {
extern/storage-sealing/states_replica_update.go (vendored, 11 changes)
@@ -16,7 +16,7 @@ import (
 )
 
 func (m *Sealing) handleReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error {
-	if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state
+	if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api, true); err != nil { // Sanity check state
 		return handleErrors(ctx, err, sector)
 	}
 	out, err := m.sealer.ReplicaUpdate(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.pieceInfos())
@@ -56,7 +56,7 @@ func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector Sect
 		return ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (1) failed: %w", err)})
 	}
 
-	if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state
+	if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api, true); err != nil { // Sanity check state
 		return handleErrors(ctx, err, sector)
 	}
 
@@ -78,10 +78,6 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec
 		return nil
 	}
 
-	if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state
-		return handleErrors(ctx, err, sector)
-	}
-
 	if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil {
 		return ctx.Send(SectorSubmitReplicaUpdateFailed{})
 	}
@@ -215,8 +211,7 @@ func (m *Sealing) handleReplicaUpdateWait(ctx statemachine.Context, sector Secto
 	}
 
 	if !si.SealedCID.Equals(*sector.UpdateSealed) {
-		log.Errorf("mismatch of expected onchain sealed cid after replica update, expected %s got %s", sector.UpdateSealed, si.SealedCID)
-		return ctx.Send(SectorAbortUpgrade{})
+		return ctx.Send(SectorAbortUpgrade{xerrors.Errorf("mismatch of expected onchain sealed cid after replica update, expected %s got %s", sector.UpdateSealed, si.SealedCID)})
 	}
 	return ctx.Send(SectorReplicaUpdateLanded{})
 }
extern/storage-sealing/states_sealing.go (vendored, 2 changes)
@@ -198,7 +198,7 @@ func (m *Sealing) handleGetTicket(ctx statemachine.Context, sector SectorInfo) e
 }
 
 func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error {
-	if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state
+	if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api, false); err != nil { // Sanity check state
 		switch err.(type) {
 		case *ErrApi:
 			log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err)
@@ -63,7 +63,6 @@ func runTestCCUpgrade(t *testing.T) *kit.TestFullNode {
 	}
 	waitForSectorActive(ctx, t, CCUpgrade, client, maddr)
 
 	//stm: @SECTOR_CC_UPGRADE_001
 	err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
 	require.NoError(t, err)
 
@@ -391,6 +391,10 @@ func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.Sect
 	return sm.Miner.MarkForUpgrade(ctx, id, snap)
 }
 
+func (sm *StorageMinerAPI) SectorAbortUpgrade(ctx context.Context, number abi.SectorNumber) error {
+	return sm.Miner.SectorAbortUpgrade(number)
+}
+
 func (sm *StorageMinerAPI) SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) {
 	return sm.Miner.CommitFlush(ctx)
 }
@@ -86,6 +86,10 @@ func (m *Miner) IsMarkedForUpgrade(id abi.SectorNumber) bool {
 	return m.sealing.IsMarkedForUpgrade(id)
 }
 
+func (m *Miner) SectorAbortUpgrade(sectorNum abi.SectorNumber) error {
+	return m.sealing.AbortUpgrade(sectorNum)
+}
+
 func (m *Miner) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error) {
 	return m.sealing.SectorAddPieceToAny(ctx, size, r, d)
 }
@@ -1,4 +1,54 @@
 {
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.params": {
+    "cid": "Qma5WL6abSqYg9uUQAZ3EHS286bsNsha7oAGsJBD48Bq2q",
+    "digest": "c3ad7bb549470b82ad52ed070aebb4f4",
+    "sector_size": 536870912
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.vk": {
+    "cid": "QmUa7f9JtJMsqJJ3s3ZXk6WyF4xJLE8FiqYskZGgk8GCDv",
+    "digest": "994c5b7d450ca9da348c910689f2dc7f",
+    "sector_size": 536870912
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.params": {
+    "cid": "QmQiT4qBGodrVNEgVTDXxBNDdPbaD8Ag7Sx3ZTq1zHX79S",
+    "digest": "5aedd2cf3e5c0a15623d56a1b43110ad",
+    "sector_size": 8388608
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.vk": {
+    "cid": "QmdcpKUQvHM8RFRVKbk1yHfEqMcBzhtFWKRp9SNEmWq37i",
+    "digest": "abd80269054d391a734febdac0d2e687",
+    "sector_size": 8388608
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.params": {
+    "cid": "QmYM6Hg7mjmvA3ZHTsqkss1fkdyDju5dDmLiBZGJ5pz9y9",
+    "digest": "311f92a3e75036ced01b1c0025f1fa0c",
+    "sector_size": 2048
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.vk": {
+    "cid": "QmaQsTLL3nc5dw6wAvaioJSBfd1jhQrA2o6ucFf7XeV74P",
+    "digest": "eadad9784969890d30f2749708c79771",
+    "sector_size": 2048
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.params": {
+    "cid": "QmNPc75iEfcahCwNKdqnWLtxnjspUGGR4iscjiz3wP3RtS",
+    "digest": "1b3cfd761a961543f9eb273e435a06a2",
+    "sector_size": 34359738368
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.vk": {
+    "cid": "QmdFFUe1gcz9MMHc6YW8aoV48w4ckvcERjt7PkydQAMfCN",
+    "digest": "3a6941983754737fde880d29c7094905",
+    "sector_size": 34359738368
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.params": {
+    "cid": "QmUB6xTVjzBQGuDNeyJMrrJ1byk58vhPm8eY2Lv9pgwanp",
+    "digest": "1a392e7b759fb18e036c7559b5ece816",
+    "sector_size": 68719476736
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.vk": {
+    "cid": "Qmd794Jty7k26XJ8Eg4NDEks65Qk8G4GVfGkwqvymv8HAg",
+    "digest": "80e366df2f1011953c2d01c7b7c9ee8e",
+    "sector_size": 68719476736
+  },
   "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": {
     "cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR",
     "digest": "7610b9f82bfc88405b7a832b651ce2f6",