feat: curio: allow multiple pieces per sector (#11935)
* multiple pieces per sector, DDO deals
* in-memory to DB
* SQL parser
* add seal command
* multi-piece TreeD
* redo filler pieces
* remove psql exception handling
* fix deal sectors porep
* fix tests
* DDO deals
* lower SDR CPU for test
* devnet cpu 0
* get params for itest
* fix itest sector size
* revert SDR devnet cpu
* improve SectorStatus API
* account for verified constraints
Parent: 967524aa83
Commit: 7e3846c669
.github/workflows/test.yml (4 changed lines, vendored)
@@ -69,6 +69,7 @@ jobs:
            "itest-sector_import_simple": ["self-hosted", "linux", "x64", "2xlarge"],
            "itest-wdpost": ["self-hosted", "linux", "x64", "2xlarge"],
            "unit-storage": ["self-hosted", "linux", "x64", "2xlarge"],
+           "itest-curio": ["self-hosted", "linux", "x64", "2xlarge"],
            "itest-batch_deal": ["self-hosted", "linux", "x64", "xlarge"],
            "itest-cli": ["self-hosted", "linux", "x64", "xlarge"],
@@ -144,7 +145,8 @@ jobs:
            "itest-worker",
            "multicore-sdr",
            "unit-cli",
-           "unit-storage"
+           "unit-storage",
+           "itest-curio"
          ]
        run: |
          # Create a list of integration test groups
@@ -8,6 +8,7 @@ import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

+	lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece"
	"github.com/filecoin-project/lotus/storage/sealer/fsutil"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
@@ -15,7 +16,7 @@ import (
type Curio interface {
	Version(context.Context) (Version, error) //perm:admin

-	AllocatePieceToSector(ctx context.Context, maddr address.Address, piece PieceDealInfo, rawSize int64, source url.URL, header http.Header) (SectorOffset, error) //perm:write
+	AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (SectorOffset, error) //perm:write

	StorageInit(ctx context.Context, path string, opts storiface.LocalStorageMeta) error //perm:admin
	StorageAddLocal(ctx context.Context, path string) error //perm:admin
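For context, here is a minimal caller-side sketch (not part of this diff) of how the updated interface is used: the caller supplies an already-built lpiece.PieceDealInfo plus a URL from which the Curio node can pull the raw piece data. Constructing PieceDealInfo field by field is out of scope here, and the Authorization header is a hypothetical example of whatever the data source might require.

package example

import (
	"context"
	"net/http"
	"net/url"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
	lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece"
)

// ingestPiece hands one deal piece to a Curio node and reports where it landed.
// `c` can be any implementation of api.Curio (e.g. an RPC client).
func ingestPiece(ctx context.Context, c api.Curio, maddr address.Address,
	deal lpiece.PieceDealInfo, rawSize int64, source url.URL) (api.SectorOffset, error) {
	hdr := http.Header{}
	hdr.Set("Authorization", "Bearer example-token") // hypothetical; depends on the data source

	so, err := c.AllocatePieceToSector(ctx, maddr, deal, rawSize, source, hdr)
	if err != nil {
		return api.SectorOffset{}, xerrors.Errorf("allocating piece: %w", err)
	}
	// so.Sector and so.Offset identify the sector and the padded offset the piece was placed at.
	return so, nil
}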
@@ -44,6 +44,7 @@ import (
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/filecoin-project/lotus/node/repo/imports"
	"github.com/filecoin-project/lotus/storage/pipeline/piece"
+	lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece"
	"github.com/filecoin-project/lotus/storage/pipeline/sealiface"
	"github.com/filecoin-project/lotus/storage/sealer/fsutil"
	"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
@@ -120,7 +121,7 @@ type CurioStruct struct {
}

type CurioMethods struct {
-	AllocatePieceToSector func(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) `perm:"write"`
+	AllocatePieceToSector func(p0 context.Context, p1 address.Address, p2 lpiece.PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) `perm:"write"`

	LogList func(p0 context.Context) ([]string, error) `perm:"read"`
@@ -1498,14 +1499,14 @@ func (s *CommonStub) Version(p0 context.Context) (APIVersion, error) {
	return *new(APIVersion), ErrNotSupported
}

-func (s *CurioStruct) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) {
+func (s *CurioStruct) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 lpiece.PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) {
	if s.Internal.AllocatePieceToSector == nil {
		return *new(SectorOffset), ErrNotSupported
	}
	return s.Internal.AllocatePieceToSector(p0, p1, p2, p3, p4, p5)
}

-func (s *CurioStub) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) {
+func (s *CurioStub) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 lpiece.PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) {
	return *new(SectorOffset), ErrNotSupported
}
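The generated CurioStruct/CurioStub plumbing above follows the usual lotus proxy pattern: the struct holds one function pointer per method, the RPC client wires those pointers up at runtime, and any method that was never wired up fails with ErrNotSupported. A stripped-down, self-contained sketch of that pattern (illustrative names only, not the generated code):

package example

import (
	"context"
	"errors"
)

var ErrNotSupported = errors.New("method not supported")

// PingStruct mimics a generated proxy struct: exported methods dispatch
// through the Internal function pointers.
type PingStruct struct {
	Internal struct {
		Ping func(ctx context.Context, msg string) (string, error) `perm:"read"`
	}
}

func (s *PingStruct) Ping(ctx context.Context, msg string) (string, error) {
	if s.Internal.Ping == nil {
		return "", ErrNotSupported // nothing populated this method
	}
	return s.Internal.Ping(ctx, msg)
}

Because only the field's parameter type (and the perm tag) is consulted when the transport fills in Internal, switching the piece argument to lpiece.PieceDealInfo is the whole of the generated-code change.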
File diff suppressed because it is too large
[Regenerated API-documentation (OpenRPC) diff condensed: a long run of identical hunks in which only the "Github remote link" URL changes, each permalink into api/proxy_gen.go shifting down by one line (from #L4416→#L4417 through #L5505→#L5506) to account for the one added line in proxy_gen.go above. No other fields change in the hunks shown.]
File diff suppressed because it is too large
[Second regenerated API-documentation diff condensed in the same way: every "Github remote link" permalink into api/proxy_gen.go shifts down by one line (from #L7331→#L7332 through #L7727→#L7728); nothing else changes in the hunks shown.]
@@ -1268,6 +1268,11 @@ var ActorNewMinerCmd = &cli.Command{
			Name:  "sector-size",
			Usage: "specify sector size to use for new miner initialisation",
		},
+		&cli.IntFlag{
+			Name:  "confidence",
+			Usage: "number of block confirmations to wait for",
+			Value: int(build.MessageConfidence),
+		},
	},
	Action: func(cctx *cli.Context) error {
		ctx := cctx.Context
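The new --confidence flag feeds the usual lotus wait-for-message flow. The exact call site is not shown in this hunk, so the following is a hedged sketch of how such a value is typically consumed; StateWaitMsg and api.LookbackNoLimit are existing lotus API surface, while the function and its names are hypothetical.

package example

import (
	"context"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/api"
)

// waitCreateMiner blocks until msg has `confidence` confirmations on top of the
// including tipset, then checks that the message actually succeeded.
func waitCreateMiner(ctx context.Context, full api.FullNode, msg cid.Cid, confidence uint64) error {
	ml, err := full.StateWaitMsg(ctx, msg, confidence, api.LookbackNoLimit, true)
	if err != nil {
		return xerrors.Errorf("waiting for create-miner message: %w", err)
	}
	if ml.Receipt.ExitCode != 0 {
		return xerrors.Errorf("create-miner message failed: exit code %d", ml.Receipt.ExitCode)
	}
	return nil
}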
@@ -220,7 +220,8 @@ var configViewCmd = &cli.Command{
		if err != nil {
			return err
		}
-		curioConfig, err := deps.GetConfig(cctx, db)
+		layers := cctx.StringSlice("layers")
+		curioConfig, err := deps.GetConfig(cctx.Context, layers, db)
		if err != nil {
			return err
		}
@@ -219,7 +219,7 @@ func (deps *Deps) PopulateRemainingDeps(ctx context.Context, cctx *cli.Context,

	if deps.Cfg == nil {
		// The config feeds into task runners & their helpers
-		deps.Cfg, err = GetConfig(cctx, deps.DB)
+		deps.Cfg, err = GetConfig(cctx.Context, cctx.StringSlice("layers"), deps.DB)
		if err != nil {
			return xerrors.Errorf("populate config: %w", err)
		}
@@ -371,13 +371,13 @@ func LoadConfigWithUpgrades(text string, curioConfigWithDefaults *config.CurioCo
	}
	return meta, err
}
-func GetConfig(cctx *cli.Context, db *harmonydb.DB) (*config.CurioConfig, error) {
+func GetConfig(ctx context.Context, layers []string, db *harmonydb.DB) (*config.CurioConfig, error) {
	curioConfig := config.DefaultCurioConfig()
	have := []string{}
-	layers := append([]string{"base"}, cctx.StringSlice("layers")...) // Always stack on top of "base" layer
+	layers = append([]string{"base"}, layers...) // Always stack on top of "base" layer
	for _, layer := range layers {
		text := ""
-		err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text)
+		err := db.QueryRow(ctx, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text)
		if err != nil {
			if strings.Contains(err.Error(), sql.ErrNoRows.Error()) {
				return nil, fmt.Errorf("missing layer '%s' ", layer)
@@ -420,7 +420,9 @@ func GetDepsCLI(ctx context.Context, cctx *cli.Context) (*Deps, error) {
		return nil, err
	}

-	cfg, err := GetConfig(cctx, db)
+	layers := cctx.StringSlice("layers")
+
+	cfg, err := GetConfig(cctx.Context, layers, db)
	if err != nil {
		return nil, err
	}
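GetConfig now takes the context and the layer names directly instead of digging them out of the cli.Context, and it always prepends "base" so every stack starts from the stored base layer before caller-supplied layers override it. A minimal sketch of that layering idea, assuming the BurntSushi TOML decoder (github.com/BurntSushi/toml) and a toy config struct, not the real lotus config types or the upgrade handling in LoadConfigWithUpgrades:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// toyConfig stands in for config.CurioConfig; the real struct is much larger.
type toyConfig struct {
	Subsystems struct {
		EnableParkPiece bool
	}
}

// stackLayers applies TOML fragments in order on top of the defaults, so a
// later layer overrides only the fields it actually sets.
func stackLayers(defaults toyConfig, layerTexts []string) (toyConfig, error) {
	cfg := defaults
	for i, text := range layerTexts {
		if _, err := toml.Decode(text, &cfg); err != nil {
			return cfg, fmt.Errorf("decoding layer %d: %w", i, err)
		}
	}
	return cfg, nil
}

func main() {
	base := "[Subsystems]"                         // "base" layer with no overrides
	seal := "[Subsystems]\nEnableParkPiece = true" // a caller-supplied layer
	cfg, err := stackLayers(toyConfig{}, []string{base, seal})
	fmt.Println(cfg.Subsystems.EnableParkPiece, err) // true <nil>
}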
@@ -3,18 +3,27 @@ package main
import (
	"fmt"
	"sort"
	"strconv"

	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
	lcli "github.com/filecoin-project/lotus/cli"
	"github.com/filecoin-project/lotus/cmd/curio/deps"
	"github.com/filecoin-project/lotus/curiosrc/market/lmrpc"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)

var marketCmd = &cli.Command{
	Name: "market",
	Subcommands: []*cli.Command{
		marketRPCInfoCmd,
		marketSealCmd,
	},
}
@@ -31,7 +40,9 @@ var marketRPCInfoCmd = &cli.Command{
			return err
		}

-		cfg, err := deps.GetConfig(cctx, db)
+		layers := cctx.StringSlice("layers")
+
+		cfg, err := deps.GetConfig(cctx.Context, layers, db)
		if err != nil {
			return xerrors.Errorf("get config: %w", err)
		}
@@ -68,3 +79,123 @@ var marketRPCInfoCmd = &cli.Command{
	},
	Name: "rpc-info",
}
var marketSealCmd = &cli.Command{
|
||||
Name: "seal",
|
||||
Usage: "start sealing a deal sector early",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "actor",
|
||||
Usage: "Specify actor address to start sealing sectors for",
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "layers",
|
||||
Usage: "list of layers to be interpreted (atop defaults). Default: base",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "synthetic",
|
||||
Usage: "Use synthetic PoRep",
|
||||
Value: false, // todo implement synthetic
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
act, err := address.NewFromString(cctx.String("actor"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing --actor: %w", err)
|
||||
}
|
||||
|
||||
if cctx.Args().Len() > 1 {
|
||||
return xerrors.Errorf("specify only one sector")
|
||||
}
|
||||
|
||||
sec := cctx.Args().First()
|
||||
|
||||
sector, err := strconv.ParseUint(sec, 10, 64)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to parse the sector number: %w", err)
|
||||
}
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
dep, err := deps.GetDepsCLI(ctx, cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(act)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting miner id: %w", err)
|
||||
}
|
||||
|
||||
mi, err := dep.Full.StateMinerInfo(ctx, act, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting miner info: %w", err)
|
||||
}
|
||||
|
||||
nv, err := dep.Full.StateNetworkVersion(ctx, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting network version: %w", err)
|
||||
}
|
||||
|
||||
wpt := mi.WindowPoStProofType
|
||||
spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, wpt, cctx.Bool("synthetic"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting seal proof type: %w", err)
|
||||
}
|
||||
|
||||
comm, err := dep.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
|
||||
// Get current open sector pieces from DB
|
||||
var pieces []struct {
|
||||
Sector abi.SectorNumber `db:"sector_number"`
|
||||
Size abi.PaddedPieceSize `db:"piece_size"`
|
||||
Index uint64 `db:"piece_index"`
|
||||
}
|
||||
err = tx.Select(&pieces, `
|
||||
SELECT
|
||||
sector_number,
|
||||
piece_size,
|
||||
piece_index,
|
||||
FROM
|
||||
open_sector_pieces
|
||||
WHERE
|
||||
sp_id = $1 AND sector_number = $2
|
||||
ORDER BY
|
||||
piece_index DESC;`, mid, sector)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting open sectors from DB")
|
||||
}
|
||||
|
||||
if len(pieces) < 1 {
|
||||
return false, xerrors.Errorf("sector %d is not waiting to be sealed", sector)
|
||||
}
|
||||
|
||||
cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, mid, sector, spt)
|
||||
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
|
||||
}
|
||||
|
||||
if cn != 1 {
|
||||
return false, xerrors.Errorf("incorrect number of rows returned")
|
||||
}
|
||||
|
||||
_, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", mid, sector)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
|
||||
}, harmonydb.OptionRetry())
|
||||
|
||||
if err != nil {
|
||||
return xerrors.Errorf("start sealing sector: %w", err)
|
||||
}
|
||||
|
||||
if !comm {
|
||||
return xerrors.Errorf("start sealing sector: commit failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
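The seal command only moves a sector into sectors_sdr_pipeline if its open pieces are transferred in the same commit. A minimal sketch of the HarmonyDB transaction pattern it relies on follows; ctx, db, spID, sectorNum and spt are assumed to be the values the command has already parsed, and only calls that appear in this patch (BeginTransaction, tx.Exec, harmonydb.OptionRetry) are used.

// Sketch: the transactional pattern used by `curio market seal`.
comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
	// Any error returned here rolls the whole transaction back.
	n, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3)`, spID, sectorNum, spt)
	if err != nil {
		return false, err
	}
	if n != 1 {
		return false, xerrors.Errorf("expected exactly one inserted row, got %d", n)
	}
	// Returning true asks HarmonyDB to commit.
	return true, nil
}, harmonydb.OptionRetry()) // OptionRetry re-runs the callback on serialization conflicts
if err != nil {
	return xerrors.Errorf("seal transaction: %w", err)
}
if !comm {
	return xerrors.Errorf("seal transaction did not commit")
}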
@ -37,6 +37,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/metrics/proxy"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
"github.com/filecoin-project/lotus/storage/paths"
|
||||
"github.com/filecoin-project/lotus/storage/pipeline/piece"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
@ -154,10 +155,23 @@ func (p *CurioAPI) StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsS
|
||||
return p.Stor.FsStat(ctx, id)
|
||||
}
|
||||
|
||||
func (p *CurioAPI) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
|
||||
di := market.NewPieceIngester(p.Deps.DB, p.Deps.Full)
|
||||
func (p *CurioAPI) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece piece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
|
||||
di, err := market.NewPieceIngester(ctx, p.Deps.DB, p.Deps.Full, maddr, true, time.Minute)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("failed to create a piece ingestor")
|
||||
}
|
||||
|
||||
return di.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header)
|
||||
sector, err := di.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("failed to add piece to a sector: %w", err)
|
||||
}
|
||||
|
||||
err = di.Seal()
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("failed to start sealing the sector %d for actor %s", sector.Sector, maddr)
|
||||
}
|
||||
|
||||
return sector, nil
|
||||
}
|
||||
|
||||
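The new API method takes the piece data by reference: Curio fetches rawSize bytes from source, sending header with the request. A hedged sketch of a caller driving it; the client handle, deal info, sizes, URL and token are assumptions, only the call shape comes from the signature above.

// Sketch only: invoking AllocatePieceToSector. `curioAPI`, `maddr`, `dealInfo`
// (an lpiece.PieceDealInfo) and `rawSize` are assumed to exist; the URL and
// token are illustrative.
src, err := url.Parse("https://example.com/pieces/mypiece.car")
if err != nil {
	return err
}
hdr := http.Header{"Authorization": []string{"Bearer <token>"}}

so, err := curioAPI.AllocatePieceToSector(ctx, maddr, dealInfo, rawSize, *src, hdr)
if err != nil {
	return xerrors.Errorf("allocating piece to a sector: %w", err)
}
log.Infow("piece allocated", "sector", so.Sector, "offset", so.Offset)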
// Trigger shutdown
|
||||
|
@ -85,7 +85,10 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.Task
|
||||
{
|
||||
// Piece handling
|
||||
if cfg.Subsystems.EnableParkPiece {
|
||||
parkPieceTask := piece.NewParkPieceTask(db, must.One(slrLazy.Val()), cfg.Subsystems.ParkPieceMaxTasks)
|
||||
parkPieceTask, err := piece.NewParkPieceTask(db, must.One(slrLazy.Val()), cfg.Subsystems.ParkPieceMaxTasks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cleanupPieceTask := piece.NewCleanupPieceTask(db, must.One(slrLazy.Val()), 0)
|
||||
activeTasks = append(activeTasks, parkPieceTask, cleanupPieceTask)
|
||||
}
|
||||
@ -134,7 +137,7 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.Task
|
||||
activeTasks = append(activeTasks, moveStorageTask)
|
||||
}
|
||||
if cfg.Subsystems.EnableSendCommitMsg {
|
||||
commitTask := seal.NewSubmitCommitTask(sp, db, full, sender, as, cfg.Fees.MaxCommitGasFee)
|
||||
commitTask := seal.NewSubmitCommitTask(sp, db, full, sender, as, cfg)
|
||||
activeTasks = append(activeTasks, commitTask)
|
||||
}
|
||||
}
|
||||
|
@ -220,6 +220,13 @@ var minerCreateCmd = &cli.Command{
|
||||
Name: "create",
|
||||
Usage: "sends a create miner message",
|
||||
ArgsUsage: "[sender] [owner] [worker] [sector size]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.IntFlag{
|
||||
Name: "confidence",
|
||||
Usage: "number of block confirmations to wait for",
|
||||
Value: int(build.MessageConfidence),
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
wapi, closer, err := lcli.GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
@ -274,7 +281,7 @@ var minerCreateCmd = &cli.Command{
|
||||
log.Infof("Initializing worker account %s, message: %s", worker, signed.Cid())
|
||||
log.Infof("Waiting for confirmation")
|
||||
|
||||
mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence)
|
||||
mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), uint64(cctx.Int("confidence")))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("waiting for worker init: %w", err)
|
||||
}
|
||||
|
@ -36,7 +36,7 @@ if [ ! -f $CURIO_REPO_PATH/.init.curio ]; then
|
||||
echo Initiating a new Curio cluster ...
|
||||
curio config new-cluster $newminer
|
||||
echo Enabling market ...
|
||||
curio config get seal | sed -e $'$a\\\n BoostAdapters = ["'"$newminer"':'"$myip"':32100"]' | curio config set --title seal
|
||||
curio config get seal | sed -e $'$a\\\n BoostAdapters = ["'"$newminer"':'"$myip"':32100"]\n EnableParkPiece = true' | curio config set --title seal
|
||||
touch $CURIO_REPO_PATH/.init.config
|
||||
fi
|
||||
|
||||
|
@ -648,7 +648,7 @@ func (sb *SealCalls) TreeD(ctx context.Context, sector storiface.SectorRef, unse
|
||||
}
|
||||
|
||||
if treeDUnsealed != unsealed {
|
||||
return xerrors.Errorf("tree-d cid mismatch with supplied unsealed cid")
|
||||
return xerrors.Errorf("tree-d cid %s mismatch with supplied unsealed cid %s", treeDUnsealed, unsealed)
|
||||
}
|
||||
|
||||
if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache); err != nil {
|
||||
|
@ -3,15 +3,19 @@ package market
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
"github.com/filecoin-project/go-padreader"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
@ -19,106 +23,280 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/curiosrc/seal"
|
||||
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
|
||||
lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece"
|
||||
)
|
||||
|
||||
var log = logging.Logger("piece-ingestor")
|
||||
|
||||
const loopFrequency = 10 * time.Second
|
||||
|
||||
type Ingester interface {
|
||||
AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error)
|
||||
AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error)
|
||||
}
|
||||
|
||||
type PieceIngesterApi interface {
|
||||
ChainHead(context.Context) (*types.TipSet, error)
|
||||
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)
|
||||
StateMinerAllocated(ctx context.Context, a address.Address, key types.TipSetKey) (*bitfield.BitField, error)
|
||||
StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error)
|
||||
StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
|
||||
StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
|
||||
}
|
||||
|
||||
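PieceIngesterApi is deliberately a small chain-query subset; the lmrpc server further down hands NewPieceIngester a full node client for it. A hedged compile-time restatement of that relationship:

// Sketch: compile-time check that the full node API covers the ingester's
// query needs, mirroring how ServeCurioMarketRPC passes an api.FullNode
// straight into NewPieceIngester.
var _ = func(f api.FullNode) PieceIngesterApi { return f }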
type openSector struct {
|
||||
number abi.SectorNumber
|
||||
currentSize abi.PaddedPieceSize
|
||||
earliestStartEpoch abi.ChainEpoch
|
||||
index uint64
|
||||
openedAt *time.Time
|
||||
latestEndEpoch abi.ChainEpoch
|
||||
}
|
||||
|
||||
type PieceIngester struct {
|
||||
db *harmonydb.DB
|
||||
api PieceIngesterApi
|
||||
ctx context.Context
|
||||
db *harmonydb.DB
|
||||
api PieceIngesterApi
|
||||
miner address.Address
|
||||
mid uint64 // miner ID
|
||||
windowPoStProofType abi.RegisteredPoStProof
|
||||
synth bool
|
||||
sectorSize abi.SectorSize
|
||||
sealRightNow bool // Should be true only for CurioAPI AllocatePieceToSector method
|
||||
maxWaitTime time.Duration
|
||||
}
|
||||
|
||||
func NewPieceIngester(db *harmonydb.DB, api PieceIngesterApi) *PieceIngester {
|
||||
return &PieceIngester{db: db, api: api}
|
||||
type pieceDetails struct {
|
||||
Sector abi.SectorNumber `db:"sector_number"`
|
||||
Size abi.PaddedPieceSize `db:"piece_size"`
|
||||
StartEpoch abi.ChainEpoch `db:"deal_start_epoch"`
|
||||
EndEpoch abi.ChainEpoch `db:"deal_end_epoch"`
|
||||
Index uint64 `db:"piece_index"`
|
||||
CreatedAt *time.Time `db:"created_at"`
|
||||
}
|
||||
|
||||
func (p *PieceIngester) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
|
||||
mi, err := p.api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
||||
type verifiedDeal struct {
|
||||
isVerified bool
|
||||
tmin abi.ChainEpoch
|
||||
tmax abi.ChainEpoch
|
||||
}
|
||||
|
||||
func NewPieceIngester(ctx context.Context, db *harmonydb.DB, api PieceIngesterApi, maddr address.Address, sealRightNow bool, maxWaitTime time.Duration) (*PieceIngester, error) {
|
||||
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, err
|
||||
}
|
||||
|
||||
if piece.DealProposal.PieceSize != abi.PaddedPieceSize(mi.SectorSize) {
|
||||
return api.SectorOffset{}, xerrors.Errorf("only full sector pieces supported for now")
|
||||
}
|
||||
|
||||
// check raw size
|
||||
if piece.DealProposal.PieceSize != padreader.PaddedSize(uint64(rawSize)).Padded() {
|
||||
return api.SectorOffset{}, xerrors.Errorf("raw size doesn't match padded piece size")
|
||||
}
|
||||
|
||||
// add initial piece + to a sector
|
||||
nv, err := p.api.StateNetworkVersion(ctx, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("getting network version: %w", err)
|
||||
}
|
||||
|
||||
synth := false // todo synthetic porep config
|
||||
spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, mi.WindowPoStProofType, synth)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("getting seal proof type: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(maddr)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("getting miner ID: %w", err)
|
||||
return nil, xerrors.Errorf("getting miner ID: %w", err)
|
||||
}
|
||||
|
||||
pi := &PieceIngester{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
api: api,
|
||||
sealRightNow: sealRightNow,
|
||||
miner: maddr,
|
||||
maxWaitTime: maxWaitTime,
|
||||
sectorSize: mi.SectorSize,
|
||||
windowPoStProofType: mi.WindowPoStProofType,
|
||||
mid: mid,
|
||||
synth: false, // TODO: synthetic porep config
|
||||
}
|
||||
|
||||
go pi.start()
|
||||
|
||||
return pi, nil
|
||||
}
|
||||
|
||||
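The two call sites in this patch construct the ingester differently: the market RPC adapter runs a background batching ingester, while the Curio API seals in the same call. A hedged side-by-side sketch, using the argument values visible elsewhere in this change (db, full, maddr and conf are the usual deps):

// Market path: batch pieces into open sectors and let the ticker in start()
// decide when to seal (see ServeCurioMarketRPC below).
batching, err := cumarket.NewPieceIngester(ctx, db, full, maddr, false, time.Duration(conf.Ingest.MaxDealWaitTime))
if err != nil {
	return xerrors.Errorf("starting piece ingester: %w", err)
}

// API path: allocate and start sealing in the same call
// (see CurioAPI.AllocatePieceToSector above).
immediate, err := cumarket.NewPieceIngester(ctx, db, full, maddr, true, time.Minute)
if err != nil {
	return xerrors.Errorf("starting piece ingester: %w", err)
}
_, _ = batching, immediate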
func (p *PieceIngester) start() {
|
||||
ticker := time.NewTicker(loopFrequency)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-p.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
err := p.Seal()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PieceIngester) Seal() error {
|
||||
head, err := p.api.ChainHead(p.ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting chain head: %w", err)
|
||||
}
|
||||
|
||||
spt, err := p.getSealProofType()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting seal proof type: %w", err)
|
||||
}
|
||||
|
||||
shouldSeal := func(sector *openSector) bool {
|
||||
// Start sealing a sector if
|
||||
// 1. If sector is full
|
||||
// 2. We have been waiting for MaxWaitDuration
|
||||
// 3. The earliest deal start epoch is less than 8 hours away // todo: make this config?
|
||||
if sector.currentSize == abi.PaddedPieceSize(p.sectorSize) {
|
||||
log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "sector full")
|
||||
return true
|
||||
}
|
||||
if time.Since(*sector.openedAt) > p.maxWaitTime {
|
||||
log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "MaxWaitTime reached")
|
||||
return true
|
||||
}
|
||||
if sector.earliestStartEpoch < head.Height()+abi.ChainEpoch(960) {
|
||||
log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "earliest start epoch")
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
comm, err := p.db.BeginTransaction(p.ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
|
||||
|
||||
openSectors, err := p.getOpenSectors(tx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, sector := range openSectors {
|
||||
sector := sector
|
||||
if shouldSeal(sector) {
|
||||
// Start sealing the sector
|
||||
cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, p.mid, sector.number, spt)
|
||||
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
|
||||
}
|
||||
|
||||
if cn != 1 {
|
||||
return false, xerrors.Errorf("adding sector to pipeline: incorrect number of rows returned")
|
||||
}
|
||||
|
||||
_, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", p.mid, sector.number)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return true, nil
|
||||
}, harmonydb.OptionRetry())
|
||||
|
||||
if err != nil {
|
||||
return xerrors.Errorf("start sealing sector: %w", err)
|
||||
}
|
||||
|
||||
if !comm {
|
||||
return xerrors.Errorf("start sealing sector: commit failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
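The 960-epoch threshold in shouldSeal is where the "8 hours" in the comment above comes from. A tiny worked example, assuming mainnet's 30-second epoch duration:

// Sketch: why the "earliest start epoch" trigger reads as "less than 8 hours away".
// Mainnet epochs are 30s, so 960 epochs is 8 hours; a deal whose start epoch is
// closer than that forces the open sector to seal even if it is not full yet.
const epochDuration = 30 * time.Second // mainnet block delay, assumed here
deadline := time.Duration(960) * epochDuration
fmt.Println(deadline) // 8h0m0s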
func (p *PieceIngester) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
|
||||
if maddr != p.miner {
|
||||
return api.SectorOffset{}, xerrors.Errorf("miner address doesn't match")
|
||||
}
|
||||
|
||||
// check raw size
|
||||
if piece.Size() != padreader.PaddedSize(uint64(rawSize)).Padded() {
|
||||
return api.SectorOffset{}, xerrors.Errorf("raw size doesn't match padded piece size")
|
||||
}
|
||||
|
||||
var propJson []byte
|
||||
|
||||
dataHdrJson, err := json.Marshal(header)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("json.Marshal(header): %w", err)
|
||||
}
|
||||
|
||||
vd := verifiedDeal{
|
||||
isVerified: false,
|
||||
}
|
||||
|
||||
if piece.DealProposal != nil {
|
||||
vd.isVerified = piece.DealProposal.VerifiedDeal
|
||||
if vd.isVerified {
|
||||
alloc, err := p.api.StateGetAllocationForPendingDeal(ctx, piece.DealID, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("getting pending allocation for deal %d: %w", piece.DealID, err)
|
||||
}
|
||||
if alloc == nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("no allocation found for deal %d: %w", piece.DealID, err)
|
||||
}
|
||||
vd.tmin = alloc.TermMin
|
||||
vd.tmax = alloc.TermMax
|
||||
}
|
||||
propJson, err = json.Marshal(piece.DealProposal)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("json.Marshal(piece.DealProposal): %w", err)
|
||||
}
|
||||
} else {
|
||||
vd.isVerified = piece.PieceActivationManifest.VerifiedAllocationKey != nil
|
||||
if vd.isVerified {
|
||||
client, err := address.NewIDAddress(uint64(piece.PieceActivationManifest.VerifiedAllocationKey.Client))
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("getting client address from actor ID: %w", err)
|
||||
}
|
||||
alloc, err := p.api.StateGetAllocation(ctx, client, verifregtypes.AllocationId(piece.PieceActivationManifest.VerifiedAllocationKey.ID), types.EmptyTSK)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("getting allocation details for %d: %w", piece.PieceActivationManifest.VerifiedAllocationKey.ID, err)
|
||||
}
|
||||
if alloc == nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("no allocation found for ID %d: %w", piece.PieceActivationManifest.VerifiedAllocationKey.ID, err)
|
||||
}
|
||||
vd.tmin = alloc.TermMin
|
||||
vd.tmax = alloc.TermMax
|
||||
}
|
||||
propJson, err = json.Marshal(piece.PieceActivationManifest)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("json.Marshal(piece.PieceActivationManifest): %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if !p.sealRightNow {
|
||||
// Try to allocate the piece to an open sector
|
||||
allocated, ret, err := p.allocateToExisting(ctx, piece, rawSize, source, dataHdrJson, propJson, vd)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, err
|
||||
}
|
||||
if allocated {
|
||||
return ret, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Either sealRightNow is set or the piece did not fit into any open sector; create a new sector and add the piece to it
|
||||
num, err := seal.AllocateSectorNumbers(ctx, p.api, p.db, maddr, 1, func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) {
|
||||
if len(numbers) != 1 {
|
||||
return false, xerrors.Errorf("expected one sector number")
|
||||
}
|
||||
n := numbers[0]
|
||||
|
||||
_, err := tx.Exec("INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3)", mid, n, spt)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err)
|
||||
if piece.DealProposal != nil {
|
||||
_, err = tx.Exec(`SELECT insert_sector_market_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`,
|
||||
p.mid, n, 0,
|
||||
piece.DealProposal.PieceCID, piece.DealProposal.PieceSize,
|
||||
source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed,
|
||||
piece.PublishCid, piece.DealID, propJson, piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("adding deal to sector: %w", err)
|
||||
}
|
||||
} else {
|
||||
_, err = tx.Exec(`SELECT insert_sector_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`,
|
||||
p.mid, n, 0,
|
||||
piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size,
|
||||
source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed,
|
||||
piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("adding deal to sector: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
dataHdrJson, err := json.Marshal(header)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("json.Marshal(header): %w", err)
|
||||
}
|
||||
|
||||
dealProposalJson, err := json.Marshal(piece.DealProposal)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("json.Marshal(piece.DealProposal): %w", err)
|
||||
}
|
||||
|
||||
_, err = tx.Exec(`INSERT INTO sectors_sdr_initial_pieces (sp_id,
|
||||
sector_number,
|
||||
piece_index,
|
||||
|
||||
piece_cid,
|
||||
piece_size,
|
||||
|
||||
data_url,
|
||||
data_headers,
|
||||
data_raw_size,
|
||||
data_delete_on_finalize,
|
||||
|
||||
f05_publish_cid,
|
||||
f05_deal_id,
|
||||
f05_deal_proposal,
|
||||
f05_deal_start_epoch,
|
||||
f05_deal_end_epoch) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`,
|
||||
mid, n, 0,
|
||||
piece.DealProposal.PieceCID, piece.DealProposal.PieceSize,
|
||||
source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed,
|
||||
piece.PublishCid, piece.DealID, dealProposalJson,
|
||||
piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("inserting into sectors_sdr_initial_pieces: %w", err)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
@ -129,10 +307,241 @@ func (p *PieceIngester) AllocatePieceToSector(ctx context.Context, maddr address
|
||||
return api.SectorOffset{}, xerrors.Errorf("expected one sector number")
|
||||
}
|
||||
|
||||
// After we insert the piece/sector_pipeline entries, the lpseal/poller will take it from here
|
||||
if p.sealRightNow {
|
||||
err = p.SectorStartSealing(ctx, num[0])
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("SectorStartSealing: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return api.SectorOffset{
|
||||
Sector: num[0],
|
||||
Offset: 0,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *PieceIngester) allocateToExisting(ctx context.Context, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, dataHdrJson, propJson []byte, vd verifiedDeal) (bool, api.SectorOffset, error) {
|
||||
|
||||
var ret api.SectorOffset
|
||||
var allocated bool
|
||||
var rerr error
|
||||
|
||||
comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
|
||||
openSectors, err := p.getOpenSectors(tx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
pieceSize := piece.Size()
|
||||
for _, sec := range openSectors {
|
||||
sec := sec
|
||||
if sec.currentSize+pieceSize <= abi.PaddedPieceSize(p.sectorSize) {
|
||||
if vd.isVerified {
|
||||
sectorLifeTime := sec.latestEndEpoch - sec.earliestStartEpoch
|
||||
// The sector lifetime must be at least the allocation's TermMin and at most its TermMax
|
||||
// Based on https://github.com/filecoin-project/builtin-actors/blob/a0e34d22665ac8c84f02fea8a099216f29ffaeeb/actors/verifreg/src/lib.rs#L1071-L1086
|
||||
if sectorLifeTime < vd.tmin || sectorLifeTime > vd.tmax {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
ret.Sector = sec.number
|
||||
ret.Offset = sec.currentSize
|
||||
|
||||
// Insert market deal to DB for the sector
|
||||
if piece.DealProposal != nil {
|
||||
cn, err := tx.Exec(`SELECT insert_sector_market_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`,
|
||||
p.mid, sec.number, sec.index+1,
|
||||
piece.DealProposal.PieceCID, piece.DealProposal.PieceSize,
|
||||
source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed,
|
||||
piece.PublishCid, piece.DealID, propJson, piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch)
|
||||
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("adding deal to sector: %w", err)
|
||||
}
|
||||
|
||||
if cn != 1 {
|
||||
return false, xerrors.Errorf("expected one piece")
|
||||
}
|
||||
|
||||
} else { // Insert DDO deal to DB for the sector
|
||||
cn, err := tx.Exec(`SELECT insert_sector_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`,
|
||||
p.mid, sec.number, sec.index+1,
|
||||
piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size,
|
||||
source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed,
|
||||
piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson)
|
||||
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("adding deal to sector: %w", err)
|
||||
}
|
||||
|
||||
if cn != 1 {
|
||||
return false, xerrors.Errorf("expected one piece")
|
||||
}
|
||||
|
||||
}
|
||||
allocated = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}, harmonydb.OptionRetry())
|
||||
|
||||
if !comm {
|
||||
rerr = xerrors.Errorf("allocating piece to a sector: commit failed")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
rerr = xerrors.Errorf("allocating piece to a sector: %w", err)
|
||||
}
|
||||
|
||||
return allocated, ret, rerr
|
||||
|
||||
}
|
||||
|
||||
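For verified pieces, the builtin-actors rule referenced in the comment above is that the candidate sector's lifetime must be at least the allocation's TermMin and at most its TermMax, otherwise activation fails. A hedged, standalone restatement of that check, independent of the loop above:

// Sketch: the term constraint a verified allocation places on a candidate
// open sector. sectorLifetime is latestEndEpoch - earliestStartEpoch for the
// sector being considered.
func termsFit(sectorLifetime, termMin, termMax abi.ChainEpoch) bool {
	// The allocation can only activate in a sector that lives at least
	// TermMin epochs and at most TermMax epochs.
	return sectorLifetime >= termMin && sectorLifetime <= termMax
}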
func (p *PieceIngester) SectorStartSealing(ctx context.Context, sector abi.SectorNumber) error {
|
||||
|
||||
spt, err := p.getSealProofType()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting seal proof type: %w", err)
|
||||
}
|
||||
|
||||
comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
|
||||
// Get current open sector pieces from DB
|
||||
var pieces []pieceDetails
|
||||
err = tx.Select(&pieces, `
|
||||
SELECT
|
||||
sector_number,
|
||||
piece_size,
|
||||
piece_index,
|
||||
COALESCE(direct_start_epoch, f05_deal_start_epoch, 0) AS deal_start_epoch,
|
||||
COALESCE(direct_end_epoch, f05_deal_end_epoch, 0) AS deal_end_epoch,
|
||||
created_at
|
||||
FROM
|
||||
open_sector_pieces
|
||||
WHERE
|
||||
sp_id = $1 AND sector_number = $2
|
||||
ORDER BY
|
||||
piece_index DESC;`, p.mid, sector)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting open sectors from DB")
|
||||
}
|
||||
|
||||
if len(pieces) < 1 {
|
||||
return false, xerrors.Errorf("sector %d is not waiting to be sealed", sector)
|
||||
}
|
||||
|
||||
cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, p.mid, sector, spt)
|
||||
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
|
||||
}
|
||||
|
||||
if cn != 1 {
|
||||
return false, xerrors.Errorf("incorrect number of rows returned")
|
||||
}
|
||||
|
||||
_, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", p.mid, sector)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
|
||||
}, harmonydb.OptionRetry())
|
||||
|
||||
if err != nil {
|
||||
return xerrors.Errorf("start sealing sector: %w", err)
|
||||
}
|
||||
|
||||
if !comm {
|
||||
return xerrors.Errorf("start sealing sector: commit failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PieceIngester) getOpenSectors(tx *harmonydb.Tx) ([]*openSector, error) {
|
||||
// Get current open sector pieces from DB
|
||||
var pieces []pieceDetails
|
||||
err := tx.Select(&pieces, `
|
||||
SELECT
|
||||
sector_number,
|
||||
piece_size,
|
||||
piece_index,
|
||||
COALESCE(direct_start_epoch, f05_deal_start_epoch, 0) AS deal_start_epoch,
|
||||
COALESCE(direct_end_epoch, f05_deal_end_epoch, 0) AS deal_end_epoch,
|
||||
created_at
|
||||
FROM
|
||||
open_sector_pieces
|
||||
WHERE
|
||||
sp_id = $1
|
||||
ORDER BY
|
||||
piece_index DESC;`, p.mid)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting open sectors from DB: %w", err)
|
||||
}
|
||||
|
||||
getStartEpoch := func(new abi.ChainEpoch, cur abi.ChainEpoch) abi.ChainEpoch {
|
||||
if cur > 0 && cur < new {
|
||||
return cur
|
||||
}
|
||||
return new
|
||||
}
|
||||
|
||||
getEndEpoch := func(new abi.ChainEpoch, cur abi.ChainEpoch) abi.ChainEpoch {
|
||||
if cur > 0 && cur > new {
|
||||
return cur
|
||||
}
|
||||
return new
|
||||
}
|
||||
|
||||
getOpenedAt := func(piece pieceDetails, cur *time.Time) *time.Time {
|
||||
if piece.CreatedAt.Before(*cur) {
|
||||
return piece.CreatedAt
|
||||
}
|
||||
return cur
|
||||
}
|
||||
|
||||
sectorMap := map[abi.SectorNumber]*openSector{}
|
||||
for _, pi := range pieces {
|
||||
pi := pi
|
||||
sector, ok := sectorMap[pi.Sector]
|
||||
if !ok {
|
||||
sectorMap[pi.Sector] = &openSector{
|
||||
number: pi.Sector,
|
||||
currentSize: pi.Size,
|
||||
earliestStartEpoch: getStartEpoch(pi.StartEpoch, 0),
|
||||
index: pi.Index,
|
||||
openedAt: pi.CreatedAt,
|
||||
latestEndEpoch: getEndEpoch(pi.EndEpoch, 0),
|
||||
}
|
||||
continue
|
||||
}
|
||||
sector.currentSize += pi.Size
|
||||
sector.earliestStartEpoch = getStartEpoch(pi.StartEpoch, sector.earliestStartEpoch)
|
||||
sector.latestEndEpoch = getEndEpoch(pi.EndEpoch, sector.latestEndEpoch)
|
||||
if sector.index < pi.Index {
|
||||
sector.index = pi.Index
|
||||
}
|
||||
sector.openedAt = getOpenedAt(pi, sector.openedAt)
|
||||
}
|
||||
|
||||
var os []*openSector
|
||||
|
||||
for _, v := range sectorMap {
|
||||
v := v
|
||||
os = append(os, v)
|
||||
}
|
||||
|
||||
return os, nil
|
||||
}
|
||||
|
||||
func (p *PieceIngester) getSealProofType() (abi.RegisteredSealProof, error) {
|
||||
nv, err := p.api.StateNetworkVersion(p.ctx, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("getting network version: %w", err)
|
||||
}
|
||||
|
||||
return miner.PreferredSealProofTypeFromWindowPoStType(nv, p.windowPoStProofType, p.synth)
|
||||
}
|
||||
|
@ -18,12 +18,12 @@ import (
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/curiosrc/market"
|
||||
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/storage/paths"
|
||||
sealing "github.com/filecoin-project/lotus/storage/pipeline"
|
||||
lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
|
||||
@ -64,17 +64,79 @@ func (l *LMRPCProvider) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storifa
|
||||
}
|
||||
|
||||
func (l *LMRPCProvider) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) {
|
||||
si, err := l.si.StorageFindSector(ctx, abi.SectorID{Miner: l.minerID, Number: sid}, storiface.FTSealed|storiface.FTCache, 0, false)
|
||||
if err != nil {
|
||||
return api.SectorInfo{}, err
|
||||
}
|
||||
|
||||
var ssip []struct {
|
||||
PieceCID *string `db:"piece_cid"`
|
||||
DealID *int64 `db:"f05_deal_id"`
|
||||
Complete bool `db:"after_commit_msg_success"`
|
||||
Failed bool `db:"failed"`
|
||||
SDR bool `db:"after_sdr"`
|
||||
PoRep bool `db:"after_porep"`
|
||||
}
|
||||
|
||||
err = l.db.Select(ctx, &ssip, "SELECT ssip.piece_cid, ssip.f05_deal_id FROM sectors_sdr_pipeline p LEFT JOIN sectors_sdr_initial_pieces ssip ON p.sp_id = ssip.sp_id AND p.sector_number = ssip.sector_number WHERE p.sp_id = $1 AND p.sector_number = $2", l.minerID, sid)
|
||||
err = l.db.Select(ctx, &ssip, `
|
||||
WITH CheckCommit AS (
|
||||
SELECT
|
||||
sp_id,
|
||||
sector_number,
|
||||
after_commit_msg,
|
||||
failed,
|
||||
after_sdr,
|
||||
after_porep,
|
||||
after_commit_msg_success
|
||||
FROM
|
||||
sectors_sdr_pipeline
|
||||
WHERE
|
||||
sp_id = $1 AND sector_number = $2
|
||||
),
|
||||
MetaPieces AS (
|
||||
SELECT
|
||||
mp.piece_cid,
|
||||
mp.f05_deal_id,
|
||||
cc.after_commit_msg_success,
|
||||
cc.failed,
|
||||
cc.after_sdr,
|
||||
cc.after_porep
|
||||
FROM
|
||||
sectors_meta_pieces mp
|
||||
INNER JOIN
|
||||
CheckCommit cc ON mp.sp_id = cc.sp_id AND mp.sector_num = cc.sector_number
|
||||
WHERE
|
||||
cc.after_commit_msg IS TRUE
|
||||
),
|
||||
InitialPieces AS (
|
||||
SELECT
|
||||
ip.piece_cid,
|
||||
ip.f05_deal_id,
|
||||
cc.after_commit_msg_success,
|
||||
cc.failed,
|
||||
cc.after_sdr,
|
||||
cc.after_porep
|
||||
FROM
|
||||
sectors_sdr_initial_pieces ip
|
||||
INNER JOIN
|
||||
CheckCommit cc ON ip.sp_id = cc.sp_id AND ip.sector_number = cc.sector_number
|
||||
WHERE
|
||||
cc.after_commit_msg IS FALSE
|
||||
),
|
||||
FallbackPieces AS (
|
||||
SELECT
|
||||
op.piece_cid,
|
||||
op.f05_deal_id,
|
||||
FALSE as after_commit_msg_success,
|
||||
FALSE as failed,
|
||||
FALSE as after_sdr,
|
||||
FALSE as after_porep
|
||||
FROM
|
||||
open_sector_pieces op
|
||||
WHERE
|
||||
op.sp_id = $1 AND op.sector_number = $2
|
||||
AND NOT EXISTS (SELECT 1 FROM sectors_sdr_pipeline sp WHERE sp.sp_id = op.sp_id AND sp.sector_number = op.sector_number)
|
||||
)
|
||||
SELECT * FROM MetaPieces
|
||||
UNION ALL
|
||||
SELECT * FROM InitialPieces
|
||||
UNION ALL
|
||||
SELECT * FROM FallbackPieces;`, l.minerID, sid)
|
||||
if err != nil {
|
||||
return api.SectorInfo{}, err
|
||||
}
|
||||
@ -86,15 +148,6 @@ func (l *LMRPCProvider) SectorsStatus(ctx context.Context, sid abi.SectorNumber,
|
||||
deals = append(deals, abi.DealID(*d.DealID))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
osi, err := l.full.StateSectorGetInfo(ctx, l.maddr, sid, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return api.SectorInfo{}, err
|
||||
}
|
||||
|
||||
if osi != nil {
|
||||
deals = osi.DealIDs
|
||||
}
|
||||
}
|
||||
|
||||
spt, err := miner.SealProofTypeFromSectorSize(l.ssize, network.Version20, false) // good enough, just need this for ssize anyway
|
||||
@ -102,49 +155,8 @@ func (l *LMRPCProvider) SectorsStatus(ctx context.Context, sid abi.SectorNumber,
|
||||
return api.SectorInfo{}, err
|
||||
}
|
||||
|
||||
if len(si) == 0 {
|
||||
state := api.SectorState(sealing.UndefinedSectorState)
|
||||
if len(ssip) > 0 {
|
||||
state = api.SectorState(sealing.PreCommit1)
|
||||
}
|
||||
|
||||
return api.SectorInfo{
|
||||
SectorID: sid,
|
||||
State: state,
|
||||
CommD: nil,
|
||||
CommR: nil,
|
||||
Proof: nil,
|
||||
Deals: deals,
|
||||
Pieces: nil,
|
||||
Ticket: api.SealTicket{},
|
||||
Seed: api.SealSeed{},
|
||||
PreCommitMsg: nil,
|
||||
CommitMsg: nil,
|
||||
Retries: 0,
|
||||
ToUpgrade: false,
|
||||
ReplicaUpdateMessage: nil,
|
||||
LastErr: "",
|
||||
Log: nil,
|
||||
SealProof: spt,
|
||||
Activation: 0,
|
||||
Expiration: 0,
|
||||
DealWeight: big.Zero(),
|
||||
VerifiedDealWeight: big.Zero(),
|
||||
InitialPledge: big.Zero(),
|
||||
OnTime: 0,
|
||||
Early: 0,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var state = api.SectorState(sealing.Proving)
|
||||
if !si[0].CanStore {
|
||||
state = api.SectorState(sealing.PreCommit2)
|
||||
}
|
||||
|
||||
// todo improve this with on-chain info
|
||||
return api.SectorInfo{
|
||||
ret := api.SectorInfo{
|
||||
SectorID: sid,
|
||||
State: state,
|
||||
CommD: nil,
|
||||
CommR: nil,
|
||||
Proof: nil,
|
||||
@ -159,16 +171,37 @@ func (l *LMRPCProvider) SectorsStatus(ctx context.Context, sid abi.SectorNumber,
|
||||
ReplicaUpdateMessage: nil,
|
||||
LastErr: "",
|
||||
Log: nil,
|
||||
SealProof: spt,
|
||||
Activation: 0,
|
||||
Expiration: 0,
|
||||
DealWeight: big.Zero(),
|
||||
VerifiedDealWeight: big.Zero(),
|
||||
InitialPledge: big.Zero(),
|
||||
OnTime: 0,
|
||||
Early: 0,
|
||||
}
|
||||
|
||||
SealProof: spt,
|
||||
Activation: 0,
|
||||
Expiration: 0,
|
||||
DealWeight: big.Zero(),
|
||||
VerifiedDealWeight: big.Zero(),
|
||||
InitialPledge: big.Zero(),
|
||||
OnTime: 0,
|
||||
Early: 0,
|
||||
}, nil
|
||||
// If no rows were found, the sector doesn't exist in the DB yet
if len(ssip) == 0 {
ret.State = api.SectorState(sealing.UndefinedSectorState)
return ret, nil
}

// assign ssip[0] to a local variable for easier reading
currentSSIP := ssip[0]

switch {
|
||||
case currentSSIP.Failed:
|
||||
ret.State = api.SectorState(sealing.FailedUnrecoverable)
|
||||
case !currentSSIP.SDR:
|
||||
ret.State = api.SectorState(sealing.WaitDeals)
|
||||
case currentSSIP.SDR && !currentSSIP.PoRep:
|
||||
ret.State = api.SectorState(sealing.PreCommit1)
|
||||
case currentSSIP.SDR && currentSSIP.PoRep && !currentSSIP.Complete:
|
||||
ret.State = api.SectorState(sealing.PreCommit2)
|
||||
case currentSSIP.Complete:
|
||||
ret.State = api.SectorState(sealing.Proving)
|
||||
default:
|
||||
return api.SectorInfo{}, nil
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (l *LMRPCProvider) SectorsList(ctx context.Context) ([]abi.SectorNumber, error) {
|
||||
@ -324,7 +357,7 @@ func (l *LMRPCProvider) SectorAddPieceToAny(ctx context.Context, size abi.Unpadd
|
||||
return api.SectorOffset{}, xerrors.Errorf("not supported, use AllocatePieceToSector")
|
||||
}
|
||||
|
||||
func (l *LMRPCProvider) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
|
||||
func (l *LMRPCProvider) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
|
||||
return l.pi.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header)
|
||||
}
|
||||
|
||||
|
@ -2,11 +2,13 @@ package lmrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -32,6 +34,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/node"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/storage/paths"
|
||||
lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
|
||||
@ -149,7 +152,10 @@ func forEachMarketRPC(cfg *config.CurioConfig, cb func(string, string) error) er
|
||||
func ServeCurioMarketRPC(db *harmonydb.DB, full api.FullNode, maddr address.Address, conf *config.CurioConfig, listen string) error {
|
||||
ctx := context.Background()
|
||||
|
||||
pin := cumarket.NewPieceIngester(db, full)
|
||||
pin, err := cumarket.NewPieceIngester(ctx, db, full, maddr, false, time.Duration(conf.Ingest.MaxDealWaitTime))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("starting piece ingestor")
|
||||
}
|
||||
|
||||
si := paths.NewDBIndex(nil, db)
|
||||
|
||||
@ -188,8 +194,10 @@ func ServeCurioMarketRPC(db *harmonydb.DB, full api.FullNode, maddr address.Addr
|
||||
}, nil
|
||||
}
|
||||
|
||||
ast.CommonStruct.Internal.AuthNew = lp.AuthNew
|
||||
pieceInfoLk := new(sync.Mutex)
|
||||
pieceInfos := map[uuid.UUID][]pieceInfo{}
|
||||
|
||||
ast.CommonStruct.Internal.AuthNew = lp.AuthNew
|
||||
ast.Internal.ActorAddress = lp.ActorAddress
|
||||
ast.Internal.WorkerJobs = lp.WorkerJobs
|
||||
ast.Internal.SectorsStatus = lp.SectorsStatus
|
||||
@ -198,219 +206,7 @@ func ServeCurioMarketRPC(db *harmonydb.DB, full api.FullNode, maddr address.Addr
|
||||
ast.Internal.SectorsListInStates = lp.SectorsListInStates
|
||||
ast.Internal.StorageRedeclareLocal = lp.StorageRedeclareLocal
|
||||
ast.Internal.ComputeDataCid = lp.ComputeDataCid
|
||||
|
||||
type pieceInfo struct {
|
||||
data storiface.Data
|
||||
size abi.UnpaddedPieceSize
|
||||
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
pieceInfoLk := new(sync.Mutex)
|
||||
pieceInfos := map[uuid.UUID][]pieceInfo{}
|
||||
|
||||
ast.Internal.SectorAddPieceToAny = func(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data, deal api.PieceDealInfo) (api.SectorOffset, error) {
|
||||
origPieceData := pieceData
|
||||
defer func() {
|
||||
closer, ok := origPieceData.(io.Closer)
|
||||
if !ok {
|
||||
log.Warnf("DataCid: cannot close pieceData reader %T because it is not an io.Closer", origPieceData)
|
||||
return
|
||||
}
|
||||
if err := closer.Close(); err != nil {
|
||||
log.Warnw("closing pieceData in DataCid", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
pi := pieceInfo{
|
||||
data: pieceData,
|
||||
size: pieceSize,
|
||||
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
pieceUUID := uuid.New()
|
||||
|
||||
//color.Blue("%s %s piece assign request with id %s", deal.DealProposal.PieceCID, deal.DealProposal.Provider, pieceUUID)
|
||||
log.Infow("piece assign request", "piece_cid", deal.DealProposal.PieceCID, "provider", deal.DealProposal.Provider, "piece_uuid", pieceUUID)
|
||||
|
||||
pieceInfoLk.Lock()
|
||||
pieceInfos[pieceUUID] = append(pieceInfos[pieceUUID], pi)
|
||||
pieceInfoLk.Unlock()
|
||||
|
||||
// /piece?piece_cid=xxxx
|
||||
dataUrl := rootUrl
|
||||
dataUrl.Path = "/piece"
|
||||
dataUrl.RawQuery = "piece_id=" + pieceUUID.String()
|
||||
|
||||
// add piece entry
|
||||
|
||||
var refID int64
|
||||
var pieceWasCreated bool
|
||||
|
||||
for {
|
||||
var backpressureWait bool
|
||||
|
||||
comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
|
||||
// BACKPRESSURE
|
||||
wait, err := maybeApplyBackpressure(tx, conf.Ingest)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("backpressure checks: %w", err)
|
||||
}
|
||||
if wait {
|
||||
backpressureWait = true
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var pieceID int64
|
||||
// Attempt to select the piece ID first
|
||||
err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1`, deal.DealProposal.PieceCID.String()).Scan(&pieceID)
|
||||
|
||||
if err != nil {
|
||||
if err == pgx.ErrNoRows {
|
||||
// Piece does not exist, attempt to insert
|
||||
err = tx.QueryRow(`
|
||||
INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size)
|
||||
VALUES ($1, $2, $3)
|
||||
ON CONFLICT (piece_cid) DO NOTHING
|
||||
RETURNING id`, deal.DealProposal.PieceCID.String(), int64(pieceSize.Padded()), int64(pieceSize)).Scan(&pieceID)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("inserting new parked piece and getting id: %w", err)
|
||||
}
|
||||
pieceWasCreated = true // New piece was created
|
||||
} else {
|
||||
// Some other error occurred during select
|
||||
return false, xerrors.Errorf("checking existing parked piece: %w", err)
|
||||
}
|
||||
} else {
|
||||
pieceWasCreated = false // Piece already exists, no new piece was created
|
||||
}
|
||||
|
||||
// Add parked_piece_ref
|
||||
err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url)
|
||||
VALUES ($1, $2) RETURNING ref_id`, pieceID, dataUrl.String()).Scan(&refID)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("inserting parked piece ref: %w", err)
|
||||
}
|
||||
|
||||
// If everything went well, commit the transaction
|
||||
return true, nil // This will commit the transaction
|
||||
}, harmonydb.OptionRetry())
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("inserting parked piece: %w", err)
|
||||
}
|
||||
if !comm {
|
||||
if backpressureWait {
|
||||
// Backpressure was applied, wait and try again
|
||||
select {
|
||||
case <-time.After(backpressureWaitTime):
|
||||
case <-ctx.Done():
|
||||
return api.SectorOffset{}, xerrors.Errorf("context done while waiting for backpressure: %w", ctx.Err())
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
return api.SectorOffset{}, xerrors.Errorf("piece tx didn't commit")
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
// wait for piece to be parked
|
||||
if pieceWasCreated {
|
||||
<-pi.done
|
||||
} else {
|
||||
// If the piece was not created, we need to close the done channel
|
||||
close(pi.done)
|
||||
|
||||
go func() {
|
||||
// close the data reader (drain to eof if it's not a closer)
|
||||
if closer, ok := pieceData.(io.Closer); ok {
|
||||
if err := closer.Close(); err != nil {
|
||||
log.Warnw("closing pieceData in DataCid", "error", err)
|
||||
}
|
||||
} else {
|
||||
log.Warnw("pieceData is not an io.Closer", "type", fmt.Sprintf("%T", pieceData))
|
||||
|
||||
_, err := io.Copy(io.Discard, pieceData)
|
||||
if err != nil {
|
||||
log.Warnw("draining pieceData in DataCid", "error", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
{
|
||||
// piece park is either done or currently happening from another AP call
|
||||
// now we need to make sure that the piece is definitely parked successfully
|
||||
// - in case of errors we return, and boost should be able to retry the call
|
||||
|
||||
// * If piece is completed, return
|
||||
// * If piece is not completed but has null taskID, wait
|
||||
// * If piece has a non-null taskID
|
||||
// * If the task is in harmony_tasks, wait
|
||||
// * Otherwise look for an error in harmony_task_history and return that
|
||||
|
||||
for {
|
||||
var taskID *int64
|
||||
var complete bool
|
||||
err := db.QueryRow(ctx, `SELECT task_id, complete FROM parked_pieces WHERE id = $1`, refID).Scan(&taskID, &complete)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("getting piece park status: %w", err)
|
||||
}
|
||||
|
||||
if complete {
|
||||
break
|
||||
}
|
||||
|
||||
if taskID == nil {
|
||||
// piece is not parked yet
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
// check if task is in harmony_tasks
|
||||
var taskName string
|
||||
err = db.QueryRow(ctx, `SELECT name FROM harmony_task WHERE id = $1`, *taskID).Scan(&taskName)
|
||||
if err == nil {
|
||||
// task is in harmony_tasks, wait
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
}
|
||||
if err != pgx.ErrNoRows {
|
||||
return api.SectorOffset{}, xerrors.Errorf("checking park-piece task in harmony_tasks: %w", err)
|
||||
}
|
||||
|
||||
// task is not in harmony_tasks, check harmony_task_history (latest work_end)
|
||||
var taskError string
|
||||
var taskResult bool
|
||||
err = db.QueryRow(ctx, `SELECT result, err FROM harmony_task_history WHERE task_id = $1 ORDER BY work_end DESC LIMIT 1`, *taskID).Scan(&taskResult, &taskError)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("checking park-piece task history: %w", err)
|
||||
}
|
||||
if !taskResult {
|
||||
return api.SectorOffset{}, xerrors.Errorf("park-piece task failed: %s", taskError)
|
||||
}
|
||||
return api.SectorOffset{}, xerrors.Errorf("park task succeeded but piece is not marked as complete")
|
||||
}
|
||||
}
|
||||
|
||||
pieceIDUrl := url.URL{
|
||||
Scheme: "pieceref",
|
||||
Opaque: fmt.Sprintf("%d", refID),
|
||||
}
|
||||
|
||||
// make a sector
|
||||
so, err := pin.AllocatePieceToSector(ctx, maddr, deal, int64(pieceSize), pieceIDUrl, nil)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, err
|
||||
}
|
||||
|
||||
log.Infow("piece assigned to sector", "piece_cid", deal.DealProposal.PieceCID, "sector", so.Sector, "offset", so.Offset)
|
||||
|
||||
return so, nil
|
||||
}
|
||||
|
||||
ast.Internal.SectorAddPieceToAny = sectorAddPieceToAnyOperation(maddr, rootUrl, conf, pieceInfoLk, pieceInfos, pin, db, mi.SectorSize)
|
||||
ast.Internal.StorageList = si.StorageList
|
||||
ast.Internal.StorageDetach = si.StorageDetach
|
||||
ast.Internal.StorageReportHealth = si.StorageReportHealth
|
||||
@ -422,6 +218,7 @@ func ServeCurioMarketRPC(db *harmonydb.DB, full api.FullNode, maddr address.Addr
|
||||
ast.Internal.StorageLock = si.StorageLock
|
||||
ast.Internal.StorageTryLock = si.StorageTryLock
|
||||
ast.Internal.StorageGetLocks = si.StorageGetLocks
|
||||
ast.Internal.SectorStartSealing = pin.SectorStartSealing
|
||||
|
||||
var pieceHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
|
||||
// /piece?piece_id=xxxx
|
||||
@ -490,7 +287,7 @@ func ServeCurioMarketRPC(db *harmonydb.DB, full api.FullNode, maddr address.Addr
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/piece", pieceHandler)
|
||||
mux.Handle("/", mh)
|
||||
mux.Handle("/", mh) // todo: create a method for sealNow for sectors
|
||||
|
||||
server := &http.Server{
|
||||
Addr: listen,
|
||||
@ -502,43 +299,287 @@ func ServeCurioMarketRPC(db *harmonydb.DB, full api.FullNode, maddr address.Addr
|
||||
return server.ListenAndServe()
|
||||
}
|
||||
|
||||
func maybeApplyBackpressure(tx *harmonydb.Tx, cfg config.CurioIngestConfig) (wait bool, err error) {
|
||||
var bufferedSDR, bufferedTrees, bufferedPoRep int
|
||||
err = tx.QueryRow(`WITH BufferedSDR AS (
|
||||
SELECT SUM(buffered_count) AS buffered_sdr_count
|
||||
FROM (
|
||||
SELECT COUNT(p.task_id_sdr) - COUNT(t.owner_id) AS buffered_count
|
||||
FROM sectors_sdr_pipeline p
|
||||
LEFT JOIN harmony_task t ON p.task_id_sdr = t.id
|
||||
WHERE p.after_sdr = false
|
||||
UNION ALL
|
||||
SELECT COUNT(1) AS buffered_count
|
||||
FROM parked_pieces
|
||||
WHERE complete = false
|
||||
) AS subquery
|
||||
),
|
||||
BufferedTrees AS (
|
||||
SELECT COUNT(p.task_id_tree_r) - COUNT(t.owner_id) AS buffered_trees_count
|
||||
FROM sectors_sdr_pipeline p
|
||||
LEFT JOIN harmony_task t ON p.task_id_tree_r = t.id
|
||||
WHERE p.after_sdr = true AND p.after_tree_r = false
|
||||
),
|
||||
BufferedPoRep AS (
|
||||
SELECT COUNT(p.task_id_porep) - COUNT(t.owner_id) AS buffered_porep_count
|
||||
FROM sectors_sdr_pipeline p
|
||||
LEFT JOIN harmony_task t ON p.task_id_porep = t.id
|
||||
WHERE p.after_tree_r = true AND p.after_porep = false
|
||||
)
|
||||
SELECT
|
||||
(SELECT buffered_sdr_count FROM BufferedSDR) AS total_buffered,
|
||||
type pieceInfo struct {
|
||||
data storiface.Data
|
||||
size abi.UnpaddedPieceSize
|
||||
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
func sectorAddPieceToAnyOperation(maddr address.Address, rootUrl url.URL, conf *config.CurioConfig, pieceInfoLk *sync.Mutex, pieceInfos map[uuid.UUID][]pieceInfo, pin *cumarket.PieceIngester, db *harmonydb.DB, ssize abi.SectorSize) func(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data, deal lpiece.PieceDealInfo) (api.SectorOffset, error) {
|
||||
return func(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data, deal lpiece.PieceDealInfo) (api.SectorOffset, error) {
|
||||
if (deal.PieceActivationManifest == nil && deal.DealProposal == nil) || (deal.PieceActivationManifest != nil && deal.DealProposal != nil) {
|
||||
return api.SectorOffset{}, xerrors.Errorf("deal info must have either deal proposal or piece manifest")
|
||||
}
|
||||
|
||||
origPieceData := pieceData
|
||||
defer func() {
|
||||
closer, ok := origPieceData.(io.Closer)
|
||||
if !ok {
|
||||
log.Warnf("DataCid: cannot close pieceData reader %T because it is not an io.Closer", origPieceData)
|
||||
return
|
||||
}
|
||||
if err := closer.Close(); err != nil {
|
||||
log.Warnw("closing pieceData in DataCid", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
pi := pieceInfo{
|
||||
data: pieceData,
|
||||
size: pieceSize,
|
||||
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
pieceUUID := uuid.New()
|
||||
|
||||
if deal.DealProposal != nil {
|
||||
log.Infow("piece assign request", "piece_cid", deal.PieceCID().String(), "provider", deal.DealProposal.Provider, "piece_uuid", pieceUUID)
|
||||
}
|
||||
|
||||
pieceInfoLk.Lock()
|
||||
pieceInfos[pieceUUID] = append(pieceInfos[pieceUUID], pi)
|
||||
pieceInfoLk.Unlock()
|
||||
|
||||
// /piece?piece_cid=xxxx
|
||||
dataUrl := rootUrl
|
||||
dataUrl.Path = "/piece"
|
||||
dataUrl.RawQuery = "piece_id=" + pieceUUID.String()
|
||||
|
||||
// add piece entry
|
||||
refID, pieceWasCreated, err := addPieceEntry(ctx, db, conf, deal, pieceSize, dataUrl, ssize)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, err
|
||||
}
|
||||
|
||||
// wait for piece to be parked
|
||||
if pieceWasCreated {
|
||||
<-pi.done
|
||||
} else {
|
||||
// If the piece was not created, we need to close the done channel
|
||||
close(pi.done)
|
||||
|
||||
closeDataReader(pieceData)
|
||||
}
|
||||
|
||||
{
|
||||
// piece park is either done or currently happening from another AP call
|
||||
// now we need to make sure that the piece is definitely parked successfully
|
||||
// - in case of errors we return, and boost should be able to retry the call
|
||||
|
||||
// * If piece is completed, return
|
||||
// * If piece is not completed but has null taskID, wait
|
||||
// * If piece has a non-null taskID
|
||||
// * If the task is in harmony_tasks, wait
|
||||
// * Otherwise look for an error in harmony_task_history and return that
|
||||
|
||||
for {
|
||||
var taskID *int64
|
||||
var complete bool
|
||||
err := db.QueryRow(ctx, `SELECT pp.task_id, pp.complete
|
||||
FROM parked_pieces pp
|
||||
JOIN parked_piece_refs ppr ON pp.id = ppr.piece_id
|
||||
WHERE ppr.ref_id = $1;`, refID).Scan(&taskID, &complete)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("getting piece park status: %w", err)
|
||||
}
|
||||
|
||||
if complete {
|
||||
break
|
||||
}
|
||||
|
||||
if taskID == nil {
|
||||
// piece is not parked yet
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
// check if task is in harmony_tasks
|
||||
var taskName string
|
||||
err = db.QueryRow(ctx, `SELECT name FROM harmony_task WHERE id = $1`, *taskID).Scan(&taskName)
|
||||
if err == nil {
|
||||
// task is in harmony_tasks, wait
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
}
|
||||
if err != pgx.ErrNoRows {
|
||||
return api.SectorOffset{}, xerrors.Errorf("checking park-piece task in harmony_tasks: %w", err)
|
||||
}
|
||||
|
||||
// task is not in harmony_tasks, check harmony_task_history (latest work_end)
|
||||
var taskError string
|
||||
var taskResult bool
|
||||
err = db.QueryRow(ctx, `SELECT result, err FROM harmony_task_history WHERE task_id = $1 ORDER BY work_end DESC LIMIT 1`, *taskID).Scan(&taskResult, &taskError)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, xerrors.Errorf("checking park-piece task history: %w", err)
|
||||
}
|
||||
if !taskResult {
|
||||
return api.SectorOffset{}, xerrors.Errorf("park-piece task failed: %s", taskError)
|
||||
}
|
||||
return api.SectorOffset{}, xerrors.Errorf("park task succeeded but piece is not marked as complete")
|
||||
}
|
||||
}
|
||||
|
||||
pieceIDUrl := url.URL{
|
||||
Scheme: "pieceref",
|
||||
Opaque: fmt.Sprintf("%d", refID),
|
||||
}
|
||||
|
||||
// make a sector
|
||||
so, err := pin.AllocatePieceToSector(ctx, maddr, deal, int64(pieceSize), pieceIDUrl, nil)
|
||||
if err != nil {
|
||||
return api.SectorOffset{}, err
|
||||
}
|
||||
|
||||
log.Infow("piece assigned to sector", "piece_cid", deal.PieceCID().String(), "sector", so.Sector, "offset", so.Offset)
|
||||
|
||||
return so, nil
|
||||
}
|
||||
}
|
||||
|
||||
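The data URL built above points back at the market RPC's own /piece endpoint, and the piece handler defined later in this file is what serves it. This is a hedged sketch of the contract that handler has to honour, not the real implementation: look up the reader registered under piece_id, stream it, and close pi.done so the waiting SectorAddPieceToAny call can continue. servePiece is a hypothetical name; uuid, http, io and sync are already imported in this file.

// Sketch (simplified): one reader per piece_id; the real handler also deals
// with multiple readers registered under the same id.
func servePiece(w http.ResponseWriter, r *http.Request, pieceInfoLk *sync.Mutex, pieceInfos map[uuid.UUID][]pieceInfo) {
	id, err := uuid.Parse(r.URL.Query().Get("piece_id"))
	if err != nil {
		http.Error(w, "bad piece_id", http.StatusBadRequest)
		return
	}

	pieceInfoLk.Lock()
	pis, ok := pieceInfos[id]
	pieceInfoLk.Unlock()
	if !ok || len(pis) == 0 {
		http.Error(w, "unknown piece_id", http.StatusNotFound)
		return
	}
	pi := pis[0]

	if _, err := io.Copy(w, pi.data); err != nil {
		log.Warnw("serving parked piece", "error", err)
		return
	}
	close(pi.done) // unblock the SectorAddPieceToAny call waiting above
}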
func addPieceEntry(ctx context.Context, db *harmonydb.DB, conf *config.CurioConfig, deal lpiece.PieceDealInfo, pieceSize abi.UnpaddedPieceSize, dataUrl url.URL, ssize abi.SectorSize) (int64, bool, error) {
|
||||
var refID int64
|
||||
var pieceWasCreated bool
|
||||
|
||||
for {
|
||||
var backpressureWait bool
|
||||
|
||||
comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
|
||||
// BACKPRESSURE
|
||||
wait, err := maybeApplyBackpressure(tx, conf.Ingest, ssize)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("backpressure checks: %w", err)
|
||||
}
|
||||
if wait {
|
||||
backpressureWait = true
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var pieceID int64
|
||||
// Attempt to select the piece ID first
|
||||
err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1`, deal.PieceCID().String()).Scan(&pieceID)
|
||||
|
||||
if err != nil {
|
||||
if errors.Is(err, pgx.ErrNoRows) {
|
||||
// Piece does not exist, attempt to insert
|
||||
err = tx.QueryRow(`
|
||||
INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size)
|
||||
VALUES ($1, $2, $3)
|
||||
ON CONFLICT (piece_cid) DO NOTHING
|
||||
RETURNING id`, deal.PieceCID().String(), int64(pieceSize.Padded()), int64(pieceSize)).Scan(&pieceID)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("inserting new parked piece and getting id: %w", err)
|
||||
}
|
||||
pieceWasCreated = true // New piece was created
|
||||
} else {
|
||||
// Some other error occurred during select
|
||||
return false, xerrors.Errorf("checking existing parked piece: %w", err)
|
||||
}
|
||||
} else {
|
||||
pieceWasCreated = false // Piece already exists, no new piece was created
|
||||
}
|
||||
|
||||
// Add parked_piece_ref
|
||||
err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url)
|
||||
VALUES ($1, $2) RETURNING ref_id`, pieceID, dataUrl.String()).Scan(&refID)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("inserting parked piece ref: %w", err)
|
||||
}
|
||||
|
||||
// If everything went well, commit the transaction
|
||||
return true, nil // This will commit the transaction
|
||||
}, harmonydb.OptionRetry())
|
||||
if err != nil {
|
||||
return refID, pieceWasCreated, xerrors.Errorf("inserting parked piece: %w", err)
|
||||
}
|
||||
if !comm {
|
||||
if backpressureWait {
|
||||
// Backpressure was applied, wait and try again
|
||||
select {
|
||||
case <-time.After(backpressureWaitTime):
|
||||
case <-ctx.Done():
|
||||
return refID, pieceWasCreated, xerrors.Errorf("context done while waiting for backpressure: %w", ctx.Err())
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
return refID, pieceWasCreated, xerrors.Errorf("piece tx didn't commit")
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
return refID, pieceWasCreated, nil
|
||||
}
|
||||
|
||||
func closeDataReader(pieceData storiface.Data) {
|
||||
go func() {
|
||||
// close the data reader (drain to eof if it's not a closer)
|
||||
if closer, ok := pieceData.(io.Closer); ok {
|
||||
if err := closer.Close(); err != nil {
|
||||
log.Warnw("closing pieceData in DataCid", "error", err)
|
||||
}
|
||||
} else {
|
||||
log.Warnw("pieceData is not an io.Closer", "type", fmt.Sprintf("%T", pieceData))
|
||||
|
||||
_, err := io.Copy(io.Discard, pieceData)
|
||||
if err != nil {
|
||||
log.Warnw("draining pieceData in DataCid", "error", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func maybeApplyBackpressure(tx *harmonydb.Tx, cfg config.CurioIngestConfig, ssize abi.SectorSize) (wait bool, err error) {
|
||||
var bufferedSDR, bufferedTrees, bufferedPoRep, waitDealSectors int
|
||||
err = tx.QueryRow(`
|
||||
WITH BufferedSDR AS (
|
||||
SELECT COUNT(p.task_id_sdr) - COUNT(t.owner_id) AS buffered_sdr_count
|
||||
FROM sectors_sdr_pipeline p
|
||||
LEFT JOIN harmony_task t ON p.task_id_sdr = t.id
|
||||
WHERE p.after_sdr = false
|
||||
),
|
||||
BufferedTrees AS (
|
||||
SELECT COUNT(p.task_id_tree_r) - COUNT(t.owner_id) AS buffered_trees_count
|
||||
FROM sectors_sdr_pipeline p
|
||||
LEFT JOIN harmony_task t ON p.task_id_tree_r = t.id
|
||||
WHERE p.after_sdr = true AND p.after_tree_r = false
|
||||
),
|
||||
BufferedPoRep AS (
|
||||
SELECT COUNT(p.task_id_porep) - COUNT(t.owner_id) AS buffered_porep_count
|
||||
FROM sectors_sdr_pipeline p
|
||||
LEFT JOIN harmony_task t ON p.task_id_porep = t.id
|
||||
WHERE p.after_tree_r = true AND p.after_porep = false
|
||||
),
|
||||
WaitDealSectors AS (
|
||||
SELECT COUNT(DISTINCT sip.sector_number) AS wait_deal_sectors_count
|
||||
FROM sectors_sdr_initial_pieces sip
|
||||
LEFT JOIN sectors_sdr_pipeline sp ON sip.sp_id = sp.sp_id AND sip.sector_number = sp.sector_number
|
||||
WHERE sp.sector_number IS NULL
|
||||
)
|
||||
SELECT
|
||||
(SELECT buffered_sdr_count FROM BufferedSDR) AS total_buffered_sdr,
|
||||
(SELECT buffered_trees_count FROM BufferedTrees) AS buffered_trees_count,
|
||||
(SELECT buffered_porep_count FROM BufferedPoRep) AS buffered_porep_count
|
||||
`).Scan(&bufferedSDR, &bufferedTrees, &bufferedPoRep)
|
||||
(SELECT buffered_porep_count FROM BufferedPoRep) AS buffered_porep_count,
|
||||
(SELECT wait_deal_sectors_count FROM WaitDealSectors) AS wait_deal_sectors_count
|
||||
`).Scan(&bufferedSDR, &bufferedTrees, &bufferedPoRep, &waitDealSectors)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("counting parked pieces: %w", err)
|
||||
return false, xerrors.Errorf("counting buffered sectors: %w", err)
|
||||
}
|
||||
|
||||
if cfg.MaxQueueSDR != 0 && bufferedSDR > cfg.MaxQueueSDR {
|
||||
var pieceSizes []abi.PaddedPieceSize
|
||||
|
||||
err = tx.Select(&pieceSizes, `SELECT piece_padded_size FROM parked_pieces WHERE complete = false;`)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting in-process pieces")
|
||||
}
|
||||
|
||||
sectors := sectorCount(pieceSizes, abi.PaddedPieceSize(ssize))
|
||||
if cfg.MaxQueueDealSector != 0 && waitDealSectors+sectors > cfg.MaxQueueDealSector {
|
||||
log.Debugw("backpressure", "reason", "too many wait deal sectors", "wait_deal_sectors", waitDealSectors, "max", cfg.MaxQueueDealSector)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if bufferedSDR > cfg.MaxQueueSDR {
|
||||
log.Debugw("backpressure", "reason", "too many SDR tasks", "buffered", bufferedSDR, "max", cfg.MaxQueueSDR)
|
||||
return true, nil
|
||||
}
|
||||
@ -553,3 +594,27 @@ SELECT
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func sectorCount(sizes []abi.PaddedPieceSize, targetSize abi.PaddedPieceSize) int {
|
||||
sort.Slice(sizes, func(i, j int) bool {
|
||||
return sizes[i] > sizes[j]
|
||||
})
|
||||
|
||||
sectors := make([]abi.PaddedPieceSize, 0)
|
||||
|
||||
for _, size := range sizes {
|
||||
placed := false
|
||||
for i := range sectors {
|
||||
if sectors[i]+size <= targetSize {
|
||||
sectors[i] += size
|
||||
placed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !placed {
|
||||
sectors = append(sectors, size)
|
||||
}
|
||||
}
|
||||
|
||||
return len(sectors)
|
||||
}
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@ -33,7 +34,7 @@ type ParkPieceTask struct {
|
||||
max int
|
||||
}
|
||||
|
||||
func NewParkPieceTask(db *harmonydb.DB, sc *ffi.SealCalls, max int) *ParkPieceTask {
|
||||
func NewParkPieceTask(db *harmonydb.DB, sc *ffi.SealCalls, max int) (*ParkPieceTask, error) {
|
||||
pt := &ParkPieceTask{
|
||||
db: db,
|
||||
sc: sc,
|
||||
@ -41,8 +42,20 @@ func NewParkPieceTask(db *harmonydb.DB, sc *ffi.SealCalls, max int) *ParkPieceTa
|
||||
max: max,
|
||||
}
|
||||
|
||||
go pt.pollPieceTasks(context.Background())
|
||||
return pt
|
||||
ctx := context.Background()
|
||||
|
||||
// We should delete all incomplete pieces before we start
|
||||
// as we would have lost reader for these. The RPC caller will get an error
|
||||
// when Curio shuts down before parking a piece. They can always retry.
|
||||
// Leaving these pieces we utilise unnecessary resources in the form of ParkPieceTask
|
||||
|
||||
_, err := db.Exec(ctx, `DELETE FROM parked_pieces WHERE complete = FALSE AND task_id IS NULL`)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to delete incomplete parked pieces: %w", err)
|
||||
}
|
||||
|
||||
go pt.pollPieceTasks(ctx)
|
||||
return pt, nil
|
||||
}
|
||||
|
||||
func (p *ParkPieceTask) pollPieceTasks(ctx context.Context) {
|
||||
@ -126,9 +139,7 @@ func (p *ParkPieceTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (d
|
||||
err = p.db.Select(ctx, &refData, `
|
||||
SELECT data_url, data_headers
|
||||
FROM parked_piece_refs
|
||||
WHERE piece_id = $1 AND data_url IS NOT NULL
|
||||
LIMIT 1
|
||||
`, pieceData.PieceID)
|
||||
WHERE piece_id = $1 AND data_url IS NOT NULL`, pieceData.PieceID)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("fetching reference data: %w", err)
|
||||
}
|
||||
@ -143,28 +154,34 @@ func (p *ParkPieceTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (d
|
||||
return false, xerrors.Errorf("parsing piece raw size: %w", err)
|
||||
}
|
||||
|
||||
if refData[0].DataURL != "" {
|
||||
upr := &seal.UrlPieceReader{
|
||||
Url: refData[0].DataURL,
|
||||
RawSize: pieceRawSize,
|
||||
var merr error
|
||||
|
||||
for i := range refData {
|
||||
if refData[i].DataURL != "" {
|
||||
upr := &seal.UrlPieceReader{
|
||||
Url: refData[0].DataURL,
|
||||
RawSize: pieceRawSize,
|
||||
}
|
||||
defer func() {
|
||||
_ = upr.Close()
|
||||
}()
|
||||
|
||||
pnum := storiface.PieceNumber(pieceData.PieceID)
|
||||
|
||||
if err := p.sc.WritePiece(ctx, &taskID, pnum, pieceRawSize, upr); err != nil {
|
||||
merr = multierror.Append(merr, xerrors.Errorf("write piece: %w", err))
|
||||
continue
|
||||
}
|
||||
|
||||
// Update the piece as complete after a successful write.
|
||||
_, err = p.db.Exec(ctx, `UPDATE parked_pieces SET complete = TRUE task_id = NULL WHERE id = $1`, pieceData.PieceID)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("marking piece as complete: %w", err)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
defer func() {
|
||||
_ = upr.Close()
|
||||
}()
|
||||
|
||||
pnum := storiface.PieceNumber(pieceData.PieceID)
|
||||
|
||||
if err := p.sc.WritePiece(ctx, &taskID, pnum, pieceRawSize, upr); err != nil {
|
||||
return false, xerrors.Errorf("write piece: %w", err)
|
||||
}
|
||||
|
||||
// Update the piece as complete after a successful write.
|
||||
_, err = p.db.Exec(ctx, `UPDATE parked_pieces SET complete = TRUE, task_id = NULL WHERE id = $1`, pieceData.PieceID)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("marking piece as complete: %w", err)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
return false, merr
|
||||
}
|
||||
|
||||
// If no URL is found, this indicates an issue since at least one URL is expected.
|
||||
|
@ -3,7 +3,6 @@ package seal
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
@ -112,14 +111,14 @@ func (p *PoRepTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done
|
||||
|
||||
proof, err := p.sc.PoRepSnark(ctx, sr, sealed, unsealed, sectorParams.TicketValue, abi.InteractiveSealRandomness(rand))
|
||||
if err != nil {
|
||||
end, err := p.recoverErrors(ctx, sectorParams.SpID, sectorParams.SectorNumber, err)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("recover errors: %w", err)
|
||||
}
|
||||
if end {
|
||||
// done, but the error handling has stored a different than success state
|
||||
return true, nil
|
||||
}
|
||||
//end, rerr := p.recoverErrors(ctx, sectorParams.SpID, sectorParams.SectorNumber, err)
|
||||
//if rerr != nil {
|
||||
// return false, xerrors.Errorf("recover errors: %w", rerr)
|
||||
//}
|
||||
//if end {
|
||||
// // done, but the error handling has stored a different than success state
|
||||
// return true, nil
|
||||
//}
|
||||
|
||||
return false, xerrors.Errorf("failed to compute seal proof: %w", err)
|
||||
}
|
||||
@ -175,46 +174,4 @@ func (p *PoRepTask) Adder(taskFunc harmonytask.AddTaskFunc) {
|
||||
p.sp.pollers[pollerPoRep].Set(taskFunc)
|
||||
}
|
||||
|
||||
func (p *PoRepTask) recoverErrors(ctx context.Context, spid, snum int64, cerr error) (end bool, err error) {
|
||||
const (
|
||||
// rust-fil-proofs error strings
|
||||
// https://github.com/filecoin-project/rust-fil-proofs/blob/3f018b51b6327b135830899d237a7ba181942d7e/storage-proofs-porep/src/stacked/vanilla/proof.rs#L454C1-L463
|
||||
errstrInvalidCommD = "Invalid comm_d detected at challenge_index"
|
||||
errstrInvalidCommR = "Invalid comm_r detected at challenge_index"
|
||||
errstrInvalidEncoding = "Invalid encoding proof generated at layer"
|
||||
)
|
||||
|
||||
if cerr == nil {
|
||||
return false, xerrors.Errorf("nil error")
|
||||
}
|
||||
|
||||
switch {
|
||||
case strings.Contains(cerr.Error(), errstrInvalidCommD):
|
||||
fallthrough
|
||||
case strings.Contains(cerr.Error(), errstrInvalidCommR):
|
||||
// todo: it might be more optimal to just retry the Trees compute first.
|
||||
// Invalid CommD/R likely indicates a problem with the data computed in that step
|
||||
// For now for simplicity just retry the whole thing
|
||||
fallthrough
|
||||
case strings.Contains(cerr.Error(), errstrInvalidEncoding):
|
||||
n, err := p.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
|
||||
SET after_porep = FALSE, after_sdr = FALSE, after_tree_d = FALSE,
|
||||
after_tree_r = FALSE, after_tree_c = FALSE
|
||||
WHERE sp_id = $1 AND sector_number = $2`,
|
||||
spid, snum)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("store sdr success: updating pipeline: %w", err)
|
||||
}
|
||||
if n != 1 {
|
||||
return false, xerrors.Errorf("store sdr success: updated %d rows", n)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
|
||||
default:
|
||||
// if end is false the original error will be returned by the caller
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
var _ harmonytask.TaskInterface = &PoRepTask{}
|
||||
|
@ -17,15 +17,25 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/curiosrc/ffi"
|
||||
"github.com/filecoin-project/lotus/lib/filler"
|
||||
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
|
||||
"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
|
||||
"github.com/filecoin-project/lotus/lib/harmony/resources"
|
||||
"github.com/filecoin-project/lotus/storage/paths"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
|
||||
var isDevnet = build.BlockDelaySecs < 30
|
||||
|
||||
func SetDevnet(value bool) {
|
||||
isDevnet = value
|
||||
}
|
||||
|
||||
func GetDevnet() bool {
|
||||
return isDevnet
|
||||
}
|
||||
|
||||
type SDRAPI interface {
|
||||
ChainHead(context.Context) (*types.TipSet, error)
|
||||
StateGetRandomnessFromTickets(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte, types.TipSetKey) (abi.Randomness, error)
|
||||
@ -74,13 +84,14 @@ func (s *SDRTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bo
|
||||
sectorParams := sectorParamsArr[0]
|
||||
|
||||
var pieces []struct {
|
||||
PieceIndex int64 `db:"piece_index"`
|
||||
PieceCID string `db:"piece_cid"`
|
||||
PieceSize int64 `db:"piece_size"`
|
||||
PieceIndex int64 `db:"piece_index"`
|
||||
PieceCID string `db:"piece_cid"`
|
||||
PieceSize int64 `db:"piece_size"`
|
||||
DataRawSize *int64 `db:"data_raw_size"`
|
||||
}
|
||||
|
||||
err = s.db.Select(ctx, &pieces, `
|
||||
SELECT piece_index, piece_cid, piece_size
|
||||
SELECT piece_index, piece_cid, piece_size, data_raw_size
|
||||
FROM sectors_sdr_initial_pieces
|
||||
WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber)
|
||||
if err != nil {
|
||||
@ -94,18 +105,45 @@ func (s *SDRTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bo
|
||||
|
||||
var commd cid.Cid
|
||||
|
||||
var offset abi.UnpaddedPieceSize
|
||||
var allocated abi.UnpaddedPieceSize
|
||||
var pieceInfos []abi.PieceInfo
|
||||
|
||||
if len(pieces) > 0 {
|
||||
pieceInfos := make([]abi.PieceInfo, len(pieces))
|
||||
for i, p := range pieces {
|
||||
for _, p := range pieces {
|
||||
c, err := cid.Parse(p.PieceCID)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("parsing piece cid: %w", err)
|
||||
}
|
||||
|
||||
pieceInfos[i] = abi.PieceInfo{
|
||||
allocated += abi.UnpaddedPieceSize(*p.DataRawSize)
|
||||
|
||||
pads, padLength := ffiwrapper.GetRequiredPadding(offset.Padded(), abi.PaddedPieceSize(p.PieceSize))
|
||||
offset += padLength.Unpadded()
|
||||
|
||||
for _, pad := range pads {
|
||||
pieceInfos = append(pieceInfos, abi.PieceInfo{
|
||||
Size: pad,
|
||||
PieceCID: zerocomm.ZeroPieceCommitment(pad.Unpadded()),
|
||||
})
|
||||
}
|
||||
|
||||
pieceInfos = append(pieceInfos, abi.PieceInfo{
|
||||
Size: abi.PaddedPieceSize(p.PieceSize),
|
||||
PieceCID: c,
|
||||
}
|
||||
})
|
||||
offset += abi.UnpaddedPieceSize(*p.DataRawSize)
|
||||
}
|
||||
|
||||
fillerSize, err := filler.FillersFromRem(abi.PaddedPieceSize(ssize).Unpadded() - allocated)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("failed to calculate the final padding: %w", err)
|
||||
}
|
||||
for _, fil := range fillerSize {
|
||||
pieceInfos = append(pieceInfos, abi.PieceInfo{
|
||||
Size: fil.Padded(),
|
||||
PieceCID: zerocomm.ZeroPieceCommitment(fil),
|
||||
})
|
||||
}
|
||||
|
||||
commd, err = nonffi.GenerateUnsealedCID(sectorParams.RegSealProof, pieceInfos)
|
||||
@ -213,6 +251,7 @@ func (s *SDRTask) TypeDetails() harmonytask.TaskTypeDetails {
|
||||
|
||||
if isDevnet {
|
||||
res.Cost.Ram = 1 << 30
|
||||
res.Cost.Cpu = 1
|
||||
}
|
||||
|
||||
return res
|
||||
|
@ -3,16 +3,22 @@ package seal
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
cborutil "github.com/filecoin-project/go-cbor-util"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/builtin"
|
||||
miner2 "github.com/filecoin-project/go-state-types/builtin/v13/miner"
|
||||
verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg"
|
||||
verifregtypes9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/curiosrc/message"
|
||||
@ -20,6 +26,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
|
||||
"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
|
||||
"github.com/filecoin-project/lotus/lib/harmony/resources"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/storage/ctladdr"
|
||||
)
|
||||
|
||||
@ -28,9 +35,17 @@ type SubmitCommitAPI interface {
|
||||
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)
|
||||
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (big.Int, error)
|
||||
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error)
|
||||
StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes9.AllocationId, tsk types.TipSetKey) (*verifregtypes9.Allocation, error)
|
||||
StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifregtypes9.AllocationId, error)
|
||||
ctladdr.NodeApi
|
||||
}
|
||||
|
||||
type commitConfig struct {
|
||||
maxFee types.FIL
|
||||
RequireActivationSuccess bool
|
||||
RequireNotificationSuccess bool
|
||||
}
|
||||
|
||||
type SubmitCommitTask struct {
|
||||
sp *SealPoller
|
||||
db *harmonydb.DB
|
||||
@ -38,19 +53,24 @@ type SubmitCommitTask struct {
|
||||
|
||||
sender *message.Sender
|
||||
as *multictladdr.MultiAddressSelector
|
||||
|
||||
maxFee types.FIL
|
||||
cfg commitConfig
|
||||
}
|
||||
|
||||
func NewSubmitCommitTask(sp *SealPoller, db *harmonydb.DB, api SubmitCommitAPI, sender *message.Sender, as *multictladdr.MultiAddressSelector, maxFee types.FIL) *SubmitCommitTask {
|
||||
func NewSubmitCommitTask(sp *SealPoller, db *harmonydb.DB, api SubmitCommitAPI, sender *message.Sender, as *multictladdr.MultiAddressSelector, cfg *config.CurioConfig) *SubmitCommitTask {
|
||||
|
||||
cnfg := commitConfig{
|
||||
maxFee: cfg.Fees.MaxCommitGasFee,
|
||||
RequireActivationSuccess: cfg.Subsystems.RequireActivationSuccess,
|
||||
RequireNotificationSuccess: cfg.Subsystems.RequireNotificationSuccess,
|
||||
}
|
||||
|
||||
return &SubmitCommitTask{
|
||||
sp: sp,
|
||||
db: db,
|
||||
api: api,
|
||||
sender: sender,
|
||||
as: as,
|
||||
|
||||
maxFee: maxFee,
|
||||
cfg: cnfg,
|
||||
}
|
||||
}
|
||||
|
||||
@ -76,31 +96,38 @@ func (s *SubmitCommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool)
|
||||
}
|
||||
sectorParams := sectorParamsArr[0]
|
||||
|
||||
var pieces []struct {
|
||||
PieceIndex int64 `db:"piece_index"`
|
||||
PieceCID string `db:"piece_cid"`
|
||||
PieceSize int64 `db:"piece_size"`
|
||||
Proposal json.RawMessage `db:"f05_deal_proposal"`
|
||||
Manifest json.RawMessage `db:"direct_piece_activation_manifest"`
|
||||
DealID abi.DealID `db:"f05_deal_id"`
|
||||
}
|
||||
|
||||
err = s.db.Select(ctx, &pieces, `
|
||||
SELECT piece_index,
|
||||
piece_cid,
|
||||
piece_size,
|
||||
f05_deal_proposal,
|
||||
direct_piece_activation_manifest,
|
||||
COALESCE(f05_deal_id, 0) AS f05_deal_id
|
||||
FROM sectors_sdr_initial_pieces
|
||||
WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting pieces: %w", err)
|
||||
}
|
||||
|
||||
maddr, err := address.NewIDAddress(uint64(sectorParams.SpID))
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting miner address: %w", err)
|
||||
}
|
||||
|
||||
params := miner.ProveCommitSectorParams{
|
||||
SectorNumber: abi.SectorNumber(sectorParams.SectorNumber),
|
||||
Proof: sectorParams.Proof,
|
||||
}
|
||||
|
||||
enc := new(bytes.Buffer)
|
||||
if err := params.MarshalCBOR(enc); err != nil {
|
||||
return false, xerrors.Errorf("could not serialize commit params: %w", err)
|
||||
}
|
||||
|
||||
ts, err := s.api.ChainHead(ctx)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting chain head: %w", err)
|
||||
}
|
||||
|
||||
mi, err := s.api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting miner info: %w", err)
|
||||
}
|
||||
|
||||
pci, err := s.api.StateSectorPreCommitInfo(ctx, maddr, abi.SectorNumber(sectorParams.SectorNumber), ts.Key())
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting precommit info: %w", err)
|
||||
@ -109,6 +136,88 @@ func (s *SubmitCommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool)
|
||||
return false, xerrors.Errorf("precommit info not found on chain")
|
||||
}
|
||||
|
||||
mi, err := s.api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting miner info: %w", err)
|
||||
}
|
||||
|
||||
params := miner.ProveCommitSectors3Params{
|
||||
RequireActivationSuccess: s.cfg.RequireActivationSuccess,
|
||||
RequireNotificationSuccess: s.cfg.RequireNotificationSuccess,
|
||||
}
|
||||
|
||||
var pams []miner.PieceActivationManifest
|
||||
|
||||
for _, piece := range pieces {
|
||||
if piece.Proposal != nil {
|
||||
var prop *market.DealProposal
|
||||
err = json.Unmarshal(piece.Proposal, &prop)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("marshalling json to deal proposal: %w", err)
|
||||
}
|
||||
alloc, err := s.api.StateGetAllocationIdForPendingDeal(ctx, piece.DealID, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting allocation for deal %d: %w", piece.DealID, err)
|
||||
}
|
||||
clid, err := s.api.StateLookupID(ctx, prop.Client, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting client address for deal %d: %w", piece.DealID, err)
|
||||
}
|
||||
|
||||
clientId, err := address.IDFromAddress(clid)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting client address for deal %d: %w", piece.DealID, err)
|
||||
}
|
||||
|
||||
var vac *miner2.VerifiedAllocationKey
|
||||
if alloc != verifregtypes9.NoAllocationID {
|
||||
vac = &miner2.VerifiedAllocationKey{
|
||||
Client: abi.ActorID(clientId),
|
||||
ID: verifreg13.AllocationId(alloc),
|
||||
}
|
||||
}
|
||||
|
||||
payload, err := cborutil.Dump(piece.DealID)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("serializing deal id: %w", err)
|
||||
}
|
||||
|
||||
pams = append(pams, miner.PieceActivationManifest{
|
||||
CID: prop.PieceCID,
|
||||
Size: prop.PieceSize,
|
||||
VerifiedAllocationKey: vac,
|
||||
Notify: []miner2.DataActivationNotification{
|
||||
{
|
||||
Address: market.Address,
|
||||
Payload: payload,
|
||||
},
|
||||
},
|
||||
})
|
||||
} else {
|
||||
var pam *miner.PieceActivationManifest
|
||||
err = json.Unmarshal(piece.Manifest, &pam)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("marshalling json to PieceManifest: %w", err)
|
||||
}
|
||||
err = s.allocationCheck(ctx, pam, pci, abi.ActorID(sectorParams.SpID), ts)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
pams = append(pams, *pam)
|
||||
}
|
||||
}
|
||||
|
||||
params.SectorActivations = append(params.SectorActivations, miner.SectorActivationManifest{
|
||||
SectorNumber: abi.SectorNumber(sectorParams.SectorNumber),
|
||||
Pieces: pams,
|
||||
})
|
||||
params.SectorProofs = append(params.SectorProofs, sectorParams.Proof)
|
||||
|
||||
enc := new(bytes.Buffer)
|
||||
if err := params.MarshalCBOR(enc); err != nil {
|
||||
return false, xerrors.Errorf("could not serialize commit params: %w", err)
|
||||
}
|
||||
|
||||
collateral, err := s.api.StateMinerInitialPledgeCollateral(ctx, maddr, pci.Info, ts.Key())
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting initial pledge collateral: %w", err)
|
||||
@ -127,13 +236,13 @@ func (s *SubmitCommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool)
|
||||
msg := &types.Message{
|
||||
To: maddr,
|
||||
From: a,
|
||||
Method: builtin.MethodsMiner.ProveCommitSector, // todo ddo provecommit3
|
||||
Method: builtin.MethodsMiner.ProveCommitSectors3,
|
||||
Params: enc.Bytes(),
|
||||
Value: collateral, // todo config for pulling from miner balance!!
|
||||
}
|
||||
|
||||
mss := &api.MessageSendSpec{
|
||||
MaxFee: abi.TokenAmount(s.maxFee),
|
||||
MaxFee: abi.TokenAmount(s.cfg.maxFee),
|
||||
}
|
||||
|
||||
mcid, err := s.sender.Send(ctx, msg, mss, "commit")
|
||||
@ -278,4 +387,37 @@ func (s *SubmitCommitTask) Adder(taskFunc harmonytask.AddTaskFunc) {
|
||||
s.sp.pollers[pollerCommitMsg].Set(taskFunc)
|
||||
}
|
||||
|
||||
func (s *SubmitCommitTask) allocationCheck(ctx context.Context, piece *miner.PieceActivationManifest, precomitInfo *miner.SectorPreCommitOnChainInfo, miner abi.ActorID, ts *types.TipSet) error {
|
||||
// skip pieces not claiming an allocation
|
||||
if piece.VerifiedAllocationKey == nil {
|
||||
return nil
|
||||
}
|
||||
addr, err := address.NewIDAddress(uint64(piece.VerifiedAllocationKey.Client))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
alloc, err := s.api.StateGetAllocation(ctx, addr, verifregtypes9.AllocationId(piece.VerifiedAllocationKey.ID), ts.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if alloc == nil {
|
||||
return xerrors.Errorf("no allocation found for piece %s with allocation ID %d", piece.CID.String(), piece.VerifiedAllocationKey.ID)
|
||||
}
|
||||
if alloc.Provider != miner {
|
||||
return xerrors.Errorf("provider id mismatch for piece %s: expected %d and found %d", piece.CID.String(), miner, alloc.Provider)
|
||||
}
|
||||
if alloc.Size != piece.Size {
|
||||
return xerrors.Errorf("size mismatch for piece %s: expected %d and found %d", piece.CID.String(), piece.Size, alloc.Size)
|
||||
}
|
||||
if precomitInfo.Info.Expiration < ts.Height()+alloc.TermMin {
|
||||
return xerrors.Errorf("sector expiration %d is before than allocation TermMin %d for piece %s", precomitInfo.Info.Expiration, ts.Height()+alloc.TermMin, piece.CID.String())
|
||||
}
|
||||
if precomitInfo.Info.Expiration > ts.Height()+alloc.TermMax {
|
||||
return xerrors.Errorf("sector expiration %d is later than allocation TermMax %d for piece %s", precomitInfo.Info.Expiration, ts.Height()+alloc.TermMax, piece.CID.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ harmonytask.TaskInterface = &SubmitCommitTask{}
|
||||
|
@ -120,44 +120,41 @@ func (s *SubmitPrecommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bo
|
||||
|
||||
{
|
||||
var pieces []struct {
|
||||
PieceIndex int64 `db:"piece_index"`
|
||||
PieceCID string `db:"piece_cid"`
|
||||
PieceSize int64 `db:"piece_size"`
|
||||
|
||||
F05DealID int64 `db:"f05_deal_id"`
|
||||
F05DealEndEpoch int64 `db:"f05_deal_end_epoch"`
|
||||
F05DealStartEpoch int64 `db:"f05_deal_start_epoch"`
|
||||
PieceIndex int64 `db:"piece_index"`
|
||||
PieceCID string `db:"piece_cid"`
|
||||
PieceSize int64 `db:"piece_size"`
|
||||
DealStartEpoch int64 `db:"deal_start_epoch"`
|
||||
DealEndEpoch int64 `db:"deal_end_epoch"`
|
||||
}
|
||||
|
||||
err = s.db.Select(ctx, &pieces, `
|
||||
SELECT piece_index, piece_cid, piece_size, f05_deal_id, f05_deal_end_epoch, f05_deal_start_epoch
|
||||
SELECT piece_index,
|
||||
piece_cid,
|
||||
piece_size,
|
||||
COALESCE(f05_deal_end_epoch, direct_end_epoch, 0) AS deal_end_epoch,
|
||||
COALESCE(f05_deal_start_epoch, direct_start_epoch, 0) AS deal_start_epoch
|
||||
FROM sectors_sdr_initial_pieces
|
||||
WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("getting pieces: %w", err)
|
||||
}
|
||||
|
||||
if len(pieces) > 1 {
|
||||
return false, xerrors.Errorf("too many pieces") // todo support multiple pieces
|
||||
}
|
||||
|
||||
if len(pieces) > 0 {
|
||||
params.Sectors[0].UnsealedCid = &unsealedCID
|
||||
params.Sectors[0].Expiration = abi.ChainEpoch(pieces[0].F05DealEndEpoch)
|
||||
|
||||
if abi.ChainEpoch(pieces[0].F05DealStartEpoch) < head.Height() {
|
||||
// deal start epoch is in the past, can't precommit this sector anymore
|
||||
_, perr := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
|
||||
for _, p := range pieces {
|
||||
if p.DealStartEpoch > 0 && abi.ChainEpoch(p.DealStartEpoch) < head.Height() {
|
||||
// deal start epoch is in the past, can't precommit this sector anymore
|
||||
_, perr := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
|
||||
SET failed = TRUE, failed_at = NOW(), failed_reason = 'past-start-epoch', failed_reason_msg = 'precommit: start epoch is in the past', task_id_precommit_msg = NULL
|
||||
WHERE task_id_precommit_msg = $1`, taskID)
|
||||
if perr != nil {
|
||||
return false, xerrors.Errorf("persisting precommit start epoch expiry: %w", perr)
|
||||
if perr != nil {
|
||||
return false, xerrors.Errorf("persisting precommit start epoch expiry: %w", perr)
|
||||
}
|
||||
return true, xerrors.Errorf("deal start epoch is in the past")
|
||||
}
|
||||
if p.DealEndEpoch > 0 && abi.ChainEpoch(p.DealEndEpoch) > params.Sectors[0].Expiration {
|
||||
params.Sectors[0].Expiration = abi.ChainEpoch(p.DealEndEpoch)
|
||||
}
|
||||
return true, xerrors.Errorf("deal start epoch is in the past")
|
||||
}
|
||||
|
||||
for _, p := range pieces {
|
||||
params.Sectors[0].DealIDs = append(params.Sectors[0].DealIDs, abi.DealID(p.F05DealID))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -16,10 +16,12 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/curiosrc/ffi"
|
||||
"github.com/filecoin-project/lotus/lib/filler"
|
||||
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
|
||||
"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
|
||||
"github.com/filecoin-project/lotus/lib/harmony/resources"
|
||||
"github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
|
||||
@ -164,20 +166,37 @@ func (t *TreeDTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done
|
||||
}()
|
||||
|
||||
if len(pieces) > 0 {
|
||||
pieceInfos := make([]abi.PieceInfo, len(pieces))
|
||||
pieceReaders := make([]io.Reader, len(pieces))
|
||||
var pieceInfos []abi.PieceInfo
|
||||
var pieceReaders []io.Reader
|
||||
var offset abi.UnpaddedPieceSize
|
||||
var allocated abi.UnpaddedPieceSize
|
||||
|
||||
for i, p := range pieces {
|
||||
for _, p := range pieces {
|
||||
// make pieceInfo
|
||||
c, err := cid.Parse(p.PieceCID)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("parsing piece cid: %w", err)
|
||||
}
|
||||
|
||||
pieceInfos[i] = abi.PieceInfo{
|
||||
allocated += abi.UnpaddedPieceSize(*p.DataRawSize)
|
||||
|
||||
pads, padLength := ffiwrapper.GetRequiredPadding(offset.Padded(), abi.PaddedPieceSize(p.PieceSize))
|
||||
offset += padLength.Unpadded()
|
||||
|
||||
for _, pad := range pads {
|
||||
pieceInfos = append(pieceInfos, abi.PieceInfo{
|
||||
Size: pad,
|
||||
PieceCID: zerocomm.ZeroPieceCommitment(pad.Unpadded()),
|
||||
})
|
||||
pieceReaders = append(pieceReaders, nullreader.NewNullReader(pad.Unpadded()))
|
||||
}
|
||||
|
||||
pieceInfos = append(pieceInfos, abi.PieceInfo{
|
||||
Size: abi.PaddedPieceSize(p.PieceSize),
|
||||
PieceCID: c,
|
||||
}
|
||||
})
|
||||
|
||||
offset += abi.UnpaddedPieceSize(*p.DataRawSize)
|
||||
|
||||
// make pieceReader
|
||||
if p.DataUrl != nil {
|
||||
@ -216,19 +235,33 @@ func (t *TreeDTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done
|
||||
|
||||
closers = append(closers, pr)
|
||||
|
||||
pieceReaders[i], _ = padreader.New(pr, uint64(*p.DataRawSize))
|
||||
reader, _ := padreader.New(pr, uint64(*p.DataRawSize))
|
||||
pieceReaders = append(pieceReaders, reader)
|
||||
} else {
|
||||
pieceReaders[i], _ = padreader.New(&UrlPieceReader{
|
||||
reader, _ := padreader.New(&UrlPieceReader{
|
||||
Url: dataUrl,
|
||||
RawSize: *p.DataRawSize,
|
||||
}, uint64(*p.DataRawSize))
|
||||
pieceReaders = append(pieceReaders, reader)
|
||||
}
|
||||
|
||||
} else { // padding piece (w/o fr32 padding, added in TreeD)
|
||||
pieceReaders[i] = nullreader.NewNullReader(abi.PaddedPieceSize(p.PieceSize).Unpadded())
|
||||
pieceReaders = append(pieceReaders, nullreader.NewNullReader(abi.PaddedPieceSize(p.PieceSize).Unpadded()))
|
||||
}
|
||||
}
|
||||
|
||||
fillerSize, err := filler.FillersFromRem(abi.PaddedPieceSize(ssize).Unpadded() - allocated)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("failed to calculate the final padding: %w", err)
|
||||
}
|
||||
for _, fil := range fillerSize {
|
||||
pieceInfos = append(pieceInfos, abi.PieceInfo{
|
||||
Size: fil.Padded(),
|
||||
PieceCID: zerocomm.ZeroPieceCommitment(fil),
|
||||
})
|
||||
pieceReaders = append(pieceReaders, nullreader.NewNullReader(fil))
|
||||
}
|
||||
|
||||
commd, err = nonffi.GenerateUnsealedCID(sectorParams.RegSealProof, pieceInfos)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("computing CommD: %w", err)
|
||||
|
@ -71,11 +71,15 @@ func (t *TreeRCTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done
|
||||
}
|
||||
|
||||
// R / C
|
||||
sealed, _, err := t.sc.TreeRC(ctx, &taskID, sref, commd)
|
||||
sealed, unsealed, err := t.sc.TreeRC(ctx, &taskID, sref, commd)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("computing tree r and c: %w", err)
|
||||
}
|
||||
|
||||
if unsealed != commd {
|
||||
return false, xerrors.Errorf("commd %s does match unsealed %s", commd.String(), unsealed.String())
|
||||
}
|
||||
|
||||
// todo synth porep
|
||||
|
||||
// todo porep challenge check
|
||||
|
@ -126,7 +126,12 @@ func (c *cfg) getSectors(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// Get all pieces
|
||||
apihelper.OrHTTPFail(w, c.DB.Select(r.Context(), &pieces, `SELECT
|
||||
sp_id, sector_number, piece_size, f05_deal_id, f05_deal_proposal, direct_piece_activation_manifest
|
||||
sp_id,
|
||||
sector_number,
|
||||
piece_size,
|
||||
COALESCE(f05_deal_id, 0) AS f05_deal_id,
|
||||
f05_deal_proposal,
|
||||
direct_piece_activation_manifest
|
||||
FROM sectors_sdr_initial_pieces
|
||||
ORDER BY sp_id, sector_number`))
|
||||
pieceIndex := map[sectorID][]int{}
|
||||
@ -144,7 +149,7 @@ func (c *cfg) getSectors(w http.ResponseWriter, r *http.Request) {
|
||||
if i, ok := sectorIdx[sectorID{minerID, uint64(st.SectorNumber)}]; ok {
|
||||
sectors[i].IsOnChain = true
|
||||
sectors[i].ExpiresAt = st.Expiration
|
||||
sectors[i].IsFilPlus = st.VerifiedDealWeight.GreaterThan(st.DealWeight)
|
||||
sectors[i].IsFilPlus = st.VerifiedDealWeight.GreaterThan(big.NewInt(0))
|
||||
if ss, err := st.SealProof.SectorSize(); err == nil {
|
||||
sectors[i].SealInfo = ss.ShortString()
|
||||
}
|
||||
@ -187,23 +192,14 @@ func (c *cfg) getSectors(w http.ResponseWriter, r *http.Request) {
|
||||
rdw := big.Add(st.DealWeight, st.VerifiedDealWeight)
|
||||
dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
|
||||
vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(verifiedPowerGainMul)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
|
||||
for _, deal := range st.DealIDs {
|
||||
|
||||
if deal > 0 {
|
||||
f05++
|
||||
}
|
||||
}
|
||||
// DDO info is not on chain
|
||||
for _, piece := range pieces {
|
||||
if piece.Manifest != nil {
|
||||
//var pam *miner.PieceActivationManifest
|
||||
//apihelper.OrHTTPFail(w, json.Unmarshal(piece.Manifest, pam))
|
||||
//dw += float64(pam.Size)
|
||||
//if pam.VerifiedAllocationKey != nil {
|
||||
// vp += float64(pam.Size) * verifiedPowerGainMul
|
||||
//}
|
||||
// DDO sectors don't have deal info on chain
|
||||
for _, p := range pi {
|
||||
if p.Manifest != nil {
|
||||
ddo++
|
||||
}
|
||||
if p.Proposal != nil {
|
||||
f05++
|
||||
}
|
||||
}
|
||||
}
|
||||
sectors[i].DealWeight = "CC"
|
||||
@ -221,7 +217,7 @@ func (c *cfg) getSectors(w http.ResponseWriter, r *http.Request) {
|
||||
SectorNum: int64(chainy.onChain.SectorNumber),
|
||||
IsOnChain: true,
|
||||
ExpiresAt: chainy.onChain.Expiration,
|
||||
IsFilPlus: chainy.onChain.VerifiedDealWeight.GreaterThan(chainy.onChain.DealWeight),
|
||||
IsFilPlus: chainy.onChain.VerifiedDealWeight.GreaterThan(big.NewInt(0)),
|
||||
Proving: chainy.active,
|
||||
Flag: true, // All such sectors should be flagged to be terminated
|
||||
}
|
||||
@ -254,19 +250,19 @@ func (c *cfg) getSectors(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if len(pi) > 0 {
|
||||
for _, piece := range pi {
|
||||
if piece.Proposal != nil {
|
||||
for _, p := range pi {
|
||||
if p.Proposal != nil {
|
||||
var prop *market.DealProposal
|
||||
apihelper.OrHTTPFail(w, json.Unmarshal(piece.Proposal, &prop))
|
||||
apihelper.OrHTTPFail(w, json.Unmarshal(p.Proposal, &prop))
|
||||
dw += float64(prop.PieceSize)
|
||||
if prop.VerifiedDeal {
|
||||
vp += float64(prop.PieceSize) * verifiedPowerGainMul
|
||||
}
|
||||
f05++
|
||||
}
|
||||
if piece.Manifest != nil {
|
||||
if p.Manifest != nil {
|
||||
var pam *miner.PieceActivationManifest
|
||||
apihelper.OrHTTPFail(w, json.Unmarshal(piece.Manifest, &pam))
|
||||
apihelper.OrHTTPFail(w, json.Unmarshal(p.Manifest, &pam))
|
||||
dw += float64(pam.Size)
|
||||
if pam.VerifiedAllocationKey != nil {
|
||||
vp += float64(pam.Size) * verifiedPowerGainMul
|
||||
@ -275,6 +271,7 @@ func (c *cfg) getSectors(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
}
|
||||
sectors[i].IsFilPlus = vp > 0
|
||||
if dw > 0 {
|
||||
sectors[i].DealWeight = fmt.Sprintf("%s", units.BytesSize(dw))
|
||||
} else if vp > 0 {
|
||||
|
@ -531,6 +531,7 @@ USAGE:
|
||||
|
||||
COMMANDS:
|
||||
rpc-info
|
||||
seal start sealing a deal sector early
|
||||
help, h Shows a list of commands or help for one command
|
||||
|
||||
OPTIONS:
|
||||
@ -550,6 +551,21 @@ OPTIONS:
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### curio market seal
|
||||
```
|
||||
NAME:
|
||||
curio market seal - start sealing a deal sector early
|
||||
|
||||
USAGE:
|
||||
curio market seal [command options] [arguments...]
|
||||
|
||||
OPTIONS:
|
||||
--actor value Specify actor address to start sealing sectors for
|
||||
--layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base
|
||||
--synthetic Use synthetic PoRep (default: false)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
## curio fetch-params
|
||||
```
|
||||
NAME:
|
||||
|
@ -245,6 +245,7 @@ OPTIONS:
|
||||
--owner value, -o value owner key to use for new miner initialisation
|
||||
--from value, -f value address to send actor(miner) creation message from
|
||||
--sector-size value specify sector size to use for new miner initialisation
|
||||
--confidence value number of block confirmations to wait for (default: 5)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
|
@ -123,6 +123,16 @@
|
||||
# type: bool
|
||||
#EnableSendCommitMsg = false
|
||||
|
||||
# Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3).
|
||||
#
|
||||
# type: bool
|
||||
#RequireActivationSuccess = true
|
||||
|
||||
# Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3).
|
||||
#
|
||||
# type: bool
|
||||
#RequireNotificationSuccess = true
|
||||
|
||||
# EnableMoveStorage enables the move-into-long-term-storage task to run on this curio instance.
|
||||
# This tasks should only be enabled on nodes with long-term storage.
|
||||
#
|
||||
@ -319,13 +329,22 @@
|
||||
|
||||
|
||||
[Ingest]
|
||||
# Maximum number of sectors that can be queued waiting for deals to start processing.
|
||||
# 0 = unlimited
|
||||
# Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
|
||||
# The DealSector queue includes deals which are ready to enter the sealing pipeline but are not yet part of it -
|
||||
# size of this queue will also impact the maximum number of ParkPiece tasks which can run concurrently.
|
||||
# DealSector queue is the first queue in the sealing pipeline, meaning that it should be used as the primary backpressure mechanism.
|
||||
#
|
||||
# type: int
|
||||
#MaxQueueDealSector = 8
|
||||
|
||||
# Maximum number of sectors that can be queued waiting for SDR to start processing.
|
||||
# 0 = unlimited
|
||||
# Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
|
||||
# The SDR queue includes deals which are in the process of entering the sealing pipeline - size of this queue
|
||||
# will also impact the maximum number of ParkPiece tasks which can run concurrently.
|
||||
#
|
||||
# SDR queue is the first queue in the sealing pipeline, meaning that it should be used as the primary backpressure mechanism.
|
||||
# The SDR queue includes deals which are in the process of entering the sealing pipeline. In case of the SDR tasks it is
|
||||
# possible that this queue grows more than this limit(CC sectors), the backpressure is only applied to sectors
|
||||
# entering the pipeline.
|
||||
#
|
||||
# type: int
|
||||
#MaxQueueSDR = 8
|
||||
@ -348,6 +367,11 @@
|
||||
# type: int
|
||||
#MaxQueuePoRep = 0
|
||||
|
||||
# Maximum time an open deal sector should wait for more deal before it starts sealing
|
||||
#
|
||||
# type: Duration
|
||||
#MaxDealWaitTime = "1h0m0s"
|
||||
|
||||
|
||||
[Journal]
|
||||
# Events of the form: "system1:event1,system1:event2[,...]"
|
||||
|
@ -2,19 +2,44 @@ package itests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
"github.com/gbrlsnchs/jwt/v3"
|
||||
"github.com/google/uuid"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-jsonrpc"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/api/v1api"
|
||||
miner2 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/cli/spcli"
|
||||
"github.com/filecoin-project/lotus/cmd/curio/deps"
|
||||
"github.com/filecoin-project/lotus/cmd/curio/rpc"
|
||||
"github.com/filecoin-project/lotus/cmd/curio/tasks"
|
||||
"github.com/filecoin-project/lotus/curiosrc/market/lmrpc"
|
||||
"github.com/filecoin-project/lotus/curiosrc/seal"
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
|
||||
"github.com/filecoin-project/lotus/node"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
|
||||
func TestCurioNewActor(t *testing.T) {
|
||||
@ -65,3 +90,297 @@ func TestCurioNewActor(t *testing.T) {
|
||||
|
||||
require.Contains(t, baseCfg.Addresses[0].MinerAddresses, maddr.String())
|
||||
}
|
||||
|
||||
func TestCurioHappyPath(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
full, miner, esemble := kit.EnsembleMinimal(t,
|
||||
kit.LatestActorsAt(-1),
|
||||
kit.WithSectorIndexDB(),
|
||||
)
|
||||
|
||||
esemble.Start()
|
||||
blockTime := 100 * time.Millisecond
|
||||
esemble.BeginMining(blockTime)
|
||||
|
||||
err := miner.LogSetLevel(ctx, "*", "ERROR")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = full.LogSetLevel(ctx, "*", "ERROR")
|
||||
require.NoError(t, err)
|
||||
|
||||
db := miner.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
|
||||
|
||||
token, err := full.AuthNew(ctx, api.AllPermissions)
|
||||
require.NoError(t, err)
|
||||
|
||||
fapi := fmt.Sprintf("%s:%s", string(token), full.ListenAddr)
|
||||
|
||||
var titles []string
|
||||
err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, titles)
|
||||
require.NotContains(t, titles, "base")
|
||||
|
||||
addr := miner.OwnerKey.Address
|
||||
sectorSizeInt, err := units.RAMInBytes("2KiB")
|
||||
require.NoError(t, err)
|
||||
|
||||
maddr, err := spcli.CreateStorageMiner(ctx, full, addr, addr, addr, abi.SectorSize(sectorSizeInt), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = deps.CreateMinerConfig(ctx, full, db, []string{maddr.String()}, fapi)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, titles, "base")
|
||||
baseCfg := config.DefaultCurioConfig()
|
||||
var baseText string
|
||||
|
||||
err = db.QueryRow(ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText)
|
||||
require.NoError(t, err)
|
||||
_, err = deps.LoadConfigWithUpgrades(baseText, baseCfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotNil(t, baseCfg.Addresses)
|
||||
require.GreaterOrEqual(t, len(baseCfg.Addresses), 1)
|
||||
|
||||
require.Contains(t, baseCfg.Addresses[0].MinerAddresses, maddr.String())
|
||||
|
||||
temp := os.TempDir()
|
||||
dir, err := os.MkdirTemp(temp, "curio")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.Remove(dir)
|
||||
}()
|
||||
|
||||
capi, enginerTerm, closure, finishCh := ConstructCurioTest(ctx, t, dir, db, full, maddr, baseCfg)
|
||||
defer enginerTerm()
|
||||
defer closure()
|
||||
|
||||
mid, err := address.IDFromAddress(maddr)
|
||||
require.NoError(t, err)
|
||||
|
||||
mi, err := full.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
nv, err := full.StateNetworkVersion(ctx, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
wpt := mi.WindowPoStProofType
|
||||
spt, err := miner2.PreferredSealProofTypeFromWindowPoStType(nv, wpt, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
num, err := seal.AllocateSectorNumbers(ctx, full, db, maddr, 1, func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) {
|
||||
for _, n := range numbers {
|
||||
_, err := tx.Exec("insert into sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) values ($1, $2, $3)", mid, n, spt)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err)
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
require.Len(t, num, 1)
|
||||
// TODO: add DDO deal, f05 deal 2 MiB each in the sector
|
||||
|
||||
var sectorParamsArr []struct {
|
||||
SpID int64 `db:"sp_id"`
|
||||
SectorNumber int64 `db:"sector_number"`
|
||||
}
|
||||
|
||||
require.Eventuallyf(t, func() bool {
|
||||
err = db.Select(ctx, §orParamsArr, `
|
||||
SELECT sp_id, sector_number
|
||||
FROM sectors_sdr_pipeline
|
||||
WHERE after_commit_msg_success = True`)
|
||||
require.NoError(t, err)
|
||||
return len(sectorParamsArr) == 1
|
||||
}, 5*time.Minute, 1*time.Second, "sector did not finish sealing in 5 minutes")
|
||||
|
||||
require.Equal(t, sectorParamsArr[0].SectorNumber, int64(0))
|
||||
require.Equal(t, sectorParamsArr[0].SpID, int64(mid))
|
||||
|
||||
_ = capi.Shutdown(ctx)
|
||||
|
||||
<-finishCh
|
||||
}
|
||||
|
||||
func createCliContext(dir string) (*cli.Context, error) {
|
||||
// Define flags for the command
|
||||
flags := []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "listen",
|
||||
Usage: "host address and port the worker api will listen on",
|
||||
Value: "0.0.0.0:12300",
|
||||
EnvVars: []string{"LOTUS_WORKER_LISTEN"},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "nosync",
|
||||
Usage: "don't check full-node sync status",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "halt-after-init",
|
||||
Usage: "only run init, then return",
|
||||
Hidden: true,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "manage-fdlimit",
|
||||
Usage: "manage open file limit",
|
||||
Value: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "storage-json",
|
||||
Usage: "path to json file containing storage config",
|
||||
Value: "~/.curio/storage.json",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "journal",
|
||||
Usage: "path to journal files",
|
||||
Value: "~/.curio/",
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "layers",
|
||||
Aliases: []string{"l", "layer"},
|
||||
Usage: "list of layers to be interpreted (atop defaults)",
|
||||
},
|
||||
}
|
||||
|
||||
// Set up the command with flags
|
||||
command := &cli.Command{
|
||||
Name: "simulate",
|
||||
Flags: flags,
|
||||
Action: func(c *cli.Context) error {
|
||||
fmt.Println("Listen address:", c.String("listen"))
|
||||
fmt.Println("No-sync:", c.Bool("nosync"))
|
||||
fmt.Println("Halt after init:", c.Bool("halt-after-init"))
|
||||
fmt.Println("Manage file limit:", c.Bool("manage-fdlimit"))
|
||||
fmt.Println("Storage config path:", c.String("storage-json"))
|
||||
fmt.Println("Journal path:", c.String("journal"))
|
||||
fmt.Println("Layers:", c.StringSlice("layers"))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// Create a FlagSet and populate it
|
||||
set := flag.NewFlagSet("test", flag.ContinueOnError)
|
||||
for _, f := range flags {
|
||||
if err := f.Apply(set); err != nil {
|
||||
return nil, xerrors.Errorf("Error applying flag: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
curioDir := path.Join(dir, "curio")
|
||||
cflag := fmt.Sprintf("--storage-json=%s", curioDir)
|
||||
|
||||
storage := path.Join(dir, "storage.json")
|
||||
sflag := fmt.Sprintf("--journal=%s", storage)
|
||||
|
||||
// Parse the flags with test values
|
||||
err := set.Parse([]string{"--listen=0.0.0.0:12345", "--nosync", "--manage-fdlimit", sflag, cflag, "--layers=seal", "--layers=post"})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Error setting flag: %s\n", err)
|
||||
}
|
||||
|
||||
// Create a cli.Context from the FlagSet
|
||||
app := cli.NewApp()
|
||||
ctx := cli.NewContext(app, set, nil)
|
||||
ctx.Command = command
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
func ConstructCurioTest(ctx context.Context, t *testing.T, dir string, db *harmonydb.DB, full v1api.FullNode, maddr address.Address, cfg *config.CurioConfig) (api.Curio, func(), jsonrpc.ClientCloser, <-chan struct{}) {
|
||||
cctx, err := createCliContext(dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
shutdownChan := make(chan struct{})
|
||||
|
||||
{
|
||||
var ctxclose func()
|
||||
ctx, ctxclose = context.WithCancel(ctx)
|
||||
go func() {
|
||||
<-shutdownChan
|
||||
ctxclose()
|
||||
}()
|
||||
}
|
||||
|
||||
dependencies := &deps.Deps{}
|
||||
dependencies.DB = db
|
||||
dependencies.Full = full
|
||||
seal.SetDevnet(true)
|
||||
err = os.Setenv("CURIO_REPO_PATH", dir)
|
||||
require.NoError(t, err)
|
||||
err = dependencies.PopulateRemainingDeps(ctx, cctx, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
taskEngine, err := tasks.StartTasks(ctx, dependencies)
|
||||
require.NoError(t, err)
|
||||
|
||||
dependencies.Cfg.Subsystems.BoostAdapters = []string{fmt.Sprintf("%s:127.0.0.1:32000", maddr)}
|
||||
err = lmrpc.ServeCurioMarketRPCFromConfig(dependencies.DB, dependencies.Full, dependencies.Cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
go func() {
|
||||
err = rpc.ListenAndServe(ctx, dependencies, shutdownChan) // Monitor for shutdown.
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
finishCh := node.MonitorShutdown(shutdownChan)
|
||||
|
||||
var machines []string
|
||||
err = db.Select(ctx, &machines, `select host_and_port from harmony_machines`)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, machines, 1)
|
||||
laddr, err := net.ResolveTCPAddr("tcp", machines[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
ma, err := manet.FromNetAddr(laddr)
|
||||
require.NoError(t, err)
|
||||
|
||||
var apiToken []byte
|
||||
{
|
||||
type jwtPayload struct {
|
||||
Allow []auth.Permission
|
||||
}
|
||||
|
||||
p := jwtPayload{
|
||||
Allow: api.AllPermissions,
|
||||
}
|
||||
|
||||
sk, err := base64.StdEncoding.DecodeString(cfg.Apis.StorageRPCSecret)
|
||||
require.NoError(t, err)
|
||||
|
||||
apiToken, err = jwt.Sign(&p, jwt.NewHS256(sk))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
ctoken := fmt.Sprintf("%s:%s", string(apiToken), ma)
|
||||
err = os.Setenv("CURIO_API_INFO", ctoken)
|
||||
require.NoError(t, err)
|
||||
|
||||
capi, ccloser, err := rpc.GetCurioAPI(&cli.Context{})
|
||||
require.NoError(t, err)
|
||||
|
||||
scfg := storiface.LocalStorageMeta{
|
||||
ID: storiface.ID(uuid.New().String()),
|
||||
Weight: 10,
|
||||
CanSeal: true,
|
||||
CanStore: true,
|
||||
MaxStorage: 0,
|
||||
Groups: []string{},
|
||||
AllowTo: []string{},
|
||||
}
|
||||
|
||||
err = capi.StorageInit(ctx, dir, scfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = capi.StorageAddLocal(ctx, dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = capi.LogSetLevel(ctx, "harmonytask", "DEBUG")
|
||||
|
||||
return capi, taskEngine.GracefullyTerminate, ccloser, finishCh
|
||||
}
|
||||
|
43
lib/filler/filler.go
Normal file
43
lib/filler/filler.go
Normal file
@ -0,0 +1,43 @@
|
||||
package filler
|
||||
|
||||
import (
|
||||
"math/bits"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
)
|
||||
|
||||
func FillersFromRem(in abi.UnpaddedPieceSize) ([]abi.UnpaddedPieceSize, error) {
|
||||
// Convert to in-sector bytes for easier math:
|
||||
//
|
||||
// Sector size to user bytes ratio is constant, e.g. for 1024B we have 1016B
|
||||
// of user-usable data.
|
||||
//
|
||||
// (1024/1016 = 128/127)
|
||||
//
|
||||
// Given that we can get sector size by simply adding 1/127 of the user
|
||||
// bytes
|
||||
//
|
||||
// (we convert to sector bytes as they are nice round binary numbers)
|
||||
|
||||
toFill := uint64(in + (in / 127))
|
||||
|
||||
// We need to fill the sector with pieces that are powers of 2. Conveniently
|
||||
// computers store numbers in binary, which means we can look at 1s to get
|
||||
// all the piece sizes we need to fill the sector. It also means that number
|
||||
// of pieces is the number of 1s in the number of remaining bytes to fill
|
||||
out := make([]abi.UnpaddedPieceSize, bits.OnesCount64(toFill))
|
||||
for i := range out {
|
||||
// Extract the next lowest non-zero bit
|
||||
next := bits.TrailingZeros64(toFill)
|
||||
psize := uint64(1) << next
|
||||
// e.g: if the number is 0b010100, psize will be 0b000100
|
||||
|
||||
// set that bit to 0 by XORing it, so the next iteration looks at the
|
||||
// next bit
|
||||
toFill ^= psize
|
||||
|
||||
// Add the piece size to the list of pieces we need to create
|
||||
out[i] = abi.PaddedPieceSize(psize).Unpadded()
|
||||
}
|
||||
return out, nil
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
package sealing
|
||||
package filler
|
||||
|
||||
import (
|
||||
"testing"
|
||||
@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func testFill(t *testing.T, n abi.UnpaddedPieceSize, exp []abi.UnpaddedPieceSize) {
|
||||
f, err := fillersFromRem(n)
|
||||
f, err := FillersFromRem(n)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, exp, f)
|
||||
|
@ -278,7 +278,8 @@ func (db *DB) upgrade() error {
|
||||
logger.Error("weird embed file read err")
|
||||
return err
|
||||
}
|
||||
for _, s := range strings.Split(string(file), ";") { // Implement the changes.
|
||||
|
||||
for _, s := range parseSQLStatements(string(file)) { // Implement the changes.
|
||||
if len(strings.TrimSpace(s)) == 0 {
|
||||
continue
|
||||
}
|
||||
@ -299,3 +300,40 @@ func (db *DB) upgrade() error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseSQLStatements(sqlContent string) []string {
|
||||
var statements []string
|
||||
var currentStatement strings.Builder
|
||||
|
||||
lines := strings.Split(sqlContent, "\n")
|
||||
var inFunction bool
|
||||
|
||||
for _, line := range lines {
|
||||
trimmedLine := strings.TrimSpace(line)
|
||||
if trimmedLine == "" || strings.HasPrefix(trimmedLine, "--") {
|
||||
// Skip empty lines and comments.
|
||||
continue
|
||||
}
|
||||
|
||||
// Detect function blocks starting or ending.
|
||||
if strings.Contains(trimmedLine, "$$") {
|
||||
inFunction = !inFunction
|
||||
}
|
||||
|
||||
// Add the line to the current statement.
|
||||
currentStatement.WriteString(line + "\n")
|
||||
|
||||
// If we're not in a function and the line ends with a semicolon, or we just closed a function block.
|
||||
if (!inFunction && strings.HasSuffix(trimmedLine, ";")) || (strings.Contains(trimmedLine, "$$") && !inFunction) {
|
||||
statements = append(statements, currentStatement.String())
|
||||
currentStatement.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
// Add any remaining statement not followed by a semicolon (should not happen in well-formed SQL but just in case).
|
||||
if currentStatement.Len() > 0 {
|
||||
statements = append(statements, currentStatement.String())
|
||||
}
|
||||
|
||||
return statements
|
||||
}
|
||||
|
@ -118,6 +118,12 @@ create table sectors_sdr_initial_pieces (
    -- direct_end_epoch bigint,
    -- direct_piece_activation_manifest jsonb,

    -- created_at added in 20240508-open-deal-sectors.sql
    -- created_at timestamp,

    -- open_sector_pieces table is a copy of this
    -- all alters should happen on both tables except constraints

    -- foreign key
    foreign key (sp_id, sector_number) references sectors_sdr_pipeline (sp_id, sector_number) on delete cascade,
206
lib/harmony/harmonydb/sql/20240508-open-deal-sectors.sql
Normal file
@ -0,0 +1,206 @@
ALTER TABLE sectors_sdr_initial_pieces
    ADD COLUMN created_at TIMESTAMP NOT NULL DEFAULT current_timestamp;

create table open_sector_pieces (
    sp_id bigint not null,
    sector_number bigint not null,

    piece_index bigint not null,
    piece_cid text not null,
    piece_size bigint not null, -- padded size

    -- data source
    data_url text not null,
    data_headers jsonb not null default '{}',
    data_raw_size bigint not null,
    data_delete_on_finalize bool not null,

    -- deal info
    f05_publish_cid text,
    f05_deal_id bigint,
    f05_deal_proposal jsonb,
    f05_deal_start_epoch bigint,
    f05_deal_end_epoch bigint,

    -- ddo deal info
    -- added in 20240402-sdr-pipeline-ddo-deal-info.sql
    direct_start_epoch bigint,
    direct_end_epoch bigint,
    direct_piece_activation_manifest jsonb,

    -- created_at added in 20240508-open-deal-sectors.sql
    created_at timestamp NOT NULL DEFAULT current_timestamp,

    -- sectors_sdr_initial_pieces table is a copy of this
    -- all alters should happen on both tables except constraints

    primary key (sp_id, sector_number, piece_index)
);

CREATE OR REPLACE FUNCTION insert_sector_market_piece(
    v_sp_id bigint,
    v_sector_number bigint,
    v_piece_index bigint,
    v_piece_cid text,
    v_piece_size bigint,
    v_data_url text,
    v_data_headers jsonb,
    v_data_raw_size bigint,
    v_data_delete_on_finalize boolean,
    v_f05_publish_cid text,
    v_f05_deal_id bigint,
    v_f05_deal_proposal jsonb,
    v_f05_deal_start_epoch bigint,
    v_f05_deal_end_epoch bigint
) RETURNS void AS $$
BEGIN
    INSERT INTO open_sector_pieces (
        sp_id,
        sector_number,
        piece_index,
        created_at,
        piece_cid,
        piece_size,
        data_url,
        data_headers,
        data_raw_size,
        data_delete_on_finalize,
        f05_publish_cid,
        f05_deal_id,
        f05_deal_proposal,
        f05_deal_start_epoch,
        f05_deal_end_epoch
    ) VALUES (
        v_sp_id,
        v_sector_number,
        v_piece_index,
        NOW(),
        v_piece_cid,
        v_piece_size,
        v_data_url,
        v_data_headers,
        v_data_raw_size,
        v_data_delete_on_finalize,
        v_f05_publish_cid,
        v_f05_deal_id,
        v_f05_deal_proposal,
        v_f05_deal_start_epoch,
        v_f05_deal_end_epoch
    ) ON CONFLICT (sp_id, sector_number, piece_index) DO NOTHING;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Conflict detected for piece_index %', v_piece_index;
    END IF;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION insert_sector_ddo_piece(
    v_sp_id bigint,
    v_sector_number bigint,
    v_piece_index bigint,
    v_piece_cid text,
    v_piece_size bigint,
    v_data_url text,
    v_data_headers jsonb,
    v_data_raw_size bigint,
    v_data_delete_on_finalize boolean,
    v_direct_start_epoch bigint,
    v_direct_end_epoch bigint,
    v_direct_piece_activation_manifest jsonb
) RETURNS void AS $$
BEGIN
    INSERT INTO open_sector_pieces (
        sp_id,
        sector_number,
        piece_index,
        created_at,
        piece_cid,
        piece_size,
        data_url,
        data_headers,
        data_raw_size,
        data_delete_on_finalize,
        direct_start_epoch,
        direct_end_epoch,
        direct_piece_activation_manifest
    ) VALUES (
        v_sp_id,
        v_sector_number,
        v_piece_index,
        NOW(),
        v_piece_cid,
        v_piece_size,
        v_data_url,
        v_data_headers,
        v_data_raw_size,
        v_data_delete_on_finalize,
        v_direct_start_epoch,
        v_direct_end_epoch,
        v_direct_piece_activation_manifest
    ) ON CONFLICT (sp_id, sector_number, piece_index) DO NOTHING;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Conflict detected for piece_index %', v_piece_index;
    END IF;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION transfer_and_delete_open_piece(v_sp_id bigint, v_sector_number bigint)
RETURNS void AS $$
BEGIN
    -- Copy data from open_sector_pieces to sectors_sdr_initial_pieces
    INSERT INTO sectors_sdr_initial_pieces (
        sp_id,
        sector_number,
        piece_index,
        piece_cid,
        piece_size,
        data_url,
        data_headers,
        data_raw_size,
        data_delete_on_finalize,
        f05_publish_cid,
        f05_deal_id,
        f05_deal_proposal,
        f05_deal_start_epoch,
        f05_deal_end_epoch,
        direct_start_epoch,
        direct_end_epoch,
        direct_piece_activation_manifest,
        created_at
    )
    SELECT
        sp_id,
        sector_number,
        piece_index,
        piece_cid,
        piece_size,
        data_url,
        data_headers,
        data_raw_size,
        data_delete_on_finalize,
        f05_publish_cid,
        f05_deal_id,
        f05_deal_proposal,
        f05_deal_start_epoch,
        f05_deal_end_epoch,
        direct_start_epoch,
        direct_end_epoch,
        direct_piece_activation_manifest,
        created_at
    FROM
        open_sector_pieces
    WHERE
        sp_id = v_sp_id AND
        sector_number = v_sector_number;

    -- Check for successful insertion, then delete the corresponding row from open_sector_pieces
    IF FOUND THEN
        DELETE FROM open_sector_pieces
        WHERE sp_id = v_sp_id AND sector_number = v_sector_number;
    ELSE
        RAISE EXCEPTION 'No data found to transfer for sp_id % and sector_number %', v_sp_id, v_sector_number;
    END IF;
END;
$$ LANGUAGE plpgsql;
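
Taken together, these three functions let the Go side stage deal pieces for an open sector (insert_sector_market_piece for f05 market deals, insert_sector_ddo_piece for DDO deals) and later hand the whole set to the SDR pipeline in one call to transfer_and_delete_open_piece. Below is a hedged sketch of driving them with database/sql; the values are placeholders and the real code goes through the harmonydb wrapper rather than database/sql directly.

package main

import (
    "context"
    "database/sql"

    _ "github.com/lib/pq" // assumed Postgres driver, for the sketch only
)

// stageAndSealDDOPiece is a hypothetical helper: it records one DDO piece for an
// open sector via insert_sector_ddo_piece, then moves all staged pieces for the
// sector into sectors_sdr_initial_pieces via transfer_and_delete_open_piece.
func stageAndSealDDOPiece(ctx context.Context, db *sql.DB, spID, sectorNum int64) error {
    _, err := db.ExecContext(ctx,
        `SELECT insert_sector_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`,
        spID, sectorNum,
        0,                      // piece_index
        "baga-placeholder-cid", // piece_cid (placeholder)
        int64(34359738368),     // piece_size, padded (32 GiB)
        "http://example/piece", // data_url (placeholder)
        "{}",                   // data_headers (jsonb)
        int64(34091302912),     // data_raw_size (unpadded, 127/128 of padded)
        true,                   // data_delete_on_finalize
        int64(1_000_000),       // direct_start_epoch (placeholder)
        int64(2_000_000),       // direct_end_epoch (placeholder)
        "{}",                   // direct_piece_activation_manifest (jsonb)
    )
    if err != nil {
        return err
    }
    // Once MaxDealWaitTime expires or the sector is full, hand everything to the pipeline.
    _, err = db.ExecContext(ctx, `SELECT transfer_and_delete_open_piece($1, $2)`, spID, sectorNum)
    return err
}
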
@ -331,8 +331,10 @@ const (
func DefaultCurioConfig() *CurioConfig {
    return &CurioConfig{
        Subsystems: CurioSubsystemsConfig{
            GuiAddress:    ":4701",
            BoostAdapters: []string{},
            GuiAddress:                 ":4701",
            BoostAdapters:              []string{},
            RequireActivationSuccess:   true,
            RequireNotificationSuccess: true,
        },
        Fees: CurioFees{
            DefaultMaxFee: DefaultDefaultMaxFee(),
@ -364,9 +366,11 @@ func DefaultCurioConfig() *CurioConfig {
            SingleCheckTimeout: Duration(10 * time.Minute),
        },
        Ingest: CurioIngestConfig{
            MaxQueueSDR:   8, // default to 8 sectors before sdr
            MaxQueueTrees: 0, // default don't use this limit
            MaxQueuePoRep: 0, // default don't use this limit
            MaxQueueDealSector: 8, // default to 8 sectors open (or in the process of opening) for deals
            MaxQueueSDR:        8, // default to 8 (will cause backpressure even if deal sectors are 0)
            MaxQueueTrees:      0, // default don't use this limit
            MaxQueuePoRep:      0, // default don't use this limit
            MaxDealWaitTime:    Duration(1 * time.Hour),
        },
        Alerting: CurioAlerting{
            PagerDutyEventURL: "https://events.pagerduty.com/v2/enqueue",
@ -308,6 +308,17 @@ alerts will be triggered for the wallet`,
        },
    },
    "CurioIngestConfig": {
        {
            Name: "MaxQueueDealSector",
            Type: "int",

            Comment: `Maximum number of sectors that can be queued waiting for deals to start processing.
0 = unlimited
Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
The DealSector queue includes deals which are ready to enter the sealing pipeline but are not yet part of it -
size of this queue will also impact the maximum number of ParkPiece tasks which can run concurrently.
DealSector queue is the first queue in the sealing pipeline, meaning that it should be used as the primary backpressure mechanism.`,
        },
        {
            Name: "MaxQueueSDR",
            Type: "int",
@ -315,10 +326,9 @@ alerts will be triggered for the wallet`,
            Comment: `Maximum number of sectors that can be queued waiting for SDR to start processing.
0 = unlimited
Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
The SDR queue includes deals which are in the process of entering the sealing pipeline - size of this queue
will also impact the maximum number of ParkPiece tasks which can run concurrently.

SDR queue is the first queue in the sealing pipeline, meaning that it should be used as the primary backpressure mechanism.`,
The SDR queue includes deals which are in the process of entering the sealing pipeline. In the case of SDR tasks it is
possible that this queue grows above this limit (CC sectors); the backpressure is only applied to sectors
entering the pipeline.`,
        },
        {
            Name: "MaxQueueTrees",
@ -340,6 +350,12 @@ Note: This mechanism will delay taking deal data from markets, providing backpre
Like with the trees tasks, it is possible that this queue grows more than this limit; the backpressure is only
applied to sectors entering the pipeline.`,
        },
        {
            Name: "MaxDealWaitTime",
            Type: "Duration",

            Comment: `Maximum time an open deal sector should wait for more deals before it starts sealing`,
        },
    },
    "CurioProvingConfig": {
        {
@ -590,6 +606,18 @@ also be bounded by resources available on the machine.`,
            Comment: `EnableSendCommitMsg enables the sending of commit messages to the chain
from this curio instance.`,
        },
        {
            Name: "RequireActivationSuccess",
            Type: "bool",

            Comment: `Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3).`,
        },
        {
            Name: "RequireNotificationSuccess",
            Type: "bool",

            Comment: `Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3).`,
        },
        {
            Name: "EnableMoveStorage",
            Type: "bool",
@ -190,6 +190,11 @@ type CurioSubsystemsConfig struct {
    // from this curio instance.
    EnableSendCommitMsg bool

    // Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3).
    RequireActivationSuccess bool
    // Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3).
    RequireNotificationSuccess bool

    // EnableMoveStorage enables the move-into-long-term-storage task to run on this curio instance.
    // This task should only be enabled on nodes with long-term storage.
    //
@ -829,13 +834,20 @@ type CurioProvingConfig struct {
}

type CurioIngestConfig struct {
    // Maximum number of sectors that can be queued waiting for deals to start processing.
    // 0 = unlimited
    // Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
    // The DealSector queue includes deals which are ready to enter the sealing pipeline but are not yet part of it -
    // size of this queue will also impact the maximum number of ParkPiece tasks which can run concurrently.
    // DealSector queue is the first queue in the sealing pipeline, meaning that it should be used as the primary backpressure mechanism.
    MaxQueueDealSector int

    // Maximum number of sectors that can be queued waiting for SDR to start processing.
    // 0 = unlimited
    // Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
    // The SDR queue includes deals which are in the process of entering the sealing pipeline - size of this queue
    // will also impact the maximum number of ParkPiece tasks which can run concurrently.
    //
    // SDR queue is the first queue in the sealing pipeline, meaning that it should be used as the primary backpressure mechanism.
    // The SDR queue includes deals which are in the process of entering the sealing pipeline. In the case of SDR tasks it is
    // possible that this queue grows above this limit (CC sectors); the backpressure is only applied to sectors
    // entering the pipeline.
    MaxQueueSDR int

    // Maximum number of sectors that can be queued waiting for SDRTrees to start processing.
@ -851,6 +863,9 @@ type CurioIngestConfig struct {
    // Like with the trees tasks, it is possible that this queue grows more than this limit; the backpressure is only
    // applied to sectors entering the pipeline.
    MaxQueuePoRep int

    // Maximum time an open deal sector should wait for more deals before it starts sealing
    MaxDealWaitTime Duration
}

// API contains configs for API endpoint
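
For reference, a hedged sketch of overriding the new ingest backpressure knobs in Go. The field names come from the struct above; the import path and everything else are assumptions for illustration.

package main

import (
    "fmt"
    "time"

    "github.com/filecoin-project/lotus/node/config" // assumed import path for CurioConfig
)

func main() {
    cfg := config.DefaultCurioConfig()
    // Let up to 16 sectors sit open for deals, and start sealing a partially
    // filled deal sector after 30 minutes instead of the 1h default.
    cfg.Ingest.MaxQueueDealSector = 16
    cfg.Ingest.MaxDealWaitTime = config.Duration(30 * time.Minute)
    fmt.Println(cfg.Ingest.MaxQueueDealSector, time.Duration(cfg.Ingest.MaxDealWaitTime))
}
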
|
@ -170,6 +170,15 @@ func (ds *PieceDealInfo) String() string {
    }
}

func (ds *PieceDealInfo) Size() abi.PaddedPieceSize {
    switch {
    case ds.isBuiltinMarketDeal():
        return ds.DealProposal.PieceSize
    default:
        return ds.PieceActivationManifest.Size
    }
}

func (ds *PieceDealInfo) KeepUnsealedRequested() bool {
    return ds.KeepUnsealed
}
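
Size() gives callers one accessor that works for both deal flavours: f05 market deals carry the size in DealProposal, DDO deals in the PieceActivationManifest. A hypothetical helper using it (assuming the lotus storage/pipeline/piece and go-state-types/abi imports), e.g. to total up the padded space a set of deal pieces will occupy in a sector:

// totalDealSpace is illustrative only: it sums the padded sizes of a set of
// deal pieces via the Size() accessor added above.
func totalDealSpace(pieces []piece.PieceDealInfo) abi.PaddedPieceSize {
    var sum abi.PaddedPieceSize
    for _, p := range pieces {
        sum += p.Size()
    }
    return sum
}
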
|
@ -33,6 +33,7 @@ import (
    "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
    "github.com/filecoin-project/lotus/chain/actors/policy"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/lib/filler"
    "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader"
    "github.com/filecoin-project/lotus/storage/sealer/storiface"
)
@ -88,7 +89,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err
        return xerrors.Errorf("too much data in sector: %d > %d", allocated, ubytes)
    }

    fillerSizes, err := fillersFromRem(ubytes - allocated)
    fillerSizes, err := filler.FillersFromRem(ubytes - allocated)
    if err != nil {
        return err
    }
|
@ -4,7 +4,6 @@ import (
    "bytes"
    "context"
    "fmt"
    "math/bits"

    "github.com/ipfs/go-cid"
    "golang.org/x/xerrors"
@ -18,42 +17,6 @@ import (
    "github.com/filecoin-project/lotus/storage/pipeline/sealiface"
)

func fillersFromRem(in abi.UnpaddedPieceSize) ([]abi.UnpaddedPieceSize, error) {
    // Convert to in-sector bytes for easier math:
    //
    // Sector size to user bytes ratio is constant, e.g. for 1024B we have 1016B
    // of user-usable data.
    //
    // (1024/1016 = 128/127)
    //
    // Given that we can get sector size by simply adding 1/127 of the user
    // bytes
    //
    // (we convert to sector bytes as they are nice round binary numbers)

    toFill := uint64(in + (in / 127))

    // We need to fill the sector with pieces that are powers of 2. Conveniently
    // computers store numbers in binary, which means we can look at 1s to get
    // all the piece sizes we need to fill the sector. It also means that number
    // of pieces is the number of 1s in the number of remaining bytes to fill
    out := make([]abi.UnpaddedPieceSize, bits.OnesCount64(toFill))
    for i := range out {
        // Extract the next lowest non-zero bit
        next := bits.TrailingZeros64(toFill)
        psize := uint64(1) << next
        // e.g: if the number is 0b010100, psize will be 0b000100

        // set that bit to 0 by XORing it, so the next iteration looks at the
        // next bit
        toFill ^= psize

        // Add the piece size to the list of pieces we need to create
        out[i] = abi.PaddedPieceSize(psize).Unpadded()
    }
    return out, nil
}

func (m *Sealing) ListSectors() ([]SectorInfo, error) {
    var sectors []SectorInfo
    if err := m.sectors.List(&sectors); err != nil {