v1.27.0-a #10
@@ -1018,7 +1018,7 @@ workflows:
           requires:
             - build
           suite: utest-unit-rest
-          target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./tools/..."
+          target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./provider/... ./tools/..."
           resource_class: 2xlarge
       - test:
           name: test-unit-storage

@@ -1,10 +1,28 @@
 package api

-import "context"
+import (
+	"context"
+	"net/http"
+	"net/url"
+
+	"github.com/filecoin-project/go-address"
+
+	"github.com/filecoin-project/lotus/storage/sealer/fsutil"
+	"github.com/filecoin-project/lotus/storage/sealer/storiface"
+)

 type LotusProvider interface {
 	Version(context.Context) (Version, error) //perm:admin

+	AllocatePieceToSector(ctx context.Context, maddr address.Address, piece PieceDealInfo, rawSize int64, source url.URL, header http.Header) (SectorOffset, error) //perm:write
+
+	StorageAddLocal(ctx context.Context, path string) error //perm:admin
+	StorageDetachLocal(ctx context.Context, path string) error //perm:admin
+	StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin
+	StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin
+	StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) //perm:admin
+	StorageInfo(context.Context, storiface.ID) (storiface.StorageInfo, error) //perm:admin

 	// Trigger shutdown
 	Shutdown(context.Context) error //perm:admin
 }

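For orientation, a hypothetical caller-side sketch of the new AllocatePieceToSector entry point. Only the method signature comes from the interface above; the miner address, source URL, and header values are illustrative, and the idea that the headers accompany the provider's fetch of the piece is an assumption.

package example

import (
	"context"
	"net/http"
	"net/url"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

// allocate shows the call shape only; every concrete value below is made up.
func allocate(ctx context.Context, p api.LotusProvider, deal api.PieceDealInfo, rawSize int64) (api.SectorOffset, error) {
	maddr, err := address.NewFromString("t01000") // hypothetical miner address
	if err != nil {
		return api.SectorOffset{}, err
	}

	// Where the provider should fetch the piece data from (hypothetical URL).
	src, err := url.Parse("https://example.com/piece.dat")
	if err != nil {
		return api.SectorOffset{}, err
	}

	// Headers sent along with the request; presumably used for auth (assumption).
	hdr := http.Header{}
	hdr.Set("Authorization", "Bearer <token>")

	return p.AllocatePieceToSector(ctx, maddr, deal, rawSize, *src, hdr)
}
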
@@ -5,6 +5,8 @@ package api
 import (
 	"context"
 	"encoding/json"
+	"net/http"
+	"net/url"
 	"time"

 	"github.com/google/uuid"
@@ -832,8 +834,22 @@ type LotusProviderStruct struct {
 }

 type LotusProviderMethods struct {
+	AllocatePieceToSector func(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) `perm:"write"`
+
 	Shutdown func(p0 context.Context) error `perm:"admin"`

+	StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"`
+
+	StorageDetachLocal func(p0 context.Context, p1 string) error `perm:"admin"`
+
+	StorageInfo func(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) `perm:"admin"`
+
+	StorageList func(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) `perm:"admin"`
+
+	StorageLocal func(p0 context.Context) (map[storiface.ID]string, error) `perm:"admin"`
+
+	StorageStat func(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) `perm:"admin"`
+
 	Version func(p0 context.Context) (Version, error) `perm:"admin"`
 }

@@ -5201,6 +5217,17 @@ func (s *GatewayStub) Web3ClientVersion(p0 context.Context) (string, error) {
 	return "", ErrNotSupported
 }

+func (s *LotusProviderStruct) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) {
+	if s.Internal.AllocatePieceToSector == nil {
+		return *new(SectorOffset), ErrNotSupported
+	}
+	return s.Internal.AllocatePieceToSector(p0, p1, p2, p3, p4, p5)
+}
+
+func (s *LotusProviderStub) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) {
+	return *new(SectorOffset), ErrNotSupported
+}
+
 func (s *LotusProviderStruct) Shutdown(p0 context.Context) error {
 	if s.Internal.Shutdown == nil {
 		return ErrNotSupported
@@ -5212,6 +5239,72 @@ func (s *LotusProviderStub) Shutdown(p0 context.Context) error {
 	return ErrNotSupported
 }

+func (s *LotusProviderStruct) StorageAddLocal(p0 context.Context, p1 string) error {
+	if s.Internal.StorageAddLocal == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.StorageAddLocal(p0, p1)
+}
+
+func (s *LotusProviderStub) StorageAddLocal(p0 context.Context, p1 string) error {
+	return ErrNotSupported
+}
+
+func (s *LotusProviderStruct) StorageDetachLocal(p0 context.Context, p1 string) error {
+	if s.Internal.StorageDetachLocal == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.StorageDetachLocal(p0, p1)
+}
+
+func (s *LotusProviderStub) StorageDetachLocal(p0 context.Context, p1 string) error {
+	return ErrNotSupported
+}
+
+func (s *LotusProviderStruct) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) {
+	if s.Internal.StorageInfo == nil {
+		return *new(storiface.StorageInfo), ErrNotSupported
+	}
+	return s.Internal.StorageInfo(p0, p1)
+}
+
+func (s *LotusProviderStub) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) {
+	return *new(storiface.StorageInfo), ErrNotSupported
+}
+
+func (s *LotusProviderStruct) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) {
+	if s.Internal.StorageList == nil {
+		return *new(map[storiface.ID][]storiface.Decl), ErrNotSupported
+	}
+	return s.Internal.StorageList(p0)
+}
+
+func (s *LotusProviderStub) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) {
+	return *new(map[storiface.ID][]storiface.Decl), ErrNotSupported
+}
+
+func (s *LotusProviderStruct) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
+	if s.Internal.StorageLocal == nil {
+		return *new(map[storiface.ID]string), ErrNotSupported
+	}
+	return s.Internal.StorageLocal(p0)
+}
+
+func (s *LotusProviderStub) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
+	return *new(map[storiface.ID]string), ErrNotSupported
+}
+
+func (s *LotusProviderStruct) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) {
+	if s.Internal.StorageStat == nil {
+		return *new(fsutil.FsStat), ErrNotSupported
+	}
+	return s.Internal.StorageStat(p0, p1)
+}
+
+func (s *LotusProviderStub) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) {
+	return *new(fsutil.FsStat), ErrNotSupported
+}
+
 func (s *LotusProviderStruct) Version(p0 context.Context) (Version, error) {
 	if s.Internal.Version == nil {
 		return *new(Version), ErrNotSupported

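The hunks above follow the usual proxy_gen.go shape: the generator turns each //perm-tagged interface method into a function pointer on the Methods struct, the Struct wrapper dispatches through that pointer and degrades to ErrNotSupported when it is nil, and the Stub always refuses. A minimal self-contained sketch of that pattern; the names here are illustrative, not the generated code:

package main

import (
	"context"
	"errors"
	"fmt"
)

var ErrNotSupported = errors.New("method not supported")

// ProviderMethods mirrors LotusProviderMethods: a struct of function pointers
// that an RPC client populates at connect time; the perm tag drives auth checks.
type ProviderMethods struct {
	StorageLocal func(ctx context.Context) (map[string]string, error) `perm:"admin"`
}

// ProviderStruct mirrors LotusProviderStruct: calls go through Internal.
type ProviderStruct struct {
	Internal ProviderMethods
}

func (s *ProviderStruct) StorageLocal(ctx context.Context) (map[string]string, error) {
	if s.Internal.StorageLocal == nil { // nothing wired in: fail soft
		return nil, ErrNotSupported
	}
	return s.Internal.StorageLocal(ctx)
}

func main() {
	var p ProviderStruct // no RPC backing, so every call degrades gracefully
	_, err := p.StorageLocal(context.Background())
	fmt.Println(err) // method not supported
}
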
blockstore/cached.go (new file, 113 lines)
@@ -0,0 +1,113 @@
+package blockstore
+
+import (
+	"context"
+
+	blocks "github.com/ipfs/go-block-format"
+	"github.com/ipfs/go-cid"
+)
+
+// BlockstoreCache is a cache for blocks, compatible with lru.Cache; Must be safe for concurrent access
+type BlockstoreCache interface {
+	Remove(mhString MhString) bool
+	Contains(mhString MhString) bool
+	Get(mhString MhString) (blocks.Block, bool)
+	Add(mhString MhString, block blocks.Block) (evicted bool)
+}
+
+type ReadCachedBlockstore struct {
+	top   Blockstore
+	cache BlockstoreCache
+}
+
+type MhString string
+
+func NewReadCachedBlockstore(top Blockstore, cache BlockstoreCache) *ReadCachedBlockstore {
+	return &ReadCachedBlockstore{
+		top:   top,
+		cache: cache,
+	}
+}
+
+func (c *ReadCachedBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
+	c.cache.Remove(MhString(cid.Hash()))
+	return c.top.DeleteBlock(ctx, cid)
+}
+
+func (c *ReadCachedBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
+	if c.cache.Contains(MhString(cid.Hash())) {
+		return true, nil
+	}
+
+	return c.top.Has(ctx, cid)
+}
+
+func (c *ReadCachedBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
+	if out, ok := c.cache.Get(MhString(cid.Hash())); ok {
+		return out, nil
+	}
+
+	out, err := c.top.Get(ctx, cid)
+	if err != nil {
+		return nil, err
+	}
+
+	c.cache.Add(MhString(cid.Hash()), out)
+	return out, nil
+}
+
+func (c *ReadCachedBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
+	if b, ok := c.cache.Get(MhString(cid.Hash())); ok {
+		return len(b.RawData()), nil
+	}
+
+	return c.top.GetSize(ctx, cid)
+}
+
+func (c *ReadCachedBlockstore) Put(ctx context.Context, block blocks.Block) error {
+	c.cache.Add(MhString(block.Cid().Hash()), block)
+	return c.top.Put(ctx, block)
+}
+
+func (c *ReadCachedBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
+	for _, b := range blocks {
+		c.cache.Add(MhString(b.Cid().Hash()), b)
+	}
+
+	return c.top.PutMany(ctx, blocks)
+}
+
+func (c *ReadCachedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+	return c.top.AllKeysChan(ctx)
+}
+
+func (c *ReadCachedBlockstore) HashOnRead(enabled bool) {
+	c.top.HashOnRead(enabled)
+}
+
+func (c *ReadCachedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
+	return c.top.View(ctx, cid, func(bb []byte) error {
+		blk, err := blocks.NewBlockWithCid(bb, cid)
+		if err != nil {
+			return err
+		}
+
+		c.cache.Add(MhString(cid.Hash()), blk)
+
+		return callback(bb)
+	})
+}
+
+func (c *ReadCachedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
+	for _, ci := range cids {
+		c.cache.Remove(MhString(ci.Hash()))
+	}
+
+	return c.top.DeleteMany(ctx, cids)
+}
+
+func (c *ReadCachedBlockstore) Flush(ctx context.Context) error {
+	return c.top.Flush(ctx)
+}
+
+var _ Blockstore = (*ReadCachedBlockstore)(nil)

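A possible usage sketch for the new ReadCachedBlockstore. The diff does not show which cache implementation callers pass in; the comment on BlockstoreCache points at lru.Cache, and hashicorp/golang-lru/v2's generic, mutex-guarded Cache appears to match the required method set (Remove/Contains/Get/Add), so that is assumed below, along with blockstore.NewMemory as a stand-in backing store.

package main

import (
	"context"
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
	blocks "github.com/ipfs/go-block-format"

	"github.com/filecoin-project/lotus/blockstore"
)

func main() {
	// A concurrency-safe LRU keyed by multihash string; assumed to satisfy
	// BlockstoreCache since lru/v2's Cache exposes the same four methods.
	cache, err := lru.New[blockstore.MhString, blocks.Block](32 << 10)
	if err != nil {
		panic(err)
	}

	// Wrap an in-memory blockstore (assumed backing store); cache hits on the
	// read path (Has/Get/GetSize/View) skip the wrapped store entirely.
	bs := blockstore.NewReadCachedBlockstore(blockstore.NewMemory(), cache)

	ctx := context.Background()
	blk := blocks.NewBlock([]byte("hello"))
	if err := bs.Put(ctx, blk); err != nil { // Put populates the cache too
		panic(err)
	}

	got, err := bs.Get(ctx, blk.Cid()) // served from the cache
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", got.RawData())
}
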
(File diff suppressed because it is too large)
@@ -242,7 +242,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4170"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4186"
       }
     },
     {
@@ -473,7 +473,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4181"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4197"
       }
     },
     {
@@ -505,7 +505,7 @@
       "deprecated": false,
      "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4192"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4208"
       }
     },
     {
@@ -611,7 +611,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4203"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4219"
       }
     },
     {
@@ -704,7 +704,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4214"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4230"
       }
     },
     {
@@ -788,7 +788,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4225"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4241"
       }
     },
     {
@@ -888,7 +888,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4236"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4252"
       }
     },
     {
@@ -944,7 +944,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4247"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4263"
       }
     },
     {
@@ -1017,7 +1017,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4258"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4274"
       }
     },
     {
@@ -1090,7 +1090,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4269"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4285"
       }
     },
     {
@@ -1137,7 +1137,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4280"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4296"
       }
     },
     {
@@ -1169,7 +1169,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4291"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4307"
       }
     },
     {
@@ -1206,7 +1206,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4313"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4329"
       }
     },
     {
@@ -1253,7 +1253,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4324"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4340"
       }
     },
     {
@@ -1293,7 +1293,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4335"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4351"
       }
     },
     {
@@ -1340,7 +1340,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4346"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4362"
       }
     },
     {
@@ -1369,7 +1369,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4357"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4373"
       }
     },
     {
@@ -1506,7 +1506,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4368"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4384"
       }
     },
     {
@@ -1535,7 +1535,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4379"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4395"
       }
     },
     {
@@ -1589,7 +1589,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4390"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4406"
       }
     },
     {
@@ -1680,7 +1680,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4401"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4417"
       }
     },
     {
@@ -1708,7 +1708,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4412"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4428"
       }
     },
     {
@@ -1798,7 +1798,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4423"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4439"
       }
     },
     {
@@ -2054,7 +2054,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4434"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4450"
       }
     },
     {
@@ -2299,7 +2299,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4445"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4461"
       }
     },
     {
@@ -2355,7 +2355,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4456"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4472"
       }
     },
     {
@@ -2402,7 +2402,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4467"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4483"
       }
     },
     {
@@ -2500,7 +2500,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4478"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4494"
       }
     },
     {
@@ -2566,7 +2566,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4489"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4505"
       }
     },
     {
@@ -2632,7 +2632,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4500"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4516"
       }
     },
     {
@@ -2741,7 +2741,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4511"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4527"
       }
     },
     {
@@ -2799,7 +2799,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4522"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4538"
       }
     },
     {
@@ -2921,7 +2921,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4533"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4549"
       }
     },
     {
@@ -3108,7 +3108,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4544"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4560"
       }
     },
     {
@@ -3312,7 +3312,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4555"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4571"
       }
     },
     {
@@ -3403,7 +3403,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4566"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4582"
       }
     },
     {
@@ -3461,7 +3461,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4577"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4593"
       }
     },
     {
@@ -3719,7 +3719,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4588"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4604"
       }
     },
     {
@@ -3994,7 +3994,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4599"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4615"
       }
     },
     {
@@ -4022,7 +4022,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4610"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4626"
       }
     },
     {
@@ -4060,7 +4060,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4621"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4637"
       }
     },
     {
@@ -4168,7 +4168,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4632"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4648"
       }
     },
     {
@@ -4206,7 +4206,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4643"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4659"
       }
     },
     {
@@ -4235,7 +4235,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4654"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4670"
       }
     },
     {
@@ -4298,7 +4298,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4665"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4681"
       }
     },
     {
@@ -4361,7 +4361,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4676"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4692"
       }
     },
     {
@@ -4406,7 +4406,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4687"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4703"
       }
     },
     {
@@ -4528,7 +4528,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4698"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4714"
       }
     },
     {
@@ -4683,7 +4683,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4709"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4725"
       }
     },
     {
@@ -4737,7 +4737,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4720"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4736"
       }
     },
     {
@@ -4791,7 +4791,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4731"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4747"
       }
     },
     {
@@ -4893,7 +4893,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4742"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4758"
       }
     },
     {
@@ -5116,7 +5116,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4753"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4769"
       }
     },
     {
@@ -5310,7 +5310,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4764"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4780"
       }
     },
     {
@@ -5356,7 +5356,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4775"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4791"
       }
     },
     {
@@ -5506,7 +5506,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4786"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4802"
       }
     },
     {
@@ -5643,7 +5643,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4797"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4813"
       }
     },
     {
@@ -5711,7 +5711,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4808"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4824"
       }
     },
     {
@@ -5828,7 +5828,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4819"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4835"
       }
     },
     {
@@ -5919,7 +5919,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4830"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4846"
       }
     },
     {
@@ -6005,7 +6005,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4841"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4857"
       }
     },
     {
@@ -6032,7 +6032,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4852"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4868"
       }
     },
     {
@@ -6059,7 +6059,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4863"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4879"
       }
     },
     {
@@ -6127,7 +6127,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4874"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4890"
       }
     },
     {
@@ -6633,7 +6633,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4885"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4901"
       }
     },
     {
@@ -6730,7 +6730,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4896"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4912"
       }
     },
     {
@@ -6830,7 +6830,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4907"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4923"
       }
     },
     {
@@ -6930,7 +6930,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4918"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4934"
       }
     },
     {
@@ -7055,7 +7055,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4929"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4945"
       }
     },
     {
@@ -7164,7 +7164,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4940"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4956"
       }
     },
     {
@@ -7267,7 +7267,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4951"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4967"
       }
     },
     {
@@ -7397,7 +7397,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4962"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4978"
       }
     },
     {
@@ -7504,7 +7504,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4973"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4989"
       }
     },
     {
@@ -7565,7 +7565,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4984"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5000"
       }
     },
     {
@@ -7633,7 +7633,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4995"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5011"
       }
     },
     {
@@ -7714,7 +7714,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5006"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5022"
       }
     },
     {
@@ -7878,7 +7878,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5017"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5033"
       }
     },
     {
@@ -8079,7 +8079,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5028"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5044"
       }
     },
     {
@@ -8190,7 +8190,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5039"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5055"
       }
     },
     {
@@ -8321,7 +8321,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5050"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5066"
       }
     },
     {
@@ -8407,7 +8407,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5061"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5077"
       }
     },
     {
@@ -8434,7 +8434,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5072"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5088"
       }
     },
     {
@@ -8487,7 +8487,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5083"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5099"
       }
     },
     {
@@ -8575,7 +8575,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5094"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5110"
       }
     },
     {
@@ -9026,7 +9026,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5105"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5121"
       }
     },
     {
@@ -9193,7 +9193,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5116"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5132"
       }
     },
     {
@@ -9366,7 +9366,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5127"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5143"
       }
     },
     {
@@ -9434,7 +9434,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5138"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5154"
       }
     },
     {
@@ -9502,7 +9502,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5149"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5165"
       }
     },
     {
@@ -9663,7 +9663,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5160"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5176"
       }
     },
     {
@@ -9708,7 +9708,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5171"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5187"
       }
     },
     {
@@ -9753,7 +9753,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5182"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5198"
      }
     },
     {
@@ -9780,7 +9780,7 @@
       "deprecated": false,
       "externalDocs": {
         "description": "Github remote link",
-        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5193"
+        "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5209"
       }
     }
   ]

(File diff suppressed because it is too large)
(Generated OpenRPC documentation: 37 hunks in this file, each updating only the
line anchor of a "Github remote link" URL into api/proxy_gen.go. The first hunk
is shown in full; every other hunk has the same shape.)

@@ -161,7 +161,7 @@
           "deprecated": false,
           "externalDocs": {
             "description": "Github remote link",
-            "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7041"
+            "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7134"
           }
         },
         {

Remaining anchor updates, hunk by hunk (the final hunk closes the JSON array
with "}" and "]" instead of "}," and "{"):

  hunk     old anchor   new anchor
  -252     #L7052       #L7145
  -420     #L7063       #L7156
  -447     #L7074       #L7167
  -597     #L7085       #L7178
  -700     #L7096       #L7189
  -803     #L7107       #L7200
  -925     #L7118       #L7211
  -1135    #L7129       #L7222
  -1306    #L7140       #L7233
  -3350    #L7151       #L7244
  -3470    #L7162       #L7255
  -3531    #L7173       #L7266
  -3569    #L7184       #L7277
  -3729    #L7195       #L7288
  -3913    #L7206       #L7299
  -4054    #L7217       #L7310
  -4107    #L7228       #L7321
  -4250    #L7239       #L7332
  -4474    #L7250       #L7343
  -4601    #L7261       #L7354
  -4768    #L7272       #L7365
  -4895    #L7283       #L7376
  -4933    #L7294       #L7387
  -4972    #L7305       #L7398
  -4995    #L7316       #L7409
  -5034    #L7327       #L7420
  -5057    #L7338       #L7431
  -5096    #L7349       #L7442
  -5130    #L7360       #L7453
  -5184    #L7371       #L7464
  -5223    #L7382       #L7475
  -5262    #L7393       #L7486
  -5297    #L7404       #L7497
  -5477    #L7415       #L7508
  -5506    #L7426       #L7519
  -5529    #L7437       #L7530
@@ -77,6 +77,10 @@ func (f FIL) MarshalText() (text []byte, err error) {
 }
 
 func (f FIL) UnmarshalText(text []byte) error {
+	if f.Int == nil {
+		return fmt.Errorf("cannot unmarshal into nil BigInt (text:%s)", string(text))
+	}
+
 	p, err := ParseFIL(string(text))
 	if err != nil {
 		return err
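The guard matters because FIL embeds a *big.Int, so text-unmarshaling into a
zero value previously failed only deep inside the parse/assign path. A minimal
sketch of the new behavior (assuming, as the check above implies, that the
zero-value FIL carries a nil *big.Int):

	package main

	import (
		"fmt"

		"github.com/filecoin-project/lotus/chain/types"
	)

	func main() {
		var f types.FIL // zero value: f.Int == nil
		err := f.UnmarshalText([]byte("1.5"))
		fmt.Println(err) // cannot unmarshal into nil BigInt (text:1.5)
	}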
cli/state.go:
@@ -1388,15 +1388,19 @@ func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) {
 	p, err := stmgr.GetParamType(ar, code, method) // todo use api for correct actor registry
 	if err != nil {
-		return "", err
+		return fmt.Sprintf("raw:%x; DECODE ERR: %s", params, err.Error()), nil
 	}
 
 	if err := p.UnmarshalCBOR(bytes.NewReader(params)); err != nil {
-		return "", err
+		return fmt.Sprintf("raw:%x; DECODE cbor ERR: %s", params, err.Error()), nil
 	}
 
 	b, err := json.MarshalIndent(p, "", " ")
-	return string(b), err
+	if err != nil {
+		return "", err
+	}
+
+	return string(b), nil
 }
 
 func JsonReturn(code cid.Cid, method abi.MethodNum, ret []byte) (string, error) {
@@ -1407,7 +1411,7 @@ func JsonReturn(code cid.Cid, method abi.MethodNum, ret []byte) (string, error) {
 	re := reflect.New(methodMeta.Ret.Elem())
 	p := re.Interface().(cbg.CBORUnmarshaler)
 	if err := p.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
-		return "", err
+		return fmt.Sprintf("raw:%x; DECODE ERR: %s", ret, err.Error()), nil
 	}
 
 	b, err := json.MarshalIndent(p, "", " ")
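The effect is that state-inspection commands now degrade gracefully when a
params or return blob cannot be decoded, rendering the raw bytes instead of
aborting. An illustrative call (hypothetical inputs; the exact decoder message
will vary):

	out, err := JsonParams(codeCid, abi.MethodNum(2), []byte{0xde, 0xad})
	// err == nil; out is either
	//   "raw:dead; DECODE ERR: ..."       (unknown method/param type), or
	//   "raw:dead; DECODE cbor ERR: ..."  (bytes that fail CBOR decoding)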
cmd/lotus-provider/cli.go (new file):

package main

import (
	"bufio"
	"encoding/base64"
	"fmt"
	"net"
	"os"
	"time"

	"github.com/BurntSushi/toml"
	"github.com/gbrlsnchs/jwt/v3"
	manet "github.com/multiformats/go-multiaddr/net"
	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-jsonrpc/auth"

	"github.com/filecoin-project/lotus/api"
	lcli "github.com/filecoin-project/lotus/cli"
	"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
	"github.com/filecoin-project/lotus/cmd/lotus-provider/rpc"
)

const providerEnvVar = "PROVIDER_API_INFO"

var cliCmd = &cli.Command{
	Name:  "cli",
	Usage: "Execute cli commands",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:  "machine",
			Usage: "machine host:port (lotus-provider run --listen address)",
		},
	},
	Before: func(cctx *cli.Context) error {
		if os.Getenv(providerEnvVar) != "" {
			// set already
			return nil
		}

		db, err := deps.MakeDB(cctx)
		if err != nil {
			return err
		}

		ctx := lcli.ReqContext(cctx)

		machine := cctx.String("machine")
		if machine == "" {
			// interactive picker
			var machines []struct {
				HostAndPort string    `db:"host_and_port"`
				LastContact time.Time `db:"last_contact"`
			}

			err := db.Select(ctx, &machines, "select host_and_port, last_contact from harmony_machines")
			if err != nil {
				return xerrors.Errorf("getting machine list: %w", err)
			}

			now := time.Now()
			fmt.Println("Available machines:")
			for i, m := range machines {
				// A machine is healthy if contacted not longer than 2 minutes ago
				healthStatus := "unhealthy"
				if now.Sub(m.LastContact) <= 2*time.Minute {
					healthStatus = "healthy"
				}
				fmt.Printf("%d. %s %s\n", i+1, m.HostAndPort, healthStatus)
			}

			fmt.Print("Select: ")
			reader := bufio.NewReader(os.Stdin)
			input, err := reader.ReadString('\n')
			if err != nil {
				return xerrors.Errorf("reading selection: %w", err)
			}

			var selection int
			_, err = fmt.Sscanf(input, "%d", &selection)
			if err != nil {
				return xerrors.Errorf("parsing selection: %w", err)
			}

			if selection < 1 || selection > len(machines) {
				return xerrors.New("invalid selection")
			}

			machine = machines[selection-1].HostAndPort
		}

		var apiKeys []string
		{
			var dbconfigs []struct {
				Config string `db:"config"`
				Title  string `db:"title"`
			}

			err := db.Select(ctx, &dbconfigs, "select config from harmony_config")
			if err != nil {
				return xerrors.Errorf("getting configs: %w", err)
			}

			var seen = make(map[string]struct{})

			for _, config := range dbconfigs {
				var layer struct {
					Apis struct {
						StorageRPCSecret string
					}
				}

				if _, err := toml.Decode(config.Config, &layer); err != nil {
					return xerrors.Errorf("decode config layer %s: %w", config.Title, err)
				}

				if layer.Apis.StorageRPCSecret != "" {
					if _, ok := seen[layer.Apis.StorageRPCSecret]; ok {
						continue
					}
					seen[layer.Apis.StorageRPCSecret] = struct{}{}
					apiKeys = append(apiKeys, layer.Apis.StorageRPCSecret)
				}
			}
		}

		if len(apiKeys) == 0 {
			return xerrors.New("no api keys found in the database")
		}
		if len(apiKeys) > 1 {
			return xerrors.Errorf("multiple api keys found in the database, not supported yet")
		}

		var apiToken []byte
		{
			type jwtPayload struct {
				Allow []auth.Permission
			}

			p := jwtPayload{
				Allow: api.AllPermissions,
			}

			sk, err := base64.StdEncoding.DecodeString(apiKeys[0])
			if err != nil {
				return xerrors.Errorf("decode secret: %w", err)
			}

			apiToken, err = jwt.Sign(&p, jwt.NewHS256(sk))
			if err != nil {
				return xerrors.Errorf("signing token: %w", err)
			}
		}

		{
			laddr, err := net.ResolveTCPAddr("tcp", machine)
			if err != nil {
				return xerrors.Errorf("net resolve: %w", err)
			}

			if len(laddr.IP) == 0 {
				// set localhost
				laddr.IP = net.IPv4(127, 0, 0, 1)
			}

			ma, err := manet.FromNetAddr(laddr)
			if err != nil {
				return xerrors.Errorf("net from addr (%v): %w", laddr, err)
			}

			token := fmt.Sprintf("%s:%s", string(apiToken), ma)
			if err := os.Setenv(providerEnvVar, token); err != nil {
				return xerrors.Errorf("setting env var: %w", err)
			}
		}

		{
			api, closer, err := rpc.GetProviderAPI(cctx)
			if err != nil {
				return err
			}
			defer closer()

			v, err := api.Version(ctx)
			if err != nil {
				return xerrors.Errorf("querying version: %w", err)
			}

			fmt.Println("remote node version:", v.String())
		}

		return nil
	},
	Subcommands: []*cli.Command{
		storageCmd,
	},
}
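For reference, the value the Before hook exports follows the usual Lotus
API-info convention of token:multiaddr; a hypothetical example of what ends up
in the environment (placeholder token and port):

	// PROVIDER_API_INFO=<jwt>:<multiaddr>
	os.Setenv("PROVIDER_API_INFO", "eyJhbGciOiJIUzI1NiJ9...:/ip4/127.0.0.1/tcp/12300")

rpc.GetProviderAPI then reads this variable back to dial the selected machine;
the version check at the end of the hook exercises exactly that path.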
@@ -6,9 +6,12 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"os/exec"
 	"path"
 	"strings"
 
+	"github.com/BurntSushi/toml"
+	"github.com/fatih/color"
 	"github.com/urfave/cli/v2"
 	"golang.org/x/xerrors"
 
@@ -27,7 +30,9 @@ var configCmd = &cli.Command{
 		configListCmd,
 		configViewCmd,
 		configRmCmd,
+		configEditCmd,
 		configMigrateCmd,
+		configNewCmd,
 	},
 }
 
@@ -238,3 +243,209 @@ var configViewCmd = &cli.Command{
 		return nil
 	},
 }
+
+var configEditCmd = &cli.Command{
+	Name:      "edit",
+	Usage:     "edit a config layer",
+	ArgsUsage: "[layer name]",
+	Flags: []cli.Flag{
+		&cli.StringFlag{
+			Name:    "editor",
+			Usage:   "editor to use",
+			Value:   "vim",
+			EnvVars: []string{"EDITOR"},
+		},
+		&cli.StringFlag{
+			Name:        "source",
+			Usage:       "source config layer",
+			DefaultText: "<edited layer>",
+		},
+		&cli.BoolFlag{
+			Name:  "allow-owerwrite",
+			Usage: "allow overwrite of existing layer if source is a different layer",
+		},
+		&cli.BoolFlag{
+			Name:  "no-source-diff",
+			Usage: "save the whole config into the layer, not just the diff",
+		},
+		&cli.BoolFlag{
+			Name:        "no-interpret-source",
+			Usage:       "do not interpret source layer",
+			DefaultText: "true if --source is set",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		layer := cctx.Args().First()
+		if layer == "" {
+			return errors.New("layer name is required")
+		}
+
+		source := layer
+		if cctx.IsSet("source") {
+			source = cctx.String("source")
+
+			if source == layer && !cctx.Bool("allow-owerwrite") {
+				return errors.New("source and target layers are the same")
+			}
+		}
+
+		db, err := deps.MakeDB(cctx)
+		if err != nil {
+			return err
+		}
+
+		sourceConfig, err := getConfig(db, source)
+		if err != nil {
+			return xerrors.Errorf("getting source config: %w", err)
+		}
+
+		if cctx.IsSet("source") && source != layer && !cctx.Bool("no-interpret-source") {
+			lp := config.DefaultLotusProvider()
+			if _, err := toml.Decode(sourceConfig, lp); err != nil {
+				return xerrors.Errorf("parsing source config: %w", err)
+			}
+
+			cb, err := config.ConfigUpdate(lp, config.DefaultLotusProvider(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
+			if err != nil {
+				return xerrors.Errorf("interpreting source config: %w", err)
+			}
+			sourceConfig = string(cb)
+		}
+
+		editor := cctx.String("editor")
+		newConfig, err := edit(editor, sourceConfig)
+		if err != nil {
+			return xerrors.Errorf("editing config: %w", err)
+		}
+
+		toWrite := newConfig
+
+		if cctx.IsSet("source") && !cctx.Bool("no-source-diff") {
+			updated, err := diff(sourceConfig, newConfig)
+			if err != nil {
+				return xerrors.Errorf("computing diff: %w", err)
+			}
+
+			{
+				fmt.Printf("%s will write changes as the layer because %s is not set\n", color.YellowString(">"), color.GreenString("--no-source-diff"))
+				fmt.Println(updated)
+				fmt.Printf("%s Confirm [y]: ", color.YellowString(">"))
+
+				for {
+					var confirmBuf [16]byte
+					n, err := os.Stdin.Read(confirmBuf[:])
+					if err != nil {
+						return xerrors.Errorf("reading confirmation: %w", err)
+					}
+					confirm := strings.TrimSpace(string(confirmBuf[:n]))
+
+					if confirm == "" {
+						confirm = "y"
+					}
+
+					if confirm[:1] == "y" {
+						break
+					}
+					if confirm[:1] == "n" {
+						return nil
+					}
+
+					fmt.Printf("%s Confirm [y]:\n", color.YellowString(">"))
+				}
+			}
+
+			toWrite = updated
+		}
+
+		fmt.Printf("%s Writing config for layer %s\n", color.YellowString(">"), color.GreenString(layer))
+
+		return setConfig(db, layer, toWrite)
+	},
+}
+
+func diff(sourceConf, newConf string) (string, error) {
+	lpSrc := config.DefaultLotusProvider()
+	lpNew := config.DefaultLotusProvider()
+
+	_, err := toml.Decode(sourceConf, lpSrc)
+	if err != nil {
+		return "", xerrors.Errorf("decoding source config: %w", err)
+	}
+
+	_, err = toml.Decode(newConf, lpNew)
+	if err != nil {
+		return "", xerrors.Errorf("decoding new config: %w", err)
+	}
+
+	cb, err := config.ConfigUpdate(lpNew, lpSrc, config.Commented(true), config.NoEnv())
+	if err != nil {
+		return "", xerrors.Errorf("interpreting source config: %w", err)
+	}
+
+	lines := strings.Split(string(cb), "\n")
+	var outLines []string
+	var categoryBuf string
+
+	for _, line := range lines {
+		// drop empty lines
+		if strings.TrimSpace(line) == "" {
+			continue
+		}
+		// drop lines starting with '#'
+		if strings.HasPrefix(strings.TrimSpace(line), "#") {
+			continue
+		}
+		// if starting with [, it's a category
+		if strings.HasPrefix(strings.TrimSpace(line), "[") {
+			categoryBuf = line
+			continue
+		}
+
+		if categoryBuf != "" {
+			outLines = append(outLines, categoryBuf)
+			categoryBuf = ""
+		}
+
+		outLines = append(outLines, line)
+	}
+
+	return strings.Join(outLines, "\n"), nil
+}
+
+func edit(editor, cfg string) (string, error) {
+	file, err := os.CreateTemp("", "lotus-provider-config-*.toml")
+	if err != nil {
+		return "", err
+	}
+
+	_, err = file.WriteString(cfg)
+	if err != nil {
+		return "", err
+	}
+
+	filePath := file.Name()
+
+	if err := file.Close(); err != nil {
+		return "", err
+	}
+
+	defer func() {
+		_ = os.Remove(filePath)
+	}()
+
+	cmd := exec.Command(editor, filePath)
+	cmd.Stdin = os.Stdin
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	err = cmd.Run()
+	if err != nil {
+		return "", err
+	}
+
+	data, err := os.ReadFile(filePath)
+	if err != nil {
+		return "", err
+	}
+
+	return string(data), err
+}
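One note on behavior: in diff(), config.ConfigUpdate(lpNew, lpSrc,
config.Commented(true), config.NoEnv()) renders settings whose value matches
the source as commented-out lines, and the filter loop then drops comments,
blank lines, and any [section] header with no surviving keys, so only edited
values land in the layer. A sketch (assuming that commenting-out semantics for
ConfigUpdate):

	src := "[Subsystems]\n  EnableWindowPost = false\n"
	edited := "[Subsystems]\n  EnableWindowPost = true\n"
	out, _ := diff(src, edited)
	fmt.Println(out)
	// "[Subsystems]" plus the changed key, in ConfigUpdate's own formatting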
@@ -37,7 +37,7 @@ var configMigrateCmd = &cli.Command{
 			Aliases: []string{FlagMinerRepoDeprecation},
 			EnvVars: []string{"LOTUS_MINER_PATH", "LOTUS_STORAGE_PATH"},
 			Value:   "~/.lotusminer",
-			Usage:   fmt.Sprintf("Specify miner repo path. flag(%s) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON", FlagMinerRepoDeprecation),
+			Usage:   "Miner repo path",
 		},
 		&cli.StringFlag{
 			Name: "repo",
@@ -124,8 +124,8 @@ func fromMiner(cctx *cli.Context) (err error) {
 	if err != nil {
 		return fmt.Errorf("could not read config.toml: %w", err)
 	}
-	var lpCfg config.LotusProviderConfig
-	_, err = deps.LoadConfigWithUpgrades(string(buf), &lpCfg)
+	lpCfg := config.DefaultLotusProvider()
+	_, err = deps.LoadConfigWithUpgrades(string(buf), lpCfg)
 	if err != nil {
 		return fmt.Errorf("could not decode toml: %w", err)
 	}
@@ -177,12 +177,16 @@ func fromMiner(cctx *cli.Context) (err error) {
 	}
 	lpCfg.Apis.ChainApiInfo = []string{header.Get("Authorization")[7:] + ":" + ainfo.Addr}
 
-	// Enable WindowPoSt
-	lpCfg.Subsystems.EnableWindowPost = true
-	msg += "\nBefore running lotus-provider, ensure any miner/worker answering of WindowPost is disabled by " +
+	// WindowPoSt message
+	msg += "\n!! Before running lotus-provider with Window PoSt enabled, ensure any miner/worker answering of WindowPost is disabled by " +
 		"(on Miner) " + configColor("DisableBuiltinWindowPoSt=true") + " and (on Workers) not enabling windowpost on CLI or via " +
 		"environment variable " + configColor("LOTUS_WORKER_WINDOWPOST") + "."
+
+	// WinningPoSt message
+	msg += "\n!! Before running lotus-provider with Winning PoSt enabled, ensure any miner/worker answering of WinningPost is disabled by " +
+		"(on Miner) " + configColor("DisableBuiltinWinningPoSt=true") + " and (on Workers) not enabling winningpost on CLI or via " +
+		"environment variable " + configColor("LOTUS_WORKER_WINNINGPOST") + "."
+
 	// Express as configTOML
 	configTOML := &bytes.Buffer{}
 	if err = toml.NewEncoder(configTOML).Encode(lpCfg); err != nil {
cmd/lotus-provider/config_new.go (new file):

package main

import (
	"bytes"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"

	"github.com/BurntSushi/toml"
	"github.com/fatih/color"
	"github.com/samber/lo"
	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
	cliutil "github.com/filecoin-project/lotus/cli/util"
	"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/repo"
)

var configNewCmd = &cli.Command{
	Name:      "new-cluster",
	Usage:     "Create new configuration for a new cluster",
	ArgsUsage: "[SP actor address...]",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:    "repo",
			EnvVars: []string{"LOTUS_PATH"},
			Hidden:  true,
			Value:   "~/.lotus",
		},
	},
	Action: func(cctx *cli.Context) error {
		configColor := color.New(color.FgHiGreen).SprintFunc()

		if cctx.Args().Len() < 1 {
			return xerrors.New("must specify at least one SP actor address. Use 'lotus-shed miner create'")
		}

		ctx := cctx.Context

		db, err := deps.MakeDB(cctx)
		if err != nil {
			return err
		}

		full, closer, err := cliutil.GetFullNodeAPIV1(cctx)
		if err != nil {
			return xerrors.Errorf("connecting to full node: %w", err)
		}
		defer closer()

		var titles []string
		err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
		if err != nil {
			return fmt.Errorf("miner cannot reach the db. Ensure the config toml's HarmonyDB entry"+
				" is setup to reach Yugabyte correctly: %s", err.Error())
		}

		name := cctx.String("to-layer")
		if name == "" {
			name = fmt.Sprintf("cluster%d", len(titles))
		} else {
			if lo.Contains(titles, name) && !cctx.Bool("overwrite") {
				return xerrors.New("the overwrite flag is needed to replace existing layer: " + name)
			}
		}
		msg := "Layer " + configColor(name) + ` created. `

		// setup config
		lpCfg := config.DefaultLotusProvider()

		for _, addr := range cctx.Args().Slice() {
			maddr, err := address.NewFromString(addr)
			if err != nil {
				return xerrors.Errorf("Invalid address: %s", addr)
			}

			_, err = full.StateMinerInfo(ctx, maddr, types.EmptyTSK)
			if err != nil {
				return xerrors.Errorf("Failed to get miner info: %w", err)
			}

			lpCfg.Addresses = append(lpCfg.Addresses, config.LotusProviderAddresses{
				PreCommitControl:      nil,
				CommitControl:         nil,
				TerminateControl:      nil,
				DisableOwnerFallback:  false,
				DisableWorkerFallback: false,
				MinerAddresses:        []string{addr},
			})
		}

		{
			sk, err := io.ReadAll(io.LimitReader(rand.Reader, 32))
			if err != nil {
				return err
			}

			lpCfg.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(sk)
		}

		{
			ainfo, err := cliutil.GetAPIInfo(cctx, repo.FullNode)
			if err != nil {
				return xerrors.Errorf("could not get API info for FullNode: %w", err)
			}

			token, err := full.AuthNew(ctx, api.AllPermissions)
			if err != nil {
				return err
			}

			lpCfg.Apis.ChainApiInfo = append(lpCfg.Apis.ChainApiInfo, fmt.Sprintf("%s:%s", string(token), ainfo.Addr))
		}

		// write config

		configTOML := &bytes.Buffer{}
		if err = toml.NewEncoder(configTOML).Encode(lpCfg); err != nil {
			return err
		}

		if !lo.Contains(titles, "base") {
			cfg, err := getDefaultConfig(true)
			if err != nil {
				return xerrors.Errorf("Cannot get default config: %w", err)
			}
			_, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ('base', $1)", cfg)

			if err != nil {
				return err
			}
		}

		if cctx.Bool("overwrite") {
			i, err := db.Exec(ctx, "DELETE FROM harmony_config WHERE title=$1", name)
			if i != 0 {
				fmt.Println("Overwriting existing layer")
			}
			if err != nil {
				fmt.Println("Got error while deleting existing layer: " + err.Error())
			}
		}

		_, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ($1, $2)", name, configTOML.String())
		if err != nil {
			return err
		}

		fmt.Println(msg)
		return nil
	},
}
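The resulting database state is layered: a 'base' row with the full defaults is
inserted if missing, and the new cluster row holds a complete config (defaults
plus this cluster's Addresses and Apis). A hypothetical check of what
new-cluster leaves behind, reusing the db handle from above:

	var layers []string
	err = db.Select(ctx, &layers, `SELECT title FROM harmony_config ORDER BY title`)
	// layers == []string{"base", "cluster0"}; run with --layers cluster0
	// (atop the implicit base) to apply it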
@@ -101,6 +101,7 @@ type Deps struct {
 	Stor       *paths.Remote
 	Si         *paths.DBIndex
 	LocalStore *paths.Local
+	LocalPaths *paths.BasicLocalStorage
 	ListenAddr string
 }
 
@@ -141,7 +142,7 @@ func (deps *Deps) PopulateRemainingDeps(ctx context.Context, cctx *cli.Context,
 		// The config feeds into task runners & their helpers
 		deps.Cfg, err = GetConfig(cctx, deps.DB)
 		if err != nil {
-			return err
+			return xerrors.Errorf("populate config: %w", err)
 		}
 	}
 
@@ -193,7 +194,7 @@ func (deps *Deps) PopulateRemainingDeps(ctx context.Context, cctx *cli.Context,
 		}()
 	}
 
-	bls := &paths.BasicLocalStorage{
+	deps.LocalPaths = &paths.BasicLocalStorage{
 		PathToJSON: cctx.String("storage-json"),
 	}
 
@@ -212,7 +213,7 @@ func (deps *Deps) PopulateRemainingDeps(ctx context.Context, cctx *cli.Context,
 		}
 	}
 	if deps.LocalStore == nil {
-		deps.LocalStore, err = paths.NewLocal(ctx, bls, deps.Si, []string{"http://" + deps.ListenAddr + "/remote"})
+		deps.LocalStore, err = paths.NewLocal(ctx, deps.LocalPaths, deps.Si, []string{"http://" + deps.ListenAddr + "/remote"})
 		if err != nil {
 			return err
 		}
@@ -234,7 +235,12 @@ Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`,
 		// todo localWorker isn't the abstraction layer we want to use here, we probably want to go straight to ffiwrapper
 		// maybe with a lotus-provider specific abstraction. LocalWorker does persistent call tracking which we probably
 		// don't need (ehh.. maybe we do, the async callback system may actually work decently well with harmonytask)
-		deps.LW = sealer.NewLocalWorker(sealer.WorkerConfig{}, deps.Stor, deps.LocalStore, deps.Si, nil, wstates)
+		deps.LW = sealer.NewLocalWorker(sealer.WorkerConfig{
+			MaxParallelChallengeReads: deps.Cfg.Proving.ParallelCheckLimit,
+		}, deps.Stor, deps.LocalStore, deps.Si, nil, wstates)
+	}
+	if deps.Maddrs == nil {
+		deps.Maddrs = map[dtypes.MinerAddress]bool{}
 	}
 	if len(deps.Maddrs) == 0 {
 		for _, s := range deps.Cfg.Addresses {
@@ -247,15 +253,19 @@ Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`,
 			}
 		}
 	}
-	fmt.Println("last line of populate")
+
 	return nil
 }
 
-var oldAddresses = regexp.MustCompile("(?i)^[addresses]$")
+var oldAddresses = regexp.MustCompile("(?i)^\\[addresses\\]$")
 
 func LoadConfigWithUpgrades(text string, lp *config.LotusProviderConfig) (toml.MetaData, error) {
 	// allow migration from old config format that was limited to 1 wallet setup.
 	newText := oldAddresses.ReplaceAllString(text, "[[addresses]]")
+
+	if text != newText {
+		log.Warnw("Upgraded config!", "old", text, "new", newText)
+	}
+
 	meta, err := toml.Decode(newText, &lp)
 	return meta, err
 }
@@ -292,3 +302,32 @@ func GetConfig(cctx *cli.Context, db *harmonydb.DB) (*config.LotusProviderConfig
 	// validate the config. Because of layering, we must validate @ startup.
 	return lp, nil
 }
+
+func GetDepsCLI(ctx context.Context, cctx *cli.Context) (*Deps, error) {
+	db, err := MakeDB(cctx)
+	if err != nil {
+		return nil, err
+	}
+
+	cfg, err := GetConfig(cctx, db)
+	if err != nil {
+		return nil, err
+	}
+
+	full, fullCloser, err := cliutil.GetFullNodeAPIV1LotusProvider(cctx, cfg.Apis.ChainApiInfo)
+	if err != nil {
+		return nil, err
+	}
+	go func() {
+		select {
+		case <-ctx.Done():
+			fullCloser()
+		}
+	}()
+
+	return &Deps{
+		Cfg:  cfg,
+		DB:   db,
+		Full: full,
+	}, nil
+}
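The oldAddresses fix deserves a note: in a regular expression, "^[addresses]$"
is a character class matching one character from {a, d, e, r, s}, not the
literal TOML header. Escaping the brackets makes the config upgrader actually
match the header line:

	old := regexp.MustCompile("(?i)^[addresses]$")
	fixed := regexp.MustCompile("(?i)^\\[addresses\\]$")
	fmt.Println(old.MatchString("[addresses]"))   // false
	fmt.Println(old.MatchString("a"))             // true: one char from the class
	fmt.Println(fixed.MatchString("[addresses]")) // true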
@@ -42,17 +42,13 @@ func main() {
 
 	local := []*cli.Command{
 		//initCmd,
+		cliCmd,
 		runCmd,
 		stopCmd,
 		configCmd,
 		testCmd,
 		webCmd,
-		//backupCmd,
-		//lcli.WithCategory("chain", actorCmd),
-		//lcli.WithCategory("storage", sectorsCmd),
-		//lcli.WithCategory("storage", provingCmd),
-		//lcli.WithCategory("storage", storageCmd),
-		//lcli.WithCategory("storage", sealingCmd),
+		sealCmd,
 	}
 
 	jaeger := tracing.SetupJaegerTracing("lotus")
@@ -128,10 +124,11 @@ func main() {
 			Hidden: true,
 			Value:  "5433",
 		},
-		&cli.StringFlag{
+		&cli.StringSliceFlag{
 			Name:    "layers",
-			EnvVars: []string{"LOTUS_LAYERS", "LOTUS_CONFIG_LAYERS"},
-			Value:   "base",
+			EnvVars: []string{"CURIO_LAYERS"},
+			Usage:   "list of layers to be interpreted (atop defaults). Default: base",
+			Value:   cli.NewStringSlice("base"),
 		},
 		&cli.StringFlag{
 			Name: deps.FlagRepoPath,
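With a StringSliceFlag, layers can now be passed as repeated flags (or via the
CURIO_LAYERS environment variable, which urfave/cli splits on commas), and
consumers read the whole list:

	// e.g. lotus-provider run --layers base --layers cluster0
	layers := cctx.StringSlice("layers") // ["base", "cluster0"]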
cmd/lotus-provider/pipeline.go (new file):

package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
	lcli "github.com/filecoin-project/lotus/cli"
	"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/provider/lpseal"
)

var sealCmd = &cli.Command{
	Name:  "seal",
	Usage: "Manage the sealing pipeline",
	Subcommands: []*cli.Command{
		sealStartCmd,
	},
}

var sealStartCmd = &cli.Command{
	Name:  "start",
	Usage: "Start new sealing operations manually",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:     "actor",
			Usage:    "Specify actor address to start sealing sectors for",
			Required: true,
		},
		&cli.BoolFlag{
			Name:  "now",
			Usage: "Start sealing sectors for all actors now (not on schedule)",
		},
		&cli.BoolFlag{
			Name:  "cc",
			Usage: "Start sealing new CC sectors",
		},
		&cli.IntFlag{
			Name:  "count",
			Usage: "Number of sectors to start",
			Value: 1,
		},
		&cli.BoolFlag{
			Name:  "synthetic",
			Usage: "Use synthetic PoRep",
			Value: false, // todo implement synthetic
		},
	},
	Action: func(cctx *cli.Context) error {
		if !cctx.Bool("now") {
			return xerrors.Errorf("schedule not implemented, use --now")
		}
		if !cctx.IsSet("actor") {
			return cli.ShowCommandHelp(cctx, "start")
		}
		if !cctx.Bool("cc") {
			return xerrors.Errorf("only CC sectors supported for now")
		}

		act, err := address.NewFromString(cctx.String("actor"))
		if err != nil {
			return xerrors.Errorf("parsing --actor: %w", err)
		}

		ctx := lcli.ReqContext(cctx)
		dep, err := deps.GetDepsCLI(ctx, cctx)
		if err != nil {
			return err
		}

		/*
			create table sectors_sdr_pipeline (
			    sp_id bigint not null,
			    sector_number bigint not null,

			    -- at request time
			    create_time timestamp not null,
			    reg_seal_proof int not null,
			    comm_d_cid text not null,

			    [... other not relevant fields]
		*/

		mid, err := address.IDFromAddress(act)
		if err != nil {
			return xerrors.Errorf("getting miner id: %w", err)
		}

		mi, err := dep.Full.StateMinerInfo(ctx, act, types.EmptyTSK)
		if err != nil {
			return xerrors.Errorf("getting miner info: %w", err)
		}

		nv, err := dep.Full.StateNetworkVersion(ctx, types.EmptyTSK)
		if err != nil {
			return xerrors.Errorf("getting network version: %w", err)
		}

		wpt := mi.WindowPoStProofType
		spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, wpt, cctx.Bool("synthetic"))
		if err != nil {
			return xerrors.Errorf("getting seal proof type: %w", err)
		}

		num, err := lpseal.AllocateSectorNumbers(ctx, dep.Full, dep.DB, act, cctx.Int("count"), func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) {
			for _, n := range numbers {
				_, err := tx.Exec("insert into sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) values ($1, $2, $3)", mid, n, spt)
				if err != nil {
					return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err)
				}
			}
			return true, nil
		})
		if err != nil {
			return xerrors.Errorf("allocating sector numbers: %w", err)
		}

		for _, number := range num {
			fmt.Println(number)
		}

		return nil
	},
}
|
|||||||
Usage: "deadline to compute WindowPoSt for ",
|
Usage: "deadline to compute WindowPoSt for ",
|
||||||
Value: 0,
|
Value: 0,
|
||||||
},
|
},
|
||||||
&cli.StringSliceFlag{
|
|
||||||
Name: "layers",
|
|
||||||
Usage: "list of layers to be interpreted (atop defaults). Default: base",
|
|
||||||
Value: cli.NewStringSlice("base"),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
Action: func(cctx *cli.Context) error {
|
Action: func(cctx *cli.Context) error {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
deps, err := deps.GetDeps(ctx, cctx)
|
deps, err := deps.GetDeps(ctx, cctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return xerrors.Errorf("get config: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ts, err := deps.Full.ChainHead(ctx)
|
ts, err := deps.Full.ChainHead(ctx)
|
||||||
@ -83,42 +78,35 @@ var wdPostTaskCmd = &cli.Command{
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("cannot get miner id %w", err)
|
return xerrors.Errorf("cannot get miner id %w", err)
|
||||||
}
|
}
|
||||||
var id int64
|
var taskId int64
|
||||||
|
|
||||||
retryDelay := time.Millisecond * 10
|
|
||||||
retryAddTask:
|
|
||||||
_, err = deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
|
_, err = deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
|
||||||
err = tx.QueryRow(`INSERT INTO harmony_task (name, posted_time, added_by) VALUES ('WdPost', CURRENT_TIMESTAMP, 123) RETURNING id`).Scan(&id)
|
err = tx.QueryRow(`INSERT INTO harmony_task (name, posted_time, added_by) VALUES ('WdPost', CURRENT_TIMESTAMP, 123) RETURNING id`).Scan(&taskId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("inserting harmony_task: ", err)
|
log.Error("inserting harmony_task: ", err)
|
||||||
return false, xerrors.Errorf("inserting harmony_task: %w", err)
|
return false, xerrors.Errorf("inserting harmony_task: %w", err)
|
||||||
}
|
}
|
||||||
_, err = tx.Exec(`INSERT INTO wdpost_partition_tasks
|
_, err = tx.Exec(`INSERT INTO wdpost_partition_tasks
|
||||||
(task_id, sp_id, proving_period_start, deadline_index, partition_index) VALUES ($1, $2, $3, $4, $5)`,
|
(task_id, sp_id, proving_period_start, deadline_index, partition_index) VALUES ($1, $2, $3, $4, $5)`,
|
||||||
id, maddr, ht, cctx.Uint64("deadline"), 0)
|
taskId, maddr, ht, cctx.Uint64("deadline"), 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("inserting wdpost_partition_tasks: ", err)
|
log.Error("inserting wdpost_partition_tasks: ", err)
|
||||||
return false, xerrors.Errorf("inserting wdpost_partition_tasks: %w", err)
|
return false, xerrors.Errorf("inserting wdpost_partition_tasks: %w", err)
|
||||||
}
|
}
|
||||||
_, err = tx.Exec("INSERT INTO harmony_test (task_id) VALUES ($1)", id)
|
_, err = tx.Exec("INSERT INTO harmony_test (task_id) VALUES ($1)", taskId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, xerrors.Errorf("inserting into harmony_tests: %w", err)
|
return false, xerrors.Errorf("inserting into harmony_tests: %w", err)
|
||||||
}
|
}
|
||||||
return true, nil
|
return true, nil
|
||||||
})
|
}, harmonydb.OptionRetry())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if harmonydb.IsErrSerialization(err) {
|
|
||||||
time.Sleep(retryDelay)
|
|
||||||
retryDelay *= 2
|
|
||||||
goto retryAddTask
|
|
||||||
}
|
|
||||||
return xerrors.Errorf("writing SQL transaction: %w", err)
|
return xerrors.Errorf("writing SQL transaction: %w", err)
|
||||||
}
|
}
|
||||||
fmt.Printf("Inserted task %v. Waiting for success ", id)
|
fmt.Printf("Inserted task %v. Waiting for success ", taskId)
|
||||||
var result sql.NullString
|
var result sql.NullString
|
||||||
for {
|
for {
|
||||||
time.Sleep(time.Second)
|
time.Sleep(time.Second)
|
||||||
err = deps.DB.QueryRow(ctx, `SELECT result FROM harmony_test WHERE task_id=$1`, id).Scan(&result)
|
err = deps.DB.QueryRow(ctx, `SELECT result FROM harmony_test WHERE task_id=$1`, taskId).Scan(&result)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("reading result from harmony_test: %w", err)
|
return xerrors.Errorf("reading result from harmony_test: %w", err)
|
||||||
}
|
}
|
||||||
@ -127,6 +115,7 @@ var wdPostTaskCmd = &cli.Command{
|
|||||||
}
|
}
|
||||||
fmt.Print(".")
|
fmt.Print(".")
|
||||||
}
|
}
|
||||||
|
fmt.Println()
|
||||||
log.Infof("Result: %s", result.String)
|
log.Infof("Result: %s", result.String)
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
@ -172,7 +161,7 @@ It will not send any messages to the chain. Since it can compute any deadline, o
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := provider.WindowPostScheduler(ctx, deps.Cfg.Fees, deps.Cfg.Proving, deps.Full, deps.Verif, deps.LW, nil,
|
wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := provider.WindowPostScheduler(ctx, deps.Cfg.Fees, deps.Cfg.Proving, deps.Full, deps.Verif, deps.LW, nil, nil,
|
||||||
deps.As, deps.Maddrs, deps.DB, deps.Stor, deps.Si, deps.Cfg.Subsystems.WindowPostMaxTasks)
|
deps.As, deps.Maddrs, deps.DB, deps.Stor, deps.Si, deps.Cfg.Subsystems.WindowPostMaxTasks)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
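The deleted retryAddTask loop and harmonydb.IsErrSerialization check are
subsumed by the new harmonydb.OptionRetry() argument, which, going by the shape
of this change, retries serialization failures inside BeginTransaction itself.
The calling pattern reduces to:

	_, err = deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
		// ...inserts that must land in one transaction...
		return true, nil
	}, harmonydb.OptionRetry())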
@ -7,29 +7,42 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/gbrlsnchs/jwt/v3"
|
"github.com/gbrlsnchs/jwt/v3"
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
|
"github.com/mitchellh/go-homedir"
|
||||||
|
"github.com/urfave/cli/v2"
|
||||||
"go.opencensus.io/tag"
|
"go.opencensus.io/tag"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/go-jsonrpc"
|
"github.com/filecoin-project/go-jsonrpc"
|
||||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/api/client"
|
||||||
|
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||||
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
|
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
|
||||||
"github.com/filecoin-project/lotus/lib/rpcenc"
|
"github.com/filecoin-project/lotus/lib/rpcenc"
|
||||||
"github.com/filecoin-project/lotus/metrics"
|
"github.com/filecoin-project/lotus/metrics"
|
||||||
"github.com/filecoin-project/lotus/metrics/proxy"
|
"github.com/filecoin-project/lotus/metrics/proxy"
|
||||||
|
"github.com/filecoin-project/lotus/node/repo"
|
||||||
|
"github.com/filecoin-project/lotus/provider/lpmarket"
|
||||||
"github.com/filecoin-project/lotus/provider/lpweb"
|
"github.com/filecoin-project/lotus/provider/lpweb"
|
||||||
"github.com/filecoin-project/lotus/storage/paths"
|
"github.com/filecoin-project/lotus/storage/paths"
|
||||||
|
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
|
||||||
|
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||||
)
|
)
|
||||||
|
|
||||||
var log = logging.Logger("lp/rpc")
|
var log = logging.Logger("lp/rpc")
|
||||||
|
|
||||||
|
var permissioned = os.Getenv("LOTUS_DISABLE_AUTH_PERMISSIONED") != "1"
|
||||||
|
|
||||||
func LotusProviderHandler(
|
func LotusProviderHandler(
|
||||||
authv func(ctx context.Context, token string) ([]auth.Permission, error),
|
authv func(ctx context.Context, token string) ([]auth.Permission, error),
|
||||||
remote http.HandlerFunc,
|
remote http.HandlerFunc,
|
||||||
@@ -65,19 +78,111 @@ func LotusProviderHandler(

type ProviderAPI struct {
	*deps.Deps
+	paths.SectorIndex
	ShutdownChan chan struct{}
}

+func (p *ProviderAPI) StorageDetachLocal(ctx context.Context, path string) error {
+	path, err := homedir.Expand(path)
+	if err != nil {
+		return xerrors.Errorf("expanding local path: %w", err)
+	}
+
+	// check that we have the path opened
+	lps, err := p.LocalStore.Local(ctx)
+	if err != nil {
+		return xerrors.Errorf("getting local path list: %w", err)
+	}
+
+	var localPath *storiface.StoragePath
+	for _, lp := range lps {
+		if lp.LocalPath == path {
+			lp := lp // copy to make the linter happy
+			localPath = &lp
+			break
+		}
+	}
+	if localPath == nil {
+		return xerrors.Errorf("no local paths match '%s'", path)
+	}
+
+	// drop from the persisted storage.json
+	var found bool
+	if err := p.LocalPaths.SetStorage(func(sc *storiface.StorageConfig) {
+		out := make([]storiface.LocalPath, 0, len(sc.StoragePaths))
+		for _, storagePath := range sc.StoragePaths {
+			if storagePath.Path != path {
+				out = append(out, storagePath)
+				continue
+			}
+			found = true
+		}
+		sc.StoragePaths = out
+	}); err != nil {
+		return xerrors.Errorf("set storage config: %w", err)
+	}
+	if !found {
+		// maybe this is fine?
+		return xerrors.Errorf("path not found in storage.json")
+	}
+
+	// unregister locally, drop from sector index
+	return p.LocalStore.ClosePath(ctx, localPath.ID)
+}
+
+func (p *ProviderAPI) StorageLocal(ctx context.Context) (map[storiface.ID]string, error) {
+	ps, err := p.LocalStore.Local(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	var out = make(map[storiface.ID]string)
+	for _, path := range ps {
+		out[path.ID] = path.LocalPath
+	}
+
+	return out, nil
+}
+
+func (p *ProviderAPI) StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) {
+	return p.Stor.FsStat(ctx, id)
+}

func (p *ProviderAPI) Version(context.Context) (api.Version, error) {
	return api.ProviderAPIVersion0, nil
}

+func (p *ProviderAPI) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
+	di := lpmarket.NewPieceIngester(p.Deps.DB, p.Deps.Full)
+
+	return di.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header)
+}

// Trigger shutdown
func (p *ProviderAPI) Shutdown(context.Context) error {
	close(p.ShutdownChan)
	return nil
}

+func (p *ProviderAPI) StorageAddLocal(ctx context.Context, path string) error {
+	path, err := homedir.Expand(path)
+	if err != nil {
+		return xerrors.Errorf("expanding local path: %w", err)
+	}
+
+	if err := p.LocalStore.OpenPath(ctx, path); err != nil {
+		return xerrors.Errorf("opening local path: %w", err)
+	}
+
+	if err := p.LocalPaths.SetStorage(func(sc *storiface.StorageConfig) {
+		sc.StoragePaths = append(sc.StoragePaths, storiface.LocalPath{Path: path})
+	}); err != nil {
+		return xerrors.Errorf("get storage config: %w", err)
+	}
+
+	return nil
+}

func ListenAndServe(ctx context.Context, dependencies *deps.Deps, shutdownChan chan struct{}) error {
	fh := &paths.FetchHandler{Local: dependencies.LocalStore, PfHandler: &paths.DefaultPartialFileHandler{}}
	remoteHandler := func(w http.ResponseWriter, r *http.Request) {

@@ -89,13 +194,6 @@ func ListenAndServe(ctx context.Context, dependencies *deps.Deps, shutdownChan c
		fh.ServeHTTP(w, r)
	}
-	// local APIs
-	{
-		// debugging
-		mux := mux.NewRouter()
-		mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
-		mux.PathPrefix("/remote").HandlerFunc(remoteHandler)
-	}

	var authVerify func(context.Context, string) ([]auth.Permission, error)
	{

@@ -117,8 +215,8 @@ func ListenAndServe(ctx context.Context, dependencies *deps.Deps, shutdownChan c
		Handler: LotusProviderHandler(
			authVerify,
			remoteHandler,
-			&ProviderAPI{dependencies, shutdownChan},
-			true),
+			&ProviderAPI{dependencies, dependencies.Si, shutdownChan},
+			permissioned),
		ReadHeaderTimeout: time.Minute * 3,
		BaseContext: func(listener net.Listener) context.Context {
			ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-worker"))
@@ -153,3 +251,26 @@ func ListenAndServe(ctx context.Context, dependencies *deps.Deps, shutdownChan c
	}
	return eg.Wait()
}

+func GetProviderAPI(ctx *cli.Context) (api.LotusProvider, jsonrpc.ClientCloser, error) {
+	addr, headers, err := cliutil.GetRawAPI(ctx, repo.Provider, "v0")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	u, err := url.Parse(addr)
+	if err != nil {
+		return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err)
+	}
+
+	switch u.Scheme {
+	case "ws":
+		u.Scheme = "http"
+	case "wss":
+		u.Scheme = "https"
+	}
+
+	addr = u.String()
+
+	return client.NewProviderRpc(ctx.Context, addr, headers)
+}
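A minimal usage sketch for the `GetProviderAPI` helper added above, assuming a standard urfave/cli app; the `api-version` subcommand is hypothetical, and the dial-then-defer-closer pattern mirrors the storage subcommands later in this diff:

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"

	lcli "github.com/filecoin-project/lotus/cli"
	"github.com/filecoin-project/lotus/cmd/lotus-provider/rpc"
)

// Hypothetical subcommand showing the GetProviderAPI pattern: resolve the
// provider repo's API endpoint, dial it, and always defer the closer.
var versionCmd = &cli.Command{
	Name:  "api-version",
	Usage: "print the provider's API version",
	Action: func(cctx *cli.Context) error {
		minerApi, closer, err := rpc.GetProviderAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

		v, err := minerApi.Version(lcli.ReqContext(cctx))
		if err != nil {
			return err
		}

		fmt.Println(v.String())
		return nil
	},
}

func main() {
	app := &cli.App{Commands: []*cli.Command{versionCmd}}
	if err := app.Run(os.Args); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
```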
@@ -50,11 +50,6 @@ var runCmd = &cli.Command{
			Usage: "manage open file limit",
			Value: true,
		},
-		&cli.StringSliceFlag{
-			Name:  "layers",
-			Usage: "list of layers to be interpreted (atop defaults). Default: base",
-			Value: cli.NewStringSlice("base"),
-		},
		&cli.StringFlag{
			Name:  "storage-json",
			Usage: "path to json file containing storage config",

@@ -83,6 +78,10 @@ var runCmd = &cli.Command{
			}
		}

+		if err := os.MkdirAll(os.TempDir(), 0755); err != nil {
+			log.Errorf("ensuring tempdir exists: %s", err)
+		}
+
		ctx, _ := tag.New(lcli.DaemonContext(cctx),
			tag.Insert(metrics.Version, build.BuildVersion),
			tag.Insert(metrics.Commit, build.CurrentCommit),

@@ -117,13 +116,10 @@ var runCmd = &cli.Command{
		dependencies := &deps.Deps{}
		err = dependencies.PopulateRemainingDeps(ctx, cctx, true)
		if err != nil {
-			fmt.Println("err", err)
			return err
		}
-		fmt.Println("ef")

		taskEngine, err := tasks.StartTasks(ctx, dependencies)
-		fmt.Println("gh")

		if err != nil {
			return nil

@@ -153,11 +149,6 @@ var webCmd = &cli.Command{
			Usage: "Address to listen on",
			Value: "127.0.0.1:4701",
		},
-		&cli.StringSliceFlag{
-			Name:  "layers",
-			Usage: "list of layers to be interpreted (atop defaults). Default: base. Web will be added",
-			Value: cli.NewStringSlice("base"),
-		},
		&cli.BoolFlag{
			Name:  "nosync",
			Usage: "don't check full-node sync status",
cmd/lotus-provider/storage.go (new file)
@@ -0,0 +1,398 @@
package main

import (
	"encoding/json"
	"fmt"
	"math/bits"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"github.com/docker/go-units"
	"github.com/fatih/color"
	"github.com/google/uuid"
	"github.com/mitchellh/go-homedir"
	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/chain/types"
	lcli "github.com/filecoin-project/lotus/cli"
	"github.com/filecoin-project/lotus/cmd/lotus-provider/rpc"
	"github.com/filecoin-project/lotus/storage/sealer/fsutil"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

const metaFile = "sectorstore.json"

var storageCmd = &cli.Command{
	Name:  "storage",
	Usage: "manage sector storage",
	Description: `Sectors can be stored across many filesystem paths. These
commands provide ways to manage the storage the miner will use to store sectors
long term for proving (referenced as 'store') as well as how sectors will be
stored while moving through the sealing pipeline (referenced as 'seal').`,
	Subcommands: []*cli.Command{
		storageAttachCmd,
		storageDetachCmd,
		storageListCmd,
		/*storageDetachCmd,
		storageRedeclareCmd,
		storageFindCmd,
		storageCleanupCmd,
		storageLocks,*/
	},
}

var storageAttachCmd = &cli.Command{
	Name:      "attach",
	Usage:     "attach local storage path",
	ArgsUsage: "[path]",
	Description: `Storage can be attached to the miner using this command. The storage volume
list is stored local to the miner in storage.json, set in lotus-provider run. We do not
recommend manually modifying this value without further understanding of the
storage system.

Each storage volume contains a configuration file which describes the
capabilities of the volume. When the '--init' flag is provided, this file will
be created using the additional flags.

Weight
A high weight value means data will be more likely to be stored in this path

Seal
Data for the sealing process will be stored here

Store
Finalized sectors that will be moved here for long term storage and be proven
over time
`,
	Flags: []cli.Flag{
		&cli.BoolFlag{
			Name:  "init",
			Usage: "initialize the path first",
		},
		&cli.Uint64Flag{
			Name:  "weight",
			Usage: "(for init) path weight",
			Value: 10,
		},
		&cli.BoolFlag{
			Name:  "seal",
			Usage: "(for init) use path for sealing",
		},
		&cli.BoolFlag{
			Name:  "store",
			Usage: "(for init) use path for long-term storage",
		},
		&cli.StringFlag{
			Name:  "max-storage",
			Usage: "(for init) limit storage space for sectors (expensive for very large paths!)",
		},
		&cli.StringSliceFlag{
			Name:  "groups",
			Usage: "path group names",
		},
		&cli.StringSliceFlag{
			Name:  "allow-to",
			Usage: "path groups allowed to pull data from this path (allow all if not specified)",
		},
	},
	Action: func(cctx *cli.Context) error {
		minerApi, closer, err := rpc.GetProviderAPI(cctx)
		if err != nil {
			return err
		}

		defer closer()
		ctx := lcli.ReqContext(cctx)

		if cctx.NArg() != 1 {
			return lcli.IncorrectNumArgs(cctx)
		}

		p, err := homedir.Expand(cctx.Args().First())
		if err != nil {
			return xerrors.Errorf("expanding path: %w", err)
		}

		if cctx.Bool("init") {
			if err := os.MkdirAll(p, 0755); err != nil {
				if !os.IsExist(err) {
					return err
				}
			}

			_, err := os.Stat(filepath.Join(p, metaFile))
			if !os.IsNotExist(err) {
				if err == nil {
					return xerrors.Errorf("path is already initialized")
				}
				return err
			}

			var maxStor int64
			if cctx.IsSet("max-storage") {
				maxStor, err = units.RAMInBytes(cctx.String("max-storage"))
				if err != nil {
					return xerrors.Errorf("parsing max-storage: %w", err)
				}
			}

			cfg := &storiface.LocalStorageMeta{
				ID:         storiface.ID(uuid.New().String()),
				Weight:     cctx.Uint64("weight"),
				CanSeal:    cctx.Bool("seal"),
				CanStore:   cctx.Bool("store"),
				MaxStorage: uint64(maxStor),
				Groups:     cctx.StringSlice("groups"),
				AllowTo:    cctx.StringSlice("allow-to"),
			}

			if !(cfg.CanStore || cfg.CanSeal) {
				return xerrors.Errorf("must specify at least one of --store or --seal")
			}

			b, err := json.MarshalIndent(cfg, "", "  ")
			if err != nil {
				return xerrors.Errorf("marshaling storage config: %w", err)
			}

			if err := os.WriteFile(filepath.Join(p, metaFile), b, 0644); err != nil {
				return xerrors.Errorf("persisting storage metadata (%s): %w", filepath.Join(p, metaFile), err)
			}
		}

		return minerApi.StorageAddLocal(ctx, p)
	},
}

var storageDetachCmd = &cli.Command{
	Name:  "detach",
	Usage: "detach local storage path",
	Flags: []cli.Flag{
		&cli.BoolFlag{
			Name: "really-do-it",
		},
	},
	ArgsUsage: "[path]",
	Action: func(cctx *cli.Context) error {
		minerApi, closer, err := rpc.GetProviderAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()
		ctx := lcli.ReqContext(cctx)

		if cctx.NArg() != 1 {
			return lcli.IncorrectNumArgs(cctx)
		}

		p, err := homedir.Expand(cctx.Args().First())
		if err != nil {
			return xerrors.Errorf("expanding path: %w", err)
		}

		if !cctx.Bool("really-do-it") {
			return xerrors.Errorf("pass --really-do-it to execute the action")
		}

		return minerApi.StorageDetachLocal(ctx, p)
	},
}

var storageListCmd = &cli.Command{
	Name:  "list",
	Usage: "list local storage paths",
	Subcommands: []*cli.Command{
		//storageListSectorsCmd,
	},
	Action: func(cctx *cli.Context) error {
		minerApi, closer, err := rpc.GetProviderAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()
		ctx := lcli.ReqContext(cctx)

		st, err := minerApi.StorageList(ctx)
		if err != nil {
			return err
		}

		local, err := minerApi.StorageLocal(ctx)
		if err != nil {
			return err
		}

		type fsInfo struct {
			storiface.ID
			sectors []storiface.Decl
			stat    fsutil.FsStat
		}

		sorted := make([]fsInfo, 0, len(st))
		for id, decls := range st {
			st, err := minerApi.StorageStat(ctx, id)
			if err != nil {
				sorted = append(sorted, fsInfo{ID: id, sectors: decls})
				continue
			}

			sorted = append(sorted, fsInfo{id, decls, st})
		}

		sort.Slice(sorted, func(i, j int) bool {
			if sorted[i].stat.Capacity != sorted[j].stat.Capacity {
				return sorted[i].stat.Capacity > sorted[j].stat.Capacity
			}
			return sorted[i].ID < sorted[j].ID
		})

		for _, s := range sorted {

			var cnt [5]int
			for _, decl := range s.sectors {
				for i := range cnt {
					if decl.SectorFileType&(1<<i) != 0 {
						cnt[i]++
					}
				}
			}

			fmt.Printf("%s:\n", s.ID)

			pingStart := time.Now()
			st, err := minerApi.StorageStat(ctx, s.ID)
			if err != nil {
				fmt.Printf("\t%s: %s:\n", color.RedString("Error"), err)
				continue
			}
			ping := time.Now().Sub(pingStart)

			safeRepeat := func(s string, count int) string {
				if count < 0 {
					return ""
				}
				return strings.Repeat(s, count)
			}

			var barCols = int64(50)

			// filesystem use bar
			{
				usedPercent := (st.Capacity - st.FSAvailable) * 100 / st.Capacity

				percCol := color.FgGreen
				switch {
				case usedPercent > 98:
					percCol = color.FgRed
				case usedPercent > 90:
					percCol = color.FgYellow
				}

				set := (st.Capacity - st.FSAvailable) * barCols / st.Capacity
				used := (st.Capacity - (st.FSAvailable + st.Reserved)) * barCols / st.Capacity
				reserved := set - used
				bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set))

				desc := ""
				if st.Max > 0 {
					desc = " (filesystem)"
				}

				fmt.Printf("\t[%s] %s/%s %s%s\n", color.New(percCol).Sprint(bar),
					types.SizeStr(types.NewInt(uint64(st.Capacity-st.FSAvailable))),
					types.SizeStr(types.NewInt(uint64(st.Capacity))),
					color.New(percCol).Sprintf("%d%%", usedPercent), desc)
			}

			// optional configured limit bar
			if st.Max > 0 {
				usedPercent := st.Used * 100 / st.Max

				percCol := color.FgGreen
				switch {
				case usedPercent > 98:
					percCol = color.FgRed
				case usedPercent > 90:
					percCol = color.FgYellow
				}

				set := st.Used * barCols / st.Max
				used := (st.Used + st.Reserved) * barCols / st.Max
				reserved := set - used
				bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set))

				fmt.Printf("\t[%s] %s/%s %s (limit)\n", color.New(percCol).Sprint(bar),
					types.SizeStr(types.NewInt(uint64(st.Used))),
					types.SizeStr(types.NewInt(uint64(st.Max))),
					color.New(percCol).Sprintf("%d%%", usedPercent))
			}

			fmt.Printf("\t%s; %s; %s; %s; %s; Reserved: %s\n",
				color.YellowString("Unsealed: %d", cnt[0]),
				color.GreenString("Sealed: %d", cnt[1]),
				color.BlueString("Caches: %d", cnt[2]),
				color.GreenString("Updated: %d", cnt[3]),
				color.BlueString("Update-caches: %d", cnt[4]),
				types.SizeStr(types.NewInt(uint64(st.Reserved))))

			si, err := minerApi.StorageInfo(ctx, s.ID)
			if err != nil {
				return err
			}

			fmt.Print("\t")
			if si.CanSeal || si.CanStore {
				fmt.Printf("Weight: %d; Use: ", si.Weight)
				if si.CanSeal {
					fmt.Print(color.MagentaString("Seal "))
				}
				if si.CanStore {
					fmt.Print(color.CyanString("Store"))
				}
			} else {
				fmt.Print(color.HiYellowString("Use: ReadOnly"))
			}
			fmt.Println()

			if len(si.Groups) > 0 {
				fmt.Printf("\tGroups: %s\n", strings.Join(si.Groups, ", "))
			}
			if len(si.AllowTo) > 0 {
				fmt.Printf("\tAllowTo: %s\n", strings.Join(si.AllowTo, ", "))
			}

			if len(si.AllowTypes) > 0 || len(si.DenyTypes) > 0 {
				denied := storiface.FTAll.SubAllowed(si.AllowTypes, si.DenyTypes)
				allowed := storiface.FTAll ^ denied

				switch {
				case bits.OnesCount64(uint64(allowed)) == 0:
					fmt.Printf("\tAllow Types: %s\n", color.RedString("None"))
				case bits.OnesCount64(uint64(allowed)) < bits.OnesCount64(uint64(denied)):
					fmt.Printf("\tAllow Types: %s\n", color.GreenString(strings.Join(allowed.Strings(), " ")))
				default:
					fmt.Printf("\tDeny Types: %s\n", color.RedString(strings.Join(denied.Strings(), " ")))
				}
			}

			if localPath, ok := local[s.ID]; ok {
				fmt.Printf("\tLocal: %s\n", color.GreenString(localPath))
			}
			for i, l := range si.URLs {
				var rtt string
				if _, ok := local[s.ID]; !ok && i == 0 {
					rtt = " (latency: " + ping.Truncate(time.Microsecond*100).String() + ")"
				}

				fmt.Printf("\tURL: %s%s\n", l, rtt) // TODO; try pinging maybe?? print latency?
			}
			fmt.Println()
		}

		return nil
	},
}
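For reference, a small sketch of reading back the `sectorstore.json` metadata that `storage attach --init` persists above; the `/srv/store1` directory is hypothetical, and the field set mirrors the `storiface.LocalStorageMeta` literal in the attach action:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// Mirror of the fields written by the attach --init path above; the real
// type is storiface.LocalStorageMeta.
type localStorageMeta struct {
	ID         string
	Weight     uint64
	CanSeal    bool
	CanStore   bool
	MaxStorage uint64
	Groups     []string
	AllowTo    []string
}

func main() {
	// metaFile in storage.go is "sectorstore.json"; the directory is made up.
	b, err := os.ReadFile(filepath.Join("/srv/store1", "sectorstore.json"))
	if err != nil {
		panic(err)
	}

	var meta localStorageMeta
	if err := json.Unmarshal(b, &meta); err != nil {
		panic(err)
	}

	fmt.Printf("path %s: seal=%v store=%v weight=%d\n",
		meta.ID, meta.CanSeal, meta.CanStore, meta.Weight)
}
```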
@@ -10,7 +10,10 @@ import (
	"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
	"github.com/filecoin-project/lotus/provider"
+	"github.com/filecoin-project/lotus/provider/chainsched"
+	"github.com/filecoin-project/lotus/provider/lpffi"
	"github.com/filecoin-project/lotus/provider/lpmessage"
+	"github.com/filecoin-project/lotus/provider/lpseal"
	"github.com/filecoin-project/lotus/provider/lpwinning"
)

@@ -25,20 +28,24 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.Task
	as := dependencies.As
	maddrs := dependencies.Maddrs
	stor := dependencies.Stor
+	lstor := dependencies.LocalStore
	si := dependencies.Si
	var activeTasks []harmonytask.TaskInterface

	sender, sendTask := lpmessage.NewSender(full, full, db)
	activeTasks = append(activeTasks, sendTask)

+	chainSched := chainsched.New(full)
+
	///////////////////////////////////////////////////////////////////////
	///// Task Selection
	///////////////////////////////////////////////////////////////////////
	{
+		// PoSt
+
		if cfg.Subsystems.EnableWindowPost {
			wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := provider.WindowPostScheduler(ctx, cfg.Fees, cfg.Proving, full, verif, lw, sender,
-				as, maddrs, db, stor, si, cfg.Subsystems.WindowPostMaxTasks)
+				chainSched, as, maddrs, db, stor, si, cfg.Subsystems.WindowPostMaxTasks)
			if err != nil {
				return nil, err
			}

@@ -50,9 +57,77 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.Task
			activeTasks = append(activeTasks, winPoStTask)
		}
	}

+	hasAnySealingTask := cfg.Subsystems.EnableSealSDR ||
+		cfg.Subsystems.EnableSealSDRTrees ||
+		cfg.Subsystems.EnableSendPrecommitMsg ||
+		cfg.Subsystems.EnablePoRepProof ||
+		cfg.Subsystems.EnableMoveStorage ||
+		cfg.Subsystems.EnableSendCommitMsg
+	{
+		// Sealing
+
+		var sp *lpseal.SealPoller
+		var slr *lpffi.SealCalls
+		if hasAnySealingTask {
+			sp = lpseal.NewPoller(db, full)
+			go sp.RunPoller(ctx)
+
+			slr = lpffi.NewSealCalls(stor, lstor, si)
+		}
+
+		// NOTE: Tasks with the LEAST priority are at the top
+		if cfg.Subsystems.EnableSealSDR {
+			sdrTask := lpseal.NewSDRTask(full, db, sp, slr, cfg.Subsystems.SealSDRMaxTasks)
+			activeTasks = append(activeTasks, sdrTask)
+		}
+		if cfg.Subsystems.EnableSealSDRTrees {
+			treesTask := lpseal.NewTreesTask(sp, db, slr, cfg.Subsystems.SealSDRTreesMaxTasks)
+			finalizeTask := lpseal.NewFinalizeTask(cfg.Subsystems.FinalizeMaxTasks, sp, slr, db)
+			activeTasks = append(activeTasks, treesTask, finalizeTask)
+		}
+		if cfg.Subsystems.EnableSendPrecommitMsg {
+			precommitTask := lpseal.NewSubmitPrecommitTask(sp, db, full, sender, as, cfg.Fees.MaxPreCommitGasFee)
+			activeTasks = append(activeTasks, precommitTask)
+		}
+		if cfg.Subsystems.EnablePoRepProof {
+			porepTask := lpseal.NewPoRepTask(db, full, sp, slr, cfg.Subsystems.PoRepProofMaxTasks)
+			activeTasks = append(activeTasks, porepTask)
+		}
+		if cfg.Subsystems.EnableMoveStorage {
+			moveStorageTask := lpseal.NewMoveStorageTask(sp, slr, db, cfg.Subsystems.MoveStorageMaxTasks)
+			activeTasks = append(activeTasks, moveStorageTask)
+		}
+		if cfg.Subsystems.EnableSendCommitMsg {
+			commitTask := lpseal.NewSubmitCommitTask(sp, db, full, sender, as, cfg.Fees.MaxCommitGasFee)
+			activeTasks = append(activeTasks, commitTask)
+		}
+	}
	log.Infow("This lotus_provider instance handles",
		"miner_addresses", maddrs,
		"tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name }))

-	return harmonytask.New(db, activeTasks, dependencies.ListenAddr)
+	// harmony treats the first task as highest priority, so reverse the order
+	// (we could have just appended to this list in the reverse order, but defining
+	// tasks in pipeline order is more intuitive)
+	activeTasks = lo.Reverse(activeTasks)
+
+	ht, err := harmonytask.New(db, activeTasks, dependencies.ListenAddr)
+	if err != nil {
+		return nil, err
+	}
+
+	if hasAnySealingTask {
+		watcher, err := lpmessage.NewMessageWatcher(db, ht, chainSched, full)
+		if err != nil {
+			return nil, err
+		}
+		_ = watcher
+	}
+
+	if cfg.Subsystems.EnableWindowPost || hasAnySealingTask {
+		go chainSched.Run(ctx)
+	}
+
+	return ht, nil
}
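The reverse-before-registering step above is easy to misread, so here is a standalone sketch of the same trick, assuming the `lo` helper used in this file is github.com/samber/lo; the task names are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/samber/lo"
)

func main() {
	// Tasks appended in pipeline order, earliest stage first.
	tasks := []string{"SDR", "SDRTrees", "PoRep", "Finalize", "MoveStorage"}

	// Per the comment in StartTasks, harmony treats the first registered task
	// as highest priority, so reversing lets the code above stay in intuitive
	// pipeline order while later stages win at scheduling time.
	tasks = lo.Reverse(tasks)

	fmt.Println(tasks) // [MoveStorage Finalize PoRep SDRTrees SDR]
}
```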
@@ -8,6 +8,8 @@ import (
	"strconv"

	"github.com/fatih/color"
+	"github.com/libp2p/go-libp2p/core/peer"
+	ma "github.com/multiformats/go-multiaddr"
	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

@@ -39,6 +41,176 @@ var actorCmd = &cli.Command{
		actorGetMethodNum,
		actorProposeChangeBeneficiary,
		actorConfirmChangeBeneficiary,
+		actorSetAddrsCmd,
+		actorSetPeeridCmd,
+	},
+}
+
+var actorSetAddrsCmd = &cli.Command{
+	Name:      "set-p2p-addrs",
+	Usage:     "set addresses that your miner can be publicly dialed on",
+	ArgsUsage: "<multiaddrs>",
+	Flags: []cli.Flag{
+		&cli.StringFlag{
+			Name:     "actor",
+			Usage:    "specify the address of miner actor",
+			Required: true,
+		},
+		&cli.StringFlag{
+			Name:  "from",
+			Usage: "optionally specify the account to send the message from",
+		},
+		&cli.BoolFlag{
+			Name:  "unset",
+			Usage: "unset address",
+			Value: false,
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		args := cctx.Args().Slice()
+		unset := cctx.Bool("unset")
+		if len(args) == 0 && !unset {
+			return cli.ShowSubcommandHelp(cctx)
+		}
+		if len(args) > 0 && unset {
+			return fmt.Errorf("unset can only be used with no arguments")
+		}
+
+		var maddr address.Address
+		maddr, err := address.NewFromString(cctx.String("actor"))
+		if err != nil {
+			return fmt.Errorf("parsing address %s: %w", cctx.String("actor"), err)
+		}
+
+		api, acloser, err := lcli.GetFullNodeAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer acloser()
+
+		ctx := lcli.ReqContext(cctx)
+
+		var addrs []abi.Multiaddrs
+		for _, a := range args {
+			maddr, err := ma.NewMultiaddr(a)
+			if err != nil {
+				return fmt.Errorf("failed to parse %q as a multiaddr: %w", a, err)
+			}
+
+			maddrNop2p, strip := ma.SplitFunc(maddr, func(c ma.Component) bool {
+				return c.Protocol().Code == ma.P_P2P
+			})
+
+			if strip != nil {
+				fmt.Println("Stripping peerid ", strip, " from ", maddr)
+			}
+			addrs = append(addrs, maddrNop2p.Bytes())
+		}
+
+		minfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+		if err != nil {
+			return err
+		}
+
+		fromAddr := minfo.Worker
+		if from := cctx.String("from"); from != "" {
+			addr, err := address.NewFromString(from)
+			if err != nil {
+				return err
+			}
+
+			fromAddr = addr
+		}
+
+		fromId, err := api.StateLookupID(ctx, fromAddr, types.EmptyTSK)
+		if err != nil {
+			return err
+		}
+
+		if !isController(minfo, fromId) {
+			return xerrors.Errorf("sender isn't a controller of miner: %s", fromId)
+		}
+
+		params, err := actors.SerializeParams(&miner.ChangeMultiaddrsParams{NewMultiaddrs: addrs})
+		if err != nil {
+			return err
+		}
+
+		smsg, err := api.MpoolPushMessage(ctx, &types.Message{
+			To:     maddr,
+			From:   fromId,
+			Value:  types.NewInt(0),
+			Method: builtin.MethodsMiner.ChangeMultiaddrs,
+			Params: params,
+		}, nil)
+		if err != nil {
+			return err
+		}
+
+		fmt.Printf("Requested multiaddrs change in message %s\n", smsg.Cid())
+		return nil
+
+	},
+}
+
+var actorSetPeeridCmd = &cli.Command{
+	Name:      "set-peer-id",
+	Usage:     "set the peer id of your miner",
+	ArgsUsage: "<peer id>",
+	Flags: []cli.Flag{
+		&cli.StringFlag{
+			Name:     "actor",
+			Usage:    "specify the address of miner actor",
+			Required: true,
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		api, acloser, err := lcli.GetFullNodeAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer acloser()
+
+		ctx := lcli.ReqContext(cctx)
+
+		maddr, err := address.NewFromString(cctx.String("actor"))
+		if err != nil {
+			return fmt.Errorf("parsing address %s: %w", cctx.String("actor"), err)
+		}
+
+		if cctx.NArg() != 1 {
+			return lcli.IncorrectNumArgs(cctx)
+		}
+
+		pid, err := peer.Decode(cctx.Args().Get(0))
+		if err != nil {
+			return fmt.Errorf("failed to parse input as a peerId: %w", err)
+		}
+
+		minfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+		if err != nil {
+			return err
+		}
+
+		params, err := actors.SerializeParams(&miner.ChangePeerIDParams{NewID: abi.PeerID(pid)})
+		if err != nil {
+			return err
+		}
+
+		smsg, err := api.MpoolPushMessage(ctx, &types.Message{
+			To:     maddr,
+			From:   minfo.Worker,
+			Value:  types.NewInt(0),
+			Method: builtin.MethodsMiner.ChangePeerID,
+			Params: params,
+		}, nil)
+		if err != nil {
+			return err
+		}
+
+		fmt.Printf("Requested peerid change in message %s\n", smsg.Cid())
+		return nil
+
	},
}

@@ -1087,3 +1259,17 @@ var actorConfirmChangeBeneficiary = &cli.Command{
		return nil
	},
}
+
+func isController(mi api.MinerInfo, addr address.Address) bool {
+	if addr == mi.Owner || addr == mi.Worker {
+		return true
+	}
+
+	for _, ca := range mi.ControlAddresses {
+		if addr == ca {
+			return true
+		}
+	}
+
+	return false
+}
|
574
cmd/lotus-shed/lpdeal.go
Normal file
574
cmd/lotus-shed/lpdeal.go
Normal file
@ -0,0 +1,574 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/fatih/color"
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
"github.com/mitchellh/go-homedir"
|
||||||
|
manet "github.com/multiformats/go-multiaddr/net"
|
||||||
|
"github.com/urfave/cli/v2"
|
||||||
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
|
cborutil "github.com/filecoin-project/go-cbor-util"
|
||||||
|
commcid "github.com/filecoin-project/go-fil-commcid"
|
||||||
|
commp "github.com/filecoin-project/go-fil-commp-hashhash"
|
||||||
|
"github.com/filecoin-project/go-jsonrpc"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
|
"github.com/filecoin-project/go-state-types/builtin"
|
||||||
|
"github.com/filecoin-project/go-state-types/builtin/v9/market"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
lcli "github.com/filecoin-project/lotus/cli"
|
||||||
|
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
|
||||||
|
"github.com/filecoin-project/lotus/lib/must"
|
||||||
|
"github.com/filecoin-project/lotus/lib/nullreader"
|
||||||
|
"github.com/filecoin-project/lotus/metrics/proxy"
|
||||||
|
"github.com/filecoin-project/lotus/node"
|
||||||
|
"github.com/filecoin-project/lotus/provider/lpmarket"
|
||||||
|
"github.com/filecoin-project/lotus/provider/lpmarket/fakelm"
|
||||||
|
"github.com/filecoin-project/lotus/storage/paths"
|
||||||
|
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||||
|
)
|
||||||
|
|
||||||
|
var lpUtilCmd = &cli.Command{
|
||||||
|
Name: "provider-util",
|
||||||
|
Usage: "lotus provider utility commands",
|
||||||
|
Subcommands: []*cli.Command{
|
||||||
|
lpUtilStartDealCmd,
|
||||||
|
lpBoostProxyCmd,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var lpUtilStartDealCmd = &cli.Command{
|
||||||
|
Name: "start-deal",
|
||||||
|
Usage: "start a deal with a specific lotus-provider instance",
|
||||||
|
ArgsUsage: "[dataFile] [miner]",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "provider-rpc",
|
||||||
|
Value: "http://127.0.0.1:12300",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Action: func(cctx *cli.Context) error {
|
||||||
|
if cctx.Args().Len() != 2 {
|
||||||
|
return xerrors.Errorf("expected 2 arguments")
|
||||||
|
}
|
||||||
|
|
||||||
|
maddr, err := address.NewFromString(cctx.Args().Get(1))
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("parse miner address: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
full, closer, err := lcli.GetFullNodeAPI(cctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer closer()
|
||||||
|
ctx := lcli.ReqContext(cctx)
|
||||||
|
|
||||||
|
defAddr, err := full.WalletDefaultAddress(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("get default address: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// open rpc
|
||||||
|
var rpc api.LotusProviderStruct
|
||||||
|
closer2, err := jsonrpc.NewMergeClient(ctx, cctx.String("provider-rpc"), "Filecoin", []interface{}{&rpc.Internal}, nil)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("open rpc: %w", err)
|
||||||
|
}
|
||||||
|
defer closer2()
|
||||||
|
|
||||||
|
v, err := rpc.Version(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("rpc version: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("* provider version: %s\n", v.String())
|
||||||
|
|
||||||
|
// open data file
|
||||||
|
data, err := homedir.Expand(cctx.Args().Get(0))
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("get data file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
df, err := os.Open(data)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("open data file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
dstat, err := df.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("stat data file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// compute commd
|
||||||
|
color.Green("> computing piece CID\n")
|
||||||
|
|
||||||
|
writer := new(commp.Calc)
|
||||||
|
_, err = io.Copy(writer, df)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("compute commd copy: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
commp, pps, err := writer.Digest()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("compute commd: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pieceCid, err := commcid.PieceCommitmentV1ToCID(commp)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("make pieceCid: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("* piece CID: %s\n", pieceCid)
|
||||||
|
fmt.Printf("* piece size: %d\n", pps)
|
||||||
|
|
||||||
|
// start serving the file
|
||||||
|
color.Green("> starting temp http server\n")
|
||||||
|
|
||||||
|
deleteCalled := make(chan struct{})
|
||||||
|
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
mux.HandleFunc("/"+pieceCid.String(), func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// log request and method
|
||||||
|
color.Blue("< %s %s\n", r.Method, r.URL)
|
||||||
|
|
||||||
|
if r.Method == http.MethodDelete {
|
||||||
|
close(deleteCalled)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
http.ServeFile(w, r, data)
|
||||||
|
})
|
||||||
|
|
||||||
|
ts := httptest.NewServer(mux)
|
||||||
|
|
||||||
|
dataUrl, err := url.Parse(ts.URL)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("parse data url: %w", err)
|
||||||
|
}
|
||||||
|
dataUrl.Path = "/" + pieceCid.String()
|
||||||
|
|
||||||
|
fmt.Printf("* data url: %s\n", dataUrl)
|
||||||
|
|
||||||
|
// publish the deal
|
||||||
|
color.Green("> publishing deal\n")
|
||||||
|
|
||||||
|
head, err := full.ChainHead(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("get chain head: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
verif := false
|
||||||
|
|
||||||
|
bds, err := full.StateDealProviderCollateralBounds(ctx, abi.PaddedPieceSize(pps), verif, head.Key())
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("get provider collateral bounds: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pcoll := big.Mul(bds.Min, big.NewInt(2))
|
||||||
|
|
||||||
|
dealProposal := market.DealProposal{
|
||||||
|
PieceCID: pieceCid,
|
||||||
|
PieceSize: abi.PaddedPieceSize(pps),
|
||||||
|
VerifiedDeal: verif,
|
||||||
|
Client: defAddr,
|
||||||
|
Provider: maddr,
|
||||||
|
Label: must.One(market.NewLabelFromString("lotus-shed-made-this")),
|
||||||
|
StartEpoch: head.Height() + 2000,
|
||||||
|
EndEpoch: head.Height() + 2880*300,
|
||||||
|
StoragePricePerEpoch: big.Zero(),
|
||||||
|
ProviderCollateral: pcoll,
|
||||||
|
ClientCollateral: big.Zero(),
|
||||||
|
}
|
||||||
|
|
||||||
|
pbuf, err := cborutil.Dump(&dealProposal)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("dump deal proposal: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sig, err := full.WalletSign(ctx, defAddr, pbuf)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("sign deal proposal: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
params := market.PublishStorageDealsParams{
|
||||||
|
Deals: []market.ClientDealProposal{
|
||||||
|
{
|
||||||
|
Proposal: dealProposal,
|
||||||
|
ClientSignature: *sig,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
err = params.MarshalCBOR(&buf)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("marshal params: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
msg := &types.Message{
|
||||||
|
To: builtin.StorageMarketActorAddr,
|
||||||
|
From: defAddr,
|
||||||
|
Method: builtin.MethodsMarket.PublishStorageDeals,
|
||||||
|
Params: buf.Bytes(),
|
||||||
|
}
|
||||||
|
|
||||||
|
smsg, err := full.MpoolPushMessage(ctx, msg, nil)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("push message: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("* PSD message cid: %s\n", smsg.Cid())
|
||||||
|
|
||||||
|
// wait for deal to be published
|
||||||
|
color.Green("> waiting for PublishStorageDeals to land on chain\n")
|
||||||
|
|
||||||
|
rcpt, err := full.StateWaitMsg(ctx, smsg.Cid(), 3)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("wait message: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rcpt.Receipt.ExitCode != 0 {
|
||||||
|
return xerrors.Errorf("publish deal failed: exit code %d", rcpt.Receipt.ExitCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse results
|
||||||
|
var ret market.PublishStorageDealsReturn
|
||||||
|
err = ret.UnmarshalCBOR(bytes.NewReader(rcpt.Receipt.Return))
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("unmarshal return: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ret.IDs) != 1 {
|
||||||
|
return xerrors.Errorf("expected 1 deal id, got %d", len(ret.IDs))
|
||||||
|
}
|
||||||
|
|
||||||
|
dealId := ret.IDs[0]
|
||||||
|
|
||||||
|
fmt.Printf("* deal id: %d\n", dealId)
|
||||||
|
|
||||||
|
// start deal
|
||||||
|
color.Green("> starting deal\n")
|
||||||
|
|
||||||
|
pcid := smsg.Cid()
|
||||||
|
|
||||||
|
pdi := api.PieceDealInfo{
|
||||||
|
PublishCid: &pcid,
|
||||||
|
DealID: dealId,
|
||||||
|
DealProposal: &dealProposal,
|
||||||
|
DealSchedule: api.DealSchedule{
|
||||||
|
StartEpoch: dealProposal.StartEpoch,
|
||||||
|
EndEpoch: dealProposal.EndEpoch,
|
||||||
|
},
|
||||||
|
KeepUnsealed: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
soff, err := rpc.AllocatePieceToSector(ctx, maddr, pdi, dstat.Size(), *dataUrl, nil)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("allocate piece to sector: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("* sector offset: %d\n", soff)
|
||||||
|
|
||||||
|
// wait for delete call on the file
|
||||||
|
color.Green("> waiting for file to be deleted (on sector finalize)\n")
|
||||||
|
|
||||||
|
<-deleteCalled
|
||||||
|
|
||||||
|
fmt.Println("* done")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var lpBoostProxyCmd = &cli.Command{
|
||||||
|
Name: "boost-proxy",
|
||||||
|
Usage: "Start a legacy lotus-miner rpc proxy",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "actor-address",
|
||||||
|
Usage: "Address of the miner actor",
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "db-host",
|
||||||
|
EnvVars: []string{"LOTUS_DB_HOST"},
|
||||||
|
Usage: "Command separated list of hostnames for yugabyte cluster",
|
||||||
|
Value: "yugabyte",
|
||||||
|
},
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "db-name",
|
||||||
|
EnvVars: []string{"LOTUS_DB_NAME", "LOTUS_HARMONYDB_HOSTS"},
|
||||||
|
Value: "yugabyte",
|
||||||
|
},
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "db-user",
|
||||||
|
EnvVars: []string{"LOTUS_DB_USER", "LOTUS_HARMONYDB_USERNAME"},
|
||||||
|
Value: "yugabyte",
|
||||||
|
},
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "db-password",
|
||||||
|
EnvVars: []string{"LOTUS_DB_PASSWORD", "LOTUS_HARMONYDB_PASSWORD"},
|
||||||
|
Value: "yugabyte",
|
||||||
|
},
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "db-port",
|
||||||
|
EnvVars: []string{"LOTUS_DB_PORT", "LOTUS_HARMONYDB_PORT"},
|
||||||
|
Hidden: true,
|
||||||
|
Value: "5433",
|
||||||
|
},
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "layers",
|
||||||
|
EnvVars: []string{"LOTUS_LAYERS", "LOTUS_CONFIG_LAYERS"},
|
||||||
|
Value: "base",
|
||||||
|
},
|
||||||
|
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "listen",
|
||||||
|
Usage: "Address to listen on",
|
||||||
|
Value: ":32100",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Action: func(cctx *cli.Context) error {
|
||||||
|
ctx := lcli.ReqContext(cctx)
|
||||||
|
|
||||||
|
db, err := deps.MakeDB(cctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
maddr, err := address.NewFromString(cctx.String("actor-address"))
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("parsing miner address: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
full, closer, err := lcli.GetFullNodeAPIV1(cctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer closer()
|
||||||
|
|
||||||
|
pin := lpmarket.NewPieceIngester(db, full)
|
||||||
|
|
||||||
|
si := paths.NewDBIndex(nil, db)
|
||||||
|
|
||||||
|
mid, err := address.IDFromAddress(maddr)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("getting miner id: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
mi, err := full.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("getting miner info: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
lp := fakelm.NewLMRPCProvider(si, full, maddr, abi.ActorID(mid), mi.SectorSize, pin, db, cctx.String("layers"))
|
||||||
|
|
||||||
|
laddr, err := net.ResolveTCPAddr("tcp", cctx.String("listen"))
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("net resolve: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(laddr.IP) == 0 {
|
||||||
|
// set localhost
|
||||||
|
laddr.IP = net.IPv4(127, 0, 0, 1)
|
||||||
|
}
|
||||||
|
rootUrl := url.URL{
|
||||||
|
Scheme: "http",
|
||||||
|
Host: laddr.String(),
|
||||||
|
}
|
||||||
|
|
||||||
|
ast := api.StorageMinerStruct{}
|
||||||
|
|
||||||
|
ast.CommonStruct.Internal.Version = func(ctx context.Context) (api.APIVersion, error) {
|
||||||
|
return api.APIVersion{
|
||||||
|
Version: "lp-proxy-v0",
|
||||||
|
APIVersion: api.MinerAPIVersion0,
|
||||||
|
BlockDelay: build.BlockDelaySecs,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ast.CommonStruct.Internal.AuthNew = lp.AuthNew
|
||||||
|
|
||||||
|
ast.Internal.ActorAddress = lp.ActorAddress
|
||||||
|
ast.Internal.WorkerJobs = lp.WorkerJobs
|
||||||
|
ast.Internal.SectorsStatus = lp.SectorsStatus
|
||||||
|
ast.Internal.SectorsList = lp.SectorsList
|
||||||
|
ast.Internal.SectorsSummary = lp.SectorsSummary
|
||||||
|
ast.Internal.SectorsListInStates = lp.SectorsListInStates
|
||||||
|
ast.Internal.StorageRedeclareLocal = lp.StorageRedeclareLocal
|
||||||
|
ast.Internal.ComputeDataCid = lp.ComputeDataCid
|
||||||
|
|
||||||
|
type pieceInfo struct {
|
||||||
|
data storiface.Data
|
||||||
|
size abi.UnpaddedPieceSize
|
||||||
|
|
||||||
|
done chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
pieceInfoLk := new(sync.Mutex)
|
||||||
|
pieceInfos := map[cid.Cid][]pieceInfo{}
|
||||||
|
|
||||||
|
ast.Internal.SectorAddPieceToAny = func(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data, deal api.PieceDealInfo) (api.SectorOffset, error) {
|
||||||
|
origPieceData := pieceData
|
||||||
|
defer func() {
|
||||||
|
closer, ok := origPieceData.(io.Closer)
|
||||||
|
if !ok {
|
||||||
|
log.Warnf("DataCid: cannot close pieceData reader %T because it is not an io.Closer", origPieceData)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := closer.Close(); err != nil {
|
||||||
|
log.Warnw("closing pieceData in DataCid", "error", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
pi := pieceInfo{
|
||||||
|
data: pieceData,
|
||||||
|
size: pieceSize,
|
||||||
|
|
||||||
|
done: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
pieceInfoLk.Lock()
|
||||||
|
pieceInfos[deal.DealProposal.PieceCID] = append(pieceInfos[deal.DealProposal.PieceCID], pi)
|
||||||
|
pieceInfoLk.Unlock()
|
||||||
|
|
||||||
|
// /piece?piece_cid=xxxx
|
||||||
|
dataUrl := rootUrl
|
||||||
|
dataUrl.Path = "/piece"
|
||||||
|
dataUrl.RawQuery = "piece_cid=" + deal.DealProposal.PieceCID.String()
|
||||||
|
|
||||||
|
// make a sector
|
||||||
|
so, err := pin.AllocatePieceToSector(ctx, maddr, deal, int64(pieceSize), dataUrl, nil)
|
||||||
|
if err != nil {
|
||||||
|
return api.SectorOffset{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
color.Blue("%s piece assigned to sector f0%d:%d @ %d", deal.DealProposal.PieceCID, mid, so.Sector, so.Offset)

	<-pi.done

	return so, nil
}

ast.Internal.StorageList = si.StorageList
ast.Internal.StorageDetach = si.StorageDetach
ast.Internal.StorageReportHealth = si.StorageReportHealth
ast.Internal.StorageDeclareSector = si.StorageDeclareSector
ast.Internal.StorageDropSector = si.StorageDropSector
ast.Internal.StorageFindSector = si.StorageFindSector
ast.Internal.StorageInfo = si.StorageInfo
ast.Internal.StorageBestAlloc = si.StorageBestAlloc
ast.Internal.StorageLock = si.StorageLock
ast.Internal.StorageTryLock = si.StorageTryLock
ast.Internal.StorageGetLocks = si.StorageGetLocks

var pieceHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
	// /piece?piece_cid=xxxx
	pieceCid, err := cid.Decode(r.URL.Query().Get("piece_cid"))
	if err != nil {
		http.Error(w, "bad piece_cid", http.StatusBadRequest)
		return
	}

	if r.Method != http.MethodGet {
		http.Error(w, "bad method", http.StatusMethodNotAllowed)
		return
	}

	fmt.Printf("%s request for piece from %s\n", pieceCid, r.RemoteAddr)

	pieceInfoLk.Lock()
	pis, ok := pieceInfos[pieceCid]
	if !ok {
		http.Error(w, "piece not found", http.StatusNotFound)
		color.Red("%s not found", pieceCid)
		pieceInfoLk.Unlock()
		return
	}

	// pop
	pi := pis[0]
	pis = pis[1:]

	pieceInfos[pieceCid] = pis

	pieceInfoLk.Unlock()

	start := time.Now()

	pieceData := io.LimitReader(io.MultiReader(
		pi.data,
		nullreader.Reader{},
	), int64(pi.size))

	n, err := io.Copy(w, pieceData)
	close(pi.done)

	took := time.Since(start)
	mbps := float64(n) / (1024 * 1024) / took.Seconds()

	if err != nil {
		log.Errorf("copying piece data: %s", err)
		return
	}

	color.Green("%s served %.3f MiB in %s (%.2f MiB/s)", pieceCid, float64(n)/(1024*1024), took, mbps)
}

finalApi := proxy.LoggingAPI[api.StorageMiner, api.StorageMinerStruct](&ast)

mh, err := node.MinerHandler(finalApi, false) // todo permissioned
if err != nil {
	return err
}

mux := http.NewServeMux()
mux.Handle("/piece", pieceHandler)
mux.Handle("/", mh)

{
	tok, err := lp.AuthNew(ctx, api.AllPermissions)
	if err != nil {
		return err
	}

	// parse listen into multiaddr
	ma, err := manet.FromNetAddr(laddr)
	if err != nil {
		return xerrors.Errorf("net from addr (%v): %w", laddr, err)
	}

	fmt.Printf("Token: %s:%s\n", tok, ma)
}

server := &http.Server{
	Addr:         cctx.String("listen"),
	Handler:      mux,
	ReadTimeout:  48 * time.Hour,
	WriteTimeout: 48 * time.Hour, // really high because we block until TreeD
}

return server.ListenAndServe()
	},
}
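As a rough illustration of the handler above: it serves each queued piece exactly once (the `pieceInfos` entry is popped, and `pi.done` unblocks the allocation call when the provider has pulled the data). A minimal client sketch; the listen address and piece CID below are placeholders, not values from this PR:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Hypothetical address of the piece server and a placeholder piece CID.
	url := "http://127.0.0.1:12310/piece?piece_cid=baga6ea4seaq..."

	resp, err := http.Get(url)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		fmt.Fprintln(os.Stderr, "unexpected status:", resp.Status)
		os.Exit(1)
	}

	// The server streams rawSize bytes, zero-padding past the end of the
	// supplied data via nullreader.
	n, err := io.Copy(os.Stdout, resp.Body)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Fprintf(os.Stderr, "read %d bytes\n", n)
}
```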
@ -92,6 +92,7 @@ func main() {
		FevmAnalyticsCmd,
		mismatchesCmd,
		blockCmd,
+		lpUtilCmd,
	}

	app := &cli.App{
@ -2,6 +2,15 @@
* [](#)
  * [Shutdown](#Shutdown)
  * [Version](#Version)
+* [Allocate](#Allocate)
+  * [AllocatePieceToSector](#AllocatePieceToSector)
+* [Storage](#Storage)
+  * [StorageAddLocal](#StorageAddLocal)
+  * [StorageDetachLocal](#StorageDetachLocal)
+  * [StorageInfo](#StorageInfo)
+  * [StorageList](#StorageList)
+  * [StorageLocal](#StorageLocal)
+  * [StorageStat](#StorageStat)
##
@ -23,3 +32,198 @@ Inputs: `null`

Response: `131840`

+## Allocate
+
+
+### AllocatePieceToSector
+
+
+Perms: write
+
+Inputs:
+```json
+[
+  "f01234",
+  {
+    "PublishCid": {
+      "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+    },
+    "DealID": 5432,
+    "DealProposal": {
+      "PieceCID": {
+        "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+      },
+      "PieceSize": 1032,
+      "VerifiedDeal": true,
+      "Client": "f01234",
+      "Provider": "f01234",
+      "Label": "",
+      "StartEpoch": 10101,
+      "EndEpoch": 10101,
+      "StoragePricePerEpoch": "0",
+      "ProviderCollateral": "0",
+      "ClientCollateral": "0"
+    },
+    "DealSchedule": {
+      "StartEpoch": 10101,
+      "EndEpoch": 10101
+    },
+    "KeepUnsealed": true
+  },
+  9,
+  {
+    "Scheme": "string value",
+    "Opaque": "string value",
+    "User": {},
+    "Host": "string value",
+    "Path": "string value",
+    "RawPath": "string value",
+    "OmitHost": true,
+    "ForceQuery": true,
+    "RawQuery": "string value",
+    "Fragment": "string value",
+    "RawFragment": "string value"
+  },
+  {
+    "Authorization": [
+      "Bearer ey.."
+    ]
+  }
+]
+```
+
+Response:
+```json
+{
+  "Sector": 9,
+  "Offset": 1032
+}
+```
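A sketch of calling this endpoint from Go instead of raw JSON-RPC, using the `LotusProviderStruct` added by this change. The websocket address, auth token, source URL, and the zero-valued `PieceDealInfo` are all placeholders here; a real caller fills them from the market pipeline, and the `Filecoin` namespace is assumed to match the other lotus RPC servers:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/url"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-jsonrpc"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint and token; `lotus-provider run` prints both.
	authHeader := http.Header{"Authorization": []string{"Bearer PLACEHOLDER"}}

	var pp api.LotusProviderStruct
	closer, err := jsonrpc.NewMergeClient(ctx, "ws://127.0.0.1:12300/rpc/v0", "Filecoin",
		api.GetInternalStructs(&pp), authHeader)
	if err != nil {
		panic(err)
	}
	defer closer()

	maddr, err := address.NewFromString("f01234")
	if err != nil {
		panic(err)
	}

	// Where the provider should pull the piece bytes from (for example the
	// /piece handler above); the URL is hypothetical.
	src, err := url.Parse("http://127.0.0.1:12310/piece?piece_cid=baga...")
	if err != nil {
		panic(err)
	}

	var deal api.PieceDealInfo // illustrative zero value; filled in from the deal in practice
	so, err := pp.AllocatePieceToSector(ctx, maddr, deal, 1016, *src, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("sector:", so.Sector, "offset:", so.Offset)
}
```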
+## Storage
+
+
+### StorageAddLocal
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  "string value"
+]
+```
+
+Response: `{}`
+
+### StorageDetachLocal
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  "string value"
+]
+```
+
+Response: `{}`
+
+### StorageInfo
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"
+]
+```
+
+Response:
+```json
+{
+  "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8",
+  "URLs": [
+    "string value"
+  ],
+  "Weight": 42,
+  "MaxStorage": 42,
+  "CanSeal": true,
+  "CanStore": true,
+  "Groups": [
+    "string value"
+  ],
+  "AllowTo": [
+    "string value"
+  ],
+  "AllowTypes": [
+    "string value"
+  ],
+  "DenyTypes": [
+    "string value"
+  ]
+}
+```
+
+### StorageList
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response:
+```json
+{
+  "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": [
+    {
+      "Miner": 1000,
+      "Number": 100,
+      "SectorFileType": 2
+    }
+  ]
+}
+```
+
+### StorageLocal
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response:
+```json
+{
+  "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path"
+}
+```
+
+### StorageStat
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"
+]
+```
+
+Response:
+```json
+{
+  "Capacity": 9,
+  "Available": 9,
+  "FSAvailable": 9,
+  "Reserved": 9,
+  "Max": 9,
+  "Used": 9
+}
+```
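For orientation, the `FsStat` fields above distinguish filesystem-level free space (`FSAvailable`) from free space net of lotus-side reservations (`Available`). A small helper sketch with illustrative values:

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/storage/sealer/fsutil"
)

// percentUsed treats Capacity minus Available (the reservation-adjusted free
// space from the StorageStat response) as used space.
func percentUsed(st fsutil.FsStat) float64 {
	if st.Capacity == 0 {
		return 0
	}
	return 100 * float64(st.Capacity-st.Available) / float64(st.Capacity)
}

func main() {
	// Illustrative numbers: 1 TiB path, 512 GiB free on disk, 64 GiB reserved.
	st := fsutil.FsStat{Capacity: 1 << 40, FSAvailable: 512 << 30, Reserved: 64 << 30, Available: 448 << 30}
	fmt.Printf("%.1f%% used\n", percentUsed(st))
}
```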
@ -10,11 +10,13 @@ VERSION:
   1.25.3-dev

COMMANDS:
+   cli      Execute cli commands
   run      Start a lotus provider process
   stop     Stop a running lotus provider
   config   Manage node config by layers. The layer 'base' will always be applied.
   test     Utility functions for testing
   web      Start lotus provider web interface
+   seal     Manage the sealing pipeline
   version  Print version
   help, h  Shows a list of commands or help for one command
DEVELOPER:
@ -24,16 +26,37 @@ COMMANDS:
   fetch-params  Fetch proving parameters

GLOBAL OPTIONS:
   --color              use color in display output (default: depends on output being a TTY)
   --db-host value      Comma separated list of hostnames for yugabyte cluster (default: "yugabyte") [$LOTUS_DB_HOST]
   --db-name value      (default: "yugabyte") [$LOTUS_DB_NAME, $LOTUS_HARMONYDB_HOSTS]
   --db-user value      (default: "yugabyte") [$LOTUS_DB_USER, $LOTUS_HARMONYDB_USERNAME]
   --db-password value  (default: "yugabyte") [$LOTUS_DB_PASSWORD, $LOTUS_HARMONYDB_PASSWORD]
-   --layers value       (default: "base") [$LOTUS_LAYERS, $LOTUS_CONFIG_LAYERS]
+   --layers value [ --layers value ]  list of layers to be interpreted (atop defaults). Default: base (default: "base") [$CURIO_LAYERS]
   --repo-path value    (default: "~/.lotusprovider") [$LOTUS_REPO_PATH]
   --vv                 enables very verbose mode, useful for debugging the CLI (default: false)
   --help, -h           show help
   --version, -v        print the version
+```
+
+## lotus-provider cli
+```
+NAME:
+   lotus-provider cli - Execute cli commands
+
+USAGE:
+   lotus-provider cli command [command options] [arguments...]
+
+COMMANDS:
+   storage  manage sector storage
+   help, h  Shows a list of commands or help for one command
+
+OPTIONS:
+   --machine value  machine host:port (lotus-provider run --listen address)
+   --help, -h       show help
+```
+
+### lotus-provider cli storage
+```
```

## lotus-provider run
@ -45,13 +68,12 @@ USAGE:
   lotus-provider run [command options] [arguments...]

OPTIONS:
   --listen value        host address and port the worker api will listen on (default: "0.0.0.0:12300") [$LOTUS_WORKER_LISTEN]
   --nosync              don't check full-node sync status (default: false)
   --manage-fdlimit      manage open file limit (default: true)
-   --layers value [ --layers value ]  list of layers to be interpreted (atop defaults). Default: base (default: "base")
   --storage-json value  path to json file containing storage config (default: "~/.lotus-provider/storage.json")
   --journal value       path to journal files (default: "~/.lotus-provider/")
   --help, -h            show help
```

## lotus-provider stop
@ -81,7 +103,9 @@ COMMANDS:
   list, ls                         List config layers you can get.
   interpret, view, stacked, stack  Interpret stacked config layers by this version of lotus-provider, with system-generated comments.
   remove, rm, del, delete          Remove a named config layer.
+   edit                             edit a config layer
   from-miner                       Express a database config (for lotus-provider) from an existing miner.
+   new-cluster                      Create new configuration for a new cluster
   help, h                          Shows a list of commands or help for one command

OPTIONS:
@ -163,6 +187,23 @@ OPTIONS:
   --help, -h  show help
```

+### lotus-provider config edit
+```
+NAME:
+   lotus-provider config edit - edit a config layer
+
+USAGE:
+   lotus-provider config edit [command options] [layer name]
+
+OPTIONS:
+   --editor value         editor to use (default: "vim") [$EDITOR]
+   --source value         source config layer (default: <edited layer>)
+   --allow-owerwrite      allow overwrite of existing layer if source is a different layer (default: false)
+   --no-source-diff       save the whole config into the layer, not just the diff (default: false)
+   --no-interpret-source  do not interpret source layer (default: true if --source is set)
+   --help, -h             show help
+```
+
### lotus-provider config from-miner
```
NAME:
@ -175,12 +216,24 @@ DESCRIPTION:
   Express a database config (for lotus-provider) from an existing miner.

OPTIONS:
-   --miner-repo value, --storagerepo value  Specify miner repo path. flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH]
+   --miner-repo value, --storagerepo value  Miner repo path (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH]
   --to-layer value, -t value               The layer name for this data push. 'base' is recommended for single-miner setup.
   --overwrite, -o                          Use this with --to-layer to replace an existing layer (default: false)
   --help, -h                               show help
```

+### lotus-provider config new-cluster
+```
+NAME:
+   lotus-provider config new-cluster - Create new configuration for a new cluster
+
+USAGE:
+   lotus-provider config new-cluster [command options] [SP actor address...]
+
+OPTIONS:
+   --help, -h  show help
+```
+
## lotus-provider test
```
NAME:
@ -243,9 +296,8 @@ USAGE:
   lotus-provider test window-post task [command options] [arguments...]

OPTIONS:
   --deadline value  deadline to compute WindowPoSt for (default: 0)
-   --layers value [ --layers value ]  list of layers to be interpreted (atop defaults). Default: base (default: "base")
   --help, -h        show help
```

## lotus-provider web
@ -261,10 +313,42 @@ DESCRIPTION:
   This creates the 'web' layer if it does not exist, then calls run with that layer.

OPTIONS:
   --listen value  Address to listen on (default: "127.0.0.1:4701")
-   --layers value [ --layers value ]  list of layers to be interpreted (atop defaults). Default: base. Web will be added (default: "base")
   --nosync        don't check full-node sync status (default: false)
   --help, -h      show help
+```
+
+## lotus-provider seal
+```
+NAME:
+   lotus-provider seal - Manage the sealing pipeline
+
+USAGE:
+   lotus-provider seal command [command options] [arguments...]
+
+COMMANDS:
+   start    Start new sealing operations manually
+   help, h  Shows a list of commands or help for one command
+
+OPTIONS:
+   --help, -h  show help
+```
+
+### lotus-provider seal start
+```
+NAME:
+   lotus-provider seal start - Start new sealing operations manually
+
+USAGE:
+   lotus-provider seal start [command options] [arguments...]
+
+OPTIONS:
+   --actor value  Specify actor address to start sealing sectors for
+   --now          Start sealing sectors for all actors now (not on schedule) (default: false)
+   --cc           Start sealing new CC sectors (default: false)
+   --count value  Number of sectors to start (default: 1)
+   --synthetic    Use synthetic PoRep (default: false)
+   --help, -h     show help
```

## lotus-provider version
@ -1,16 +1,135 @@
[Subsystems]
+  # EnableWindowPost enables window post to be executed on this lotus-provider instance. Each machine in the cluster
+  # with WindowPoSt enabled will also participate in the window post scheduler. It is possible to have multiple
+  # machines with WindowPoSt enabled which will provide redundancy, and in case of multiple partitions per deadline,
+  # will allow for parallel processing of partitions.
+  #
+  # It is possible to have instances handling both WindowPoSt and WinningPoSt, which can provide redundancy without
+  # the need for additional machines. In setups like this it is generally recommended to run
+  # partitionsPerDeadline+1 machines.
+  #
  # type: bool
  #EnableWindowPost = false

  # type: int
  #WindowPostMaxTasks = 0

+  # EnableWinningPost enables winning post to be executed on this lotus-provider instance.
+  # Each machine in the cluster with WinningPoSt enabled will also participate in the winning post scheduler.
+  # It is possible to mix machines with WindowPoSt and WinningPoSt enabled, for details see the EnableWindowPost
+  # documentation.
+  #
  # type: bool
  #EnableWinningPost = false

  # type: int
  #WinningPostMaxTasks = 0

+  # EnableSealSDR enables SDR tasks to run. SDR is the long sequential computation
+  # creating 11 layer files in the sector cache directory.
+  #
+  # SDR is the first task in the sealing pipeline. Its inputs are just the hash of the
+  # unsealed data (CommD), sector number, miner id, and the seal proof type.
+  # Its outputs are the 11 layer files in the sector cache directory.
+  #
+  # In lotus-miner this was run as part of PreCommit1.
+  #
+  # type: bool
+  #EnableSealSDR = false
+
+  # The maximum amount of SDR tasks that can run simultaneously. Note that the maximum number of tasks will
+  # also be bounded by resources available on the machine.
+  #
+  # type: int
+  #SealSDRMaxTasks = 0
+
+  # EnableSealSDRTrees enables the SDR pipeline tree-building task to run.
+  # This task handles encoding of unsealed data into the last sdr layer and building
+  # of TreeR, TreeC and TreeD.
+  #
+  # This task runs after SDR.
+  # TreeD is first computed with optional input of unsealed data.
+  # TreeR is computed from replica, which is first computed as field
+  # addition of the last SDR layer and the bottom layer of TreeD (which is the unsealed data).
+  # TreeC is computed from the 11 SDR layers.
+  # The 3 trees will later be used to compute the PoRep proof.
+  #
+  # In case of SyntheticPoRep, challenges for PoRep will be pre-generated at this step, and trees and layers
+  # will be dropped. SyntheticPoRep works by pre-generating a very large set of challenges (~30GiB on disk),
+  # then using a small subset of them for the actual PoRep computation. This allows for significant scratch space
+  # savings between PreCommit and PoRep generation, at the expense of more computation (generating challenges in this step).
+  #
+  # In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1).
+  # Note that nodes with SDRTrees enabled will also answer Finalize tasks,
+  # which just remove unneeded tree data after PoRep is computed.
+  #
+  # type: bool
+  #EnableSealSDRTrees = false
+
+  # The maximum amount of SealSDRTrees tasks that can run simultaneously. Note that the maximum number of tasks will
+  # also be bounded by resources available on the machine.
+  #
+  # type: int
+  #SealSDRTreesMaxTasks = 0
+
+  # FinalizeMaxTasks is the maximum amount of finalize tasks that can run simultaneously.
+  # The finalize task is enabled on all machines which also handle SDRTrees tasks. Finalize ALWAYS runs on whichever
+  # machine holds sector cache files, as it removes unneeded tree data after PoRep is computed.
+  # Finalize will run in parallel with the SubmitCommitMsg task.
+  #
+  # type: int
+  #FinalizeMaxTasks = 0
+
+  # EnableSendPrecommitMsg enables the sending of precommit messages to the chain
+  # from this lotus-provider instance.
+  # This runs after SDRTrees and uses the output CommD / CommR (roots of TreeD / TreeR) for the message.
+  #
+  # type: bool
+  #EnableSendPrecommitMsg = false
+
+  # EnablePoRepProof enables the computation of the porep proof.
+  #
+  # This task runs after the interactive-porep seed becomes available, which happens 150 epochs (75min) after the
+  # precommit message lands on chain. This task should run on a machine with a GPU. Vanilla PoRep proofs are
+  # requested from the machine which holds sector cache files, which most likely is the machine which ran the SDRTrees
+  # task.
+  #
+  # In lotus-miner this was Commit1 / Commit2.
+  #
+  # type: bool
+  #EnablePoRepProof = false
+
+  # The maximum amount of PoRepProof tasks that can run simultaneously. Note that the maximum number of tasks will
+  # also be bounded by resources available on the machine.
+  #
+  # type: int
+  #PoRepProofMaxTasks = 0
+
+  # EnableSendCommitMsg enables the sending of commit messages to the chain
+  # from this lotus-provider instance.
+  #
+  # type: bool
+  #EnableSendCommitMsg = false
+
+  # EnableMoveStorage enables the move-into-long-term-storage task to run on this lotus-provider instance.
+  # This task should only be enabled on nodes with long-term storage.
+  #
+  # The MoveStorage task is the last task in the sealing pipeline. It moves the sealed sector data from the
+  # SDRTrees machine into long-term storage. This task runs after the Finalize task.
+  #
+  # type: bool
+  #EnableMoveStorage = false
+
+  # The maximum amount of MoveStorage tasks that can run simultaneously. Note that the maximum number of tasks will
+  # also be bounded by resources available on the machine. It is recommended that this value is set to a number which
+  # uses all available network (or disk) bandwidth on the machine without causing bottlenecks.
+  #
+  # type: int
+  #MoveStorageMaxTasks = 0
+
+  # EnableWebGui enables the web GUI on this lotus-provider instance. The UI has minimal local overhead, but it should
+  # only need to be run on a single machine in the cluster.
+  #
  # type: bool
  #EnableWebGui = false

@ -67,6 +186,8 @@
  #DisableWorkerFallback = false

+  #MinerAddresses = []
+
[Proving]
  # Maximum number of sector checks to run in parallel. (0 = unlimited)
go.mod
@ -12,6 +12,7 @@ require (
	github.com/DataDog/zstd v1.4.5
	github.com/GeertJohan/go.rice v1.0.3
	github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee
+	github.com/KarpelesLab/reflink v1.0.1
	github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa
	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
	github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921
@ -37,6 +38,7 @@ require (
	github.com/filecoin-project/go-bitfield v0.2.4
	github.com/filecoin-project/go-cbor-util v0.0.1
	github.com/filecoin-project/go-commp-utils v0.1.3
+	github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837
	github.com/filecoin-project/go-crypto v0.0.1
	github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7
	github.com/filecoin-project/go-fil-commcid v0.1.0
@ -117,6 +119,7 @@ require (
	github.com/mattn/go-isatty v0.0.19
	github.com/mattn/go-sqlite3 v1.14.16
	github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
+	github.com/minio/sha256-simd v1.0.1
	github.com/mitchellh/go-homedir v1.1.0
	github.com/multiformats/go-base32 v0.1.0
	github.com/multiformats/go-multiaddr v0.12.1
@ -126,7 +129,6 @@ require (
	github.com/multiformats/go-multihash v0.2.3
	github.com/multiformats/go-varint v0.0.7
	github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
-	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
	github.com/pkg/errors v0.9.1
	github.com/polydawn/refmt v0.89.0
	github.com/prometheus/client_golang v1.16.0
@ -193,7 +195,6 @@ require (
	github.com/etclabscore/go-jsonschema-walk v0.0.6 // indirect
	github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect
	github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect
-	github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 // indirect
	github.com/filecoin-project/go-ds-versioning v0.1.2 // indirect
	github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect
	github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect
@ -276,7 +277,6 @@ require (
	github.com/miekg/dns v1.1.55 // indirect
	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
-	github.com/minio/sha256-simd v1.0.1 // indirect
	github.com/mr-tron/base58 v1.2.0 // indirect
	github.com/multiformats/go-base36 v0.2.0 // indirect
	github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
@ -286,6 +286,7 @@ require (
	github.com/onsi/ginkgo/v2 v2.11.0 // indirect
	github.com/opencontainers/runtime-spec v1.1.0 // indirect
	github.com/opentracing/opentracing-go v1.2.0 // indirect
+	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
	github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/prometheus/client_model v0.4.0 // indirect
@ -332,6 +333,10 @@ require (
	lukechampine.com/blake3 v1.2.1 // indirect
)

+// https://github.com/magik6k/reflink/commit/cff5a40f3eeca17f44fc95a57ff3878e5ac761dc
+// https://github.com/KarpelesLab/reflink/pull/2
+replace github.com/KarpelesLab/reflink => github.com/magik6k/reflink v1.0.2-patch1
+
replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi

replace github.com/filecoin-project/test-vectors => ./extern/test-vectors
go.sum
@ -1148,6 +1148,8 @@ github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0Q
github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE=
github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magik6k/reflink v1.0.2-patch1 h1:NXSgQugcESI8Z/jBtuAI83YsZuRauY9i9WOyOnJ7Vns=
+github.com/magik6k/reflink v1.0.2-patch1/go.mod h1:WGkTOKNjd1FsJKBw3mu4JvrPEDJyJJ+JPtxBkbPoCok=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@ -36,7 +36,7 @@ type DB struct {
	schema    string
	hostnames []string
	BTFPOnce  sync.Once
-	BTFP      atomic.Uintptr
+	BTFP      atomic.Uintptr // BeginTransactionFramePointer
}

var logger = logging.Logger("harmonydb")
lib/harmony/harmonydb/sql/20231217-sdr-pipeline.sql (new file)
@ -0,0 +1,129 @@
-- NOTE: task_ids can be the same between different task types and between different sectors
-- e.g. SN-supraseal doing 128 sdr/TreeC/TreeR with the same task_id

create table sectors_sdr_pipeline (
    sp_id bigint not null,
    sector_number bigint not null,

    -- at request time
    create_time timestamp not null default current_timestamp,
    reg_seal_proof int not null,

    -- sdr
    ticket_epoch bigint,
    ticket_value bytea,

    task_id_sdr bigint,
    after_sdr bool not null default false,

    -- tree D
    tree_d_cid text, -- commd from treeD compute, should match comm_d_cid

    task_id_tree_d bigint,
    after_tree_d bool not null default false,

    -- tree C
    task_id_tree_c bigint,
    after_tree_c bool not null default false,

    -- tree R
    tree_r_cid text, -- commr from treeR compute

    task_id_tree_r bigint,
    after_tree_r bool not null default false,

    -- precommit message sending
    precommit_msg_cid text,

    task_id_precommit_msg bigint,
    after_precommit_msg bool not null default false,

    -- precommit message wait
    seed_epoch bigint,
    precommit_msg_tsk bytea,
    after_precommit_msg_success bool not null default false,

    -- seed
    seed_value bytea,

    -- Commit (PoRep snark)
    task_id_porep bigint,
    porep_proof bytea,
    after_porep bool not null default false,

    -- Finalize (trim cache)
    task_id_finalize bigint,
    after_finalize bool not null default false,

    -- MoveStorage (move data to storage)
    task_id_move_storage bigint,
    after_move_storage bool not null default false,

    -- Commit message sending
    commit_msg_cid text,

    task_id_commit_msg bigint,
    after_commit_msg bool not null default false,

    -- Commit message wait
    commit_msg_tsk bytea,
    after_commit_msg_success bool not null default false,

    -- Failure handling
    failed bool not null default false,
    failed_at timestamp,
    failed_reason varchar(20) not null default '',
    failed_reason_msg text not null default '',

    -- foreign key
    -- note: those foreign keys are a part of the retry mechanism. If a task
    -- fails due to retry limit, it will drop the assigned task_id, and the
    -- poller will reassign the task to a new node if it deems the task is
    -- still valid to be retried.
    foreign key (task_id_sdr) references harmony_task (id) on delete set null,
    foreign key (task_id_tree_d) references harmony_task (id) on delete set null,
    foreign key (task_id_tree_c) references harmony_task (id) on delete set null,
    foreign key (task_id_tree_r) references harmony_task (id) on delete set null,
    foreign key (task_id_precommit_msg) references harmony_task (id) on delete set null,
    foreign key (task_id_porep) references harmony_task (id) on delete set null,
    foreign key (task_id_finalize) references harmony_task (id) on delete set null,
    foreign key (task_id_move_storage) references harmony_task (id) on delete set null,
    foreign key (task_id_commit_msg) references harmony_task (id) on delete set null,

    -- constraints
    primary key (sp_id, sector_number)
);

create table sectors_sdr_initial_pieces (
    sp_id bigint not null,
    sector_number bigint not null,

    piece_index bigint not null,
    piece_cid text not null,
    piece_size bigint not null, -- padded size

    -- data source
    data_url text not null,
    data_headers jsonb not null default '{}',
    data_raw_size bigint not null,
    data_delete_on_finalize bool not null,

    -- deal info
    f05_publish_cid text,
    f05_deal_id bigint,
    f05_deal_proposal jsonb,
    f05_deal_start_epoch bigint,
    f05_deal_end_epoch bigint,

    -- foreign key
    foreign key (sp_id, sector_number) references sectors_sdr_pipeline (sp_id, sector_number) on delete cascade,

    primary key (sp_id, sector_number, piece_index)
);

comment on column sectors_sdr_initial_pieces.piece_size is 'padded size of the piece';

create table sectors_allocated_numbers (
    sp_id bigint not null primary key,
    allocated jsonb not null
);
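To make the `after_*` / `task_id_*` convention concrete: a stage is runnable when the previous stage's flag is set and its own task id is still NULL (the foreign keys above NULL it out on retry). A hedged sketch of such a query via harmonydb; the actual poller queries in the PR may differ:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)

// findTreeDCandidates lists sectors that finished SDR but have no TreeD task
// assigned yet, following the per-stage column convention of
// sectors_sdr_pipeline.
func findTreeDCandidates(ctx context.Context, db *harmonydb.DB) error {
	var sectors []struct {
		SpID         int64 `db:"sp_id"`
		SectorNumber int64 `db:"sector_number"`
	}
	err := db.Select(ctx, &sectors, `
		SELECT sp_id, sector_number
		FROM sectors_sdr_pipeline
		WHERE after_sdr = TRUE
		  AND after_tree_d = FALSE
		  AND task_id_tree_d IS NULL`)
	if err != nil {
		return err
	}
	for _, s := range sectors {
		fmt.Printf("sector f0%d:%d ready for TreeD\n", s.SpID, s.SectorNumber)
	}
	return nil
}
```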
lib/harmony/harmonydb/sql/20231225-message-waits.sql (new file)
@ -0,0 +1,13 @@
create table message_waits (
    signed_message_cid text primary key,
    waiter_machine_id int references harmony_machines (id) on delete set null,

    executed_tsk_cid text,
    executed_tsk_epoch bigint,
    executed_msg_cid text,
    executed_msg_data jsonb,

    executed_rcpt_exitcode bigint,
    executed_rcpt_return bytea,
    executed_rcpt_gas_used bigint
)
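A sketch of how the `on delete set null` waiter column might be used: when a machine row disappears, a surviving node can adopt waits that have not executed yet. Illustrative only; the PR's message-wait follower may work differently:

```go
package example

import (
	"context"

	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)

// adoptOrphanWaits claims message waits whose waiter machine was deleted
// (waiter_machine_id is NULLed by the schema above) and which have not yet
// been observed landing on chain. Returns the number of adopted rows.
func adoptOrphanWaits(ctx context.Context, db *harmonydb.DB, machineID int64) (int, error) {
	return db.Exec(ctx, `UPDATE message_waits SET waiter_machine_id = $1
		WHERE waiter_machine_id IS NULL
		  AND executed_tsk_cid IS NULL`, machineID)
}
```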
@ -4,6 +4,7 @@ import (
	"context"
	"errors"
	"runtime"
+	"time"

	"github.com/georgysavva/scany/v2/pgxscan"
	"github.com/jackc/pgerrcode"
@ -129,6 +130,25 @@ func (db *DB) usedInTransaction() bool {
	return lo.Contains(framePtrs, db.BTFP.Load()) // Unsafe read @ beginTx overlap, but 'return false' is correct there.
}

+type TransactionOptions struct {
+	RetrySerializationError            bool
+	InitialSerializationErrorRetryWait time.Duration
+}
+
+type TransactionOption func(*TransactionOptions)
+
+func OptionRetry() TransactionOption {
+	return func(o *TransactionOptions) {
+		o.RetrySerializationError = true
+	}
+}
+
+func OptionSerialRetryTime(d time.Duration) TransactionOption {
+	return func(o *TransactionOptions) {
+		o.InitialSerializationErrorRetryWait = d
+	}
+}
+
// BeginTransaction is how you can access transactions using this library.
// The entire transaction happens in the function passed in.
// The return must be true or a rollback will occur.
@ -137,7 +157,7 @@ func (db *DB) usedInTransaction() bool {
// when there is a DB serialization error.
//
//go:noinline
-func (db *DB) BeginTransaction(ctx context.Context, f func(*Tx) (commit bool, err error)) (didCommit bool, retErr error) {
+func (db *DB) BeginTransaction(ctx context.Context, f func(*Tx) (commit bool, err error), opt ...TransactionOption) (didCommit bool, retErr error) {
	db.BTFPOnce.Do(func() {
		fp := make([]uintptr, 20)
		runtime.Callers(1, fp)
@ -146,6 +166,28 @@ func (db *DB) BeginTransaction(ctx context.Context, f func(*Tx) (commit bool, er
	if db.usedInTransaction() {
		return false, errTx
	}
+
+	opts := TransactionOptions{
+		RetrySerializationError:            false,
+		InitialSerializationErrorRetryWait: 10 * time.Millisecond,
+	}
+
+	for _, o := range opt {
+		o(&opts)
+	}
+
+retry:
+	comm, err := db.transactionInner(ctx, f)
+	if err != nil && opts.RetrySerializationError && IsErrSerialization(err) {
+		time.Sleep(opts.InitialSerializationErrorRetryWait)
+		opts.InitialSerializationErrorRetryWait *= 2
+		goto retry
+	}
+
+	return comm, err
+}
+
+func (db *DB) transactionInner(ctx context.Context, f func(*Tx) (commit bool, err error)) (didCommit bool, retErr error) {
	tx, err := db.pgx.BeginTx(ctx, pgx.TxOptions{})
	if err != nil {
		return false, err
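A usage sketch for the new options; the `accounts` table and the 50ms starting backoff are illustrative (BeginTransaction doubles the wait on each retry):

```go
package example

import (
	"context"
	"time"

	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)

// transfer retries the whole transaction on serialization failures instead of
// surfacing them to the caller. Both updates commit or neither does.
func transfer(ctx context.Context, db *harmonydb.DB, from, to int64) error {
	_, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) {
		if _, err := tx.Exec(`UPDATE accounts SET n = n - 1 WHERE id = $1`, from); err != nil {
			return false, err
		}
		if _, err := tx.Exec(`UPDATE accounts SET n = n + 1 WHERE id = $1`, to); err != nil {
			return false, err
		}
		return true, nil // true => commit
	}, harmonydb.OptionRetry(), harmonydb.OptionSerialRetryTime(50*time.Millisecond))
	return err
}
```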
@ -12,9 +12,10 @@ import (
)

// Consts (except for unit test)
var POLL_DURATION = time.Second * 3 // Poll for Work this frequently
+var POLL_NEXT_DURATION = 100 * time.Millisecond // After scheduling a task, wait this long before scheduling another
var CLEANUP_FREQUENCY = 5 * time.Minute // Check for dead workers this often * everyone
var FOLLOW_FREQUENCY = 1 * time.Minute // Check for work to follow this often

type TaskTypeDetails struct {
	// Max returns how many tasks this machine can run of this type.
@ -211,13 +212,19 @@ top:
}

func (e *TaskEngine) poller() {
+	nextWait := POLL_NEXT_DURATION
	for {
		select {
-		case <-time.NewTicker(POLL_DURATION).C: // Find work periodically
+		case <-time.After(nextWait): // Find work periodically
		case <-e.ctx.Done(): ///////////////////// Graceful exit
			return
		}
-		e.pollerTryAllWork()
+		nextWait = POLL_DURATION
+
+		accepted := e.pollerTryAllWork()
+		if accepted {
+			nextWait = POLL_NEXT_DURATION
+		}
		if time.Since(e.lastFollowTime) > FOLLOW_FREQUENCY {
			e.followWorkInDB()
		}
@ -233,7 +240,7 @@ func (e *TaskEngine) followWorkInDB() {
	for fromName, srcs := range e.follows {
		var cList []int // Which work is done (that we follow) since we last checked?
		err := e.db.Select(e.ctx, &cList, `SELECT h.task_id FROM harmony_task_history
-			WHERE h.work_end>$1 AND h.name=$2`, lastFollowTime, fromName)
+			WHERE h.work_end>$1 AND h.name=$2`, lastFollowTime.UTC(), fromName)
		if err != nil {
			log.Error("Could not query DB: ", err)
			return
@ -266,7 +273,7 @@ func (e *TaskEngine) followWorkInDB() {
}

// pollerTryAllWork starts the next 1 task
-func (e *TaskEngine) pollerTryAllWork() {
+func (e *TaskEngine) pollerTryAllWork() bool {
	if time.Since(e.lastCleanup.Load().(time.Time)) > CLEANUP_FREQUENCY {
		e.lastCleanup.Store(time.Now())
		resources.CleanupMachines(e.ctx, e.db)
@ -287,11 +294,13 @@ func (e *TaskEngine) pollerTryAllWork() bool {
		if len(unownedTasks) > 0 {
			accepted := v.considerWork(workSourcePoller, unownedTasks)
			if accepted {
-				return // accept new work slowly and in priority order
+				return true // accept new work slowly and in priority order
			}
			log.Warn("Work not accepted for " + strconv.Itoa(len(unownedTasks)) + " " + v.Name + " task(s)")
		}
	}
+
+	return false
}

// ResourcesAvailable determines what resources are still unassigned.
@ -29,15 +29,11 @@ func (h *taskTypeHandler) AddTask(extra func(TaskID, *harmonydb.Tx) (bool, error
retryAddTask:
	_, err := h.TaskEngine.db.BeginTransaction(h.TaskEngine.ctx, func(tx *harmonydb.Tx) (bool, error) {
		// create taskID (from DB)
-		_, err := tx.Exec(`INSERT INTO harmony_task (name, added_by, posted_time)
-			VALUES ($1, $2, CURRENT_TIMESTAMP) `, h.Name, h.TaskEngine.ownerID)
+		err := tx.QueryRow(`INSERT INTO harmony_task (name, added_by, posted_time)
+			VALUES ($1, $2, CURRENT_TIMESTAMP) RETURNING id`, h.Name, h.TaskEngine.ownerID).Scan(&tID)
		if err != nil {
			return false, fmt.Errorf("could not insert into harmonyTask: %w", err)
		}
-		err = tx.QueryRow("SELECT id FROM harmony_task ORDER BY update_time DESC LIMIT 1").Scan(&tID)
-		if err != nil {
-			return false, fmt.Errorf("Could not select ID: %v", err)
-		}
		return extra(tID, tx)
	})

@ -51,7 +47,7 @@ retryAddTask:
			retryWait *= 2
			goto retryAddTask
		}
-		log.Error("Could not add task. AddTasFunc failed: %v", err)
+		log.Errorw("Could not add task. AddTaskFunc failed", "error", err, "type", h.Name)
		return
	}
}
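The change above matters under concurrency: the old INSERT-then-SELECT-latest-id pair could observe another worker's freshly inserted row, while `INSERT ... RETURNING id` hands back the new row's id atomically. The same pattern in isolation, against a hypothetical table:

```go
package example

import "github.com/filecoin-project/lotus/lib/harmony/harmonydb"

// newJobID inserts a row and learns its generated id in one statement, so
// concurrent writers can never pick up each other's ids. The jobs table is
// hypothetical; only the pattern is the point.
func newJobID(tx *harmonydb.Tx, name string) (int64, error) {
	var id int64
	err := tx.QueryRow(`INSERT INTO jobs (name) VALUES ($1) RETURNING id`, name).Scan(&id)
	return id, err
}
```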
@ -173,6 +169,7 @@ retryRecordCompletion:
	cm, err := h.TaskEngine.db.BeginTransaction(h.TaskEngine.ctx, func(tx *harmonydb.Tx) (bool, error) {
		var postedTime time.Time
		err := tx.QueryRow(`SELECT posted_time FROM harmony_task WHERE id=$1`, tID).Scan(&postedTime)
+
		if err != nil {
			return false, fmt.Errorf("could not log completion: %w ", err)
		}
@ -218,7 +215,7 @@ retryRecordCompletion:
		}
		_, err = tx.Exec(`INSERT INTO harmony_task_history
			(task_id, name, posted, work_start, work_end, result, completed_by_host_and_port, err)
-			VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, tID, h.Name, postedTime, workStart, workEnd, done, h.TaskEngine.hostAndPort, result)
+			VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, tID, h.Name, postedTime.UTC(), workStart.UTC(), workEnd.UTC(), done, h.TaskEngine.hostAndPort, result)
		if err != nil {
			return false, fmt.Errorf("could not write history: %w", err)
		}
@ -11,6 +11,7 @@ import (

func getGPUDevices() float64 { // GPU boolean
	gpus, err := ffi.GetGPUDevices()
+	logger.Infow("GPUs", "list", gpus)
	if err != nil {
		logger.Errorf("getting gpu devices failed: %+v", err)
	}
@ -9,8 +9,8 @@ import (
	"sync/atomic"
	"time"

+	"github.com/elastic/go-sysinfo"
	logging "github.com/ipfs/go-log/v2"
-	"github.com/pbnjay/memory"
	"golang.org/x/sys/unix"
	"golang.org/x/xerrors"

@ -82,7 +82,7 @@ func Register(db *harmonydb.DB, hostnameAndPort string) (*Reg, error) {
			if reg.shutdown.Load() {
				return
			}
-			_, err := db.Exec(ctx, `UPDATE harmony_machines SET last_contact=CURRENT_TIMESTAMP`)
+			_, err := db.Exec(ctx, `UPDATE harmony_machines SET last_contact=CURRENT_TIMESTAMP where id=$1`, reg.MachineID)
			if err != nil {
				logger.Error("Cannot keepalive ", err)
			}
@ -122,9 +122,19 @@ func getResources() (res Resources, err error) {
		}
	}

+	h, err := sysinfo.Host()
+	if err != nil {
+		return Resources{}, err
+	}
+
+	mem, err := h.Memory()
+	if err != nil {
+		return Resources{}, err
+	}
+
	res = Resources{
		Cpu: runtime.NumCPU(),
-		Ram: memory.FreeMemory(),
+		Ram: mem.Available,
		Gpu: getGPUDevices(),
	}
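The switch from pbnjay/memory's `FreeMemory()` to go-sysinfo's `Available` counts reclaimable page cache as usable, which should better reflect what sealing tasks can actually allocate. A standalone sketch of the new measurement:

```go
package main

import (
	"fmt"

	"github.com/elastic/go-sysinfo"
)

func main() {
	h, err := sysinfo.Host()
	if err != nil {
		panic(err)
	}
	mem, err := h.Memory()
	if err != nil {
		panic(err)
	}
	// Available includes memory the kernel can reclaim (e.g. page cache),
	// unlike a plain "free" reading.
	fmt.Printf("available: %d bytes of %d total\n", mem.Available, mem.Total)
}
```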
@ -45,3 +45,9 @@ func (p *Promise[T]) Val(ctx context.Context) T {
		return val
	}
}

+func (p *Promise[T]) IsSet() bool {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return p.done != nil
+}
@ -2,8 +2,10 @@ package proxy

import (
	"context"
+	"fmt"
	"reflect"

+	logging "github.com/ipfs/go-log/v2"
	"go.opencensus.io/tag"

	"github.com/filecoin-project/lotus/api"
@ -69,3 +71,41 @@ func proxy(in interface{}, outstr interface{}) {
		}
	}
}

+var log = logging.Logger("api_proxy")
+
+func LoggingAPI[T, P any](a T) *P {
+	var out P
+	logProxy(a, &out)
+	return &out
+}
+
+func logProxy(in interface{}, outstr interface{}) {
+	outs := api.GetInternalStructs(outstr)
+	for _, out := range outs {
+		rint := reflect.ValueOf(out).Elem()
+		ra := reflect.ValueOf(in)
+
+		for f := 0; f < rint.NumField(); f++ {
+			field := rint.Type().Field(f)
+			fn := ra.MethodByName(field.Name)
+
+			rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) {
+				var wargs []interface{}
+				wargs = append(wargs, "method", field.Name)
+
+				for i := 1; i < len(args); i++ {
+					wargs = append(wargs, fmt.Sprintf("arg%d", i), args[i].Interface())
+				}
+
+				res := fn.Call(args)
+				for i, r := range res {
+					wargs = append(wargs, fmt.Sprintf("ret%d", i), r.Interface())
+				}
+
+				log.Debugw("APICALL", wargs...)
+				return res
+			}))
+		}
+	}
+}
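The logging proxy emits `APICALL` lines via `Debugw` under the `api_proxy` subsystem, so they stay invisible until that log level is raised. A usage sketch; the import path of the proxy package is assumed from context (it imports `lotus/api`, so it lives outside it):

```go
package main

import (
	logging "github.com/ipfs/go-log/v2"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/metrics/proxy" // assumed location of LoggingAPI/logProxy
)

func main() {
	// Surface the APICALL debug lines for the proxy's logger.
	_ = logging.SetLogLevel("api_proxy", "DEBUG")

	var impl api.StorageMinerStruct // stands in for a real implementation
	wrapped := proxy.LoggingAPI[api.StorageMiner, api.StorageMinerStruct](&impl)
	_ = wrapped // every call through `wrapped` now logs method, args, and results
}
```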
@ -353,6 +353,7 @@ func DefaultLotusProvider() *LotusProviderConfig {
			PreCommitControl: []string{},
			CommitControl:    []string{},
			TerminateControl: []string{},
+			MinerAddresses:   []string{},
		}},
		Proving: ProvingConfig{
			ParallelCheckLimit: 32,
@ -987,7 +987,14 @@ block rewards will be missed!`,
|
|||||||
Name: "EnableWindowPost",
|
Name: "EnableWindowPost",
|
||||||
Type: "bool",
|
Type: "bool",
|
||||||
|
|
||||||
Comment: ``,
|
Comment: `EnableWindowPost enables window post to be executed on this lotus-provider instance. Each machine in the cluster
|
||||||
|
with WindowPoSt enabled will also participate in the window post scheduler. It is possible to have multiple
|
||||||
|
machines with WindowPoSt enabled which will provide redundancy, and in case of multiple partitions per deadline,
|
||||||
|
will allow for parallel processing of partitions.
|
||||||
|
|
||||||
|
It is possible to have instances handling both WindowPoSt and WinningPoSt, which can provide redundancy without
|
||||||
|
the need for additional machines. In setups like this it is generally recommended to run
|
||||||
|
partitionsPerDeadline+1 machines.`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "WindowPostMaxTasks",
|
Name: "WindowPostMaxTasks",
|
||||||
@ -999,7 +1006,10 @@ block rewards will be missed!`,
|
|||||||
Name: "EnableWinningPost",
|
Name: "EnableWinningPost",
|
||||||
Type: "bool",
|
Type: "bool",
|
||||||
|
|
||||||
Comment: ``,
|
Comment: `EnableWinningPost enables winning post to be executed on this lotus-provider instance.
|
||||||
|
Each machine in the cluster with WinningPoSt enabled will also participate in the winning post scheduler.
|
||||||
|
It is possible to mix machines with WindowPoSt and WinningPoSt enabled, for details see the EnableWindowPost
|
||||||
|
documentation.`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "WinningPostMaxTasks",
|
Name: "WinningPostMaxTasks",
|
||||||
@ -1007,11 +1017,125 @@ block rewards will be missed!`,

Comment: ``,
},
{
Name: "EnableSealSDR",
Type: "bool",

Comment: `EnableSealSDR enables SDR tasks to run. SDR is the long sequential computation
creating 11 layer files in sector cache directory.

SDR is the first task in the sealing pipeline. Its inputs are just the hash of the
unsealed data (CommD), sector number, miner id, and the seal proof type.
Its outputs are the 11 layer files in the sector cache directory.

In lotus-miner this was run as part of PreCommit1.`,
},
{
Name: "SealSDRMaxTasks",
Type: "int",

Comment: `The maximum amount of SDR tasks that can run simultaneously. Note that the maximum number of tasks will
also be bounded by resources available on the machine.`,
},
{
Name: "EnableSealSDRTrees",
Type: "bool",

Comment: `EnableSealSDRTrees enables the SDR pipeline tree-building task to run.
This task handles encoding of unsealed data into the last sdr layer and building
of TreeR, TreeC and TreeD.

This task runs after SDR.
TreeD is first computed with optional input of unsealed data
TreeR is computed from replica, which is first computed as field
addition of the last SDR layer and the bottom layer of TreeD (which is the unsealed data)
TreeC is computed from the 11 SDR layers
The 3 trees will later be used to compute the PoRep proof.

In case of SyntheticPoRep, challenges for PoRep will be pre-generated at this step, and trees and layers
will be dropped. SyntheticPoRep works by pre-generating a very large set of challenges (~30GiB on disk)
then using a small subset of them for the actual PoRep computation. This allows for significant scratch space
savings between PreCommit and PoRep generation at the expense of more computation (generating challenges in this step)

In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1).
Note that nodes with SDRTrees enabled will also answer to Finalize tasks,
which just remove unneeded tree data after PoRep is computed.`,
},
{
Name: "SealSDRTreesMaxTasks",
Type: "int",

Comment: `The maximum amount of SealSDRTrees tasks that can run simultaneously. Note that the maximum number of tasks will
also be bounded by resources available on the machine.`,
},
{
Name: "FinalizeMaxTasks",
Type: "int",

Comment: `FinalizeMaxTasks is the maximum amount of finalize tasks that can run simultaneously.
The finalize task is enabled on all machines which also handle SDRTrees tasks. Finalize ALWAYS runs on whichever
machine holds sector cache files, as it removes unneeded tree data after PoRep is computed.
Finalize will run in parallel with the SubmitCommitMsg task.`,
},
{
Name: "EnableSendPrecommitMsg",
Type: "bool",

Comment: `EnableSendPrecommitMsg enables the sending of precommit messages to the chain
from this lotus-provider instance.
This runs after SDRTrees and uses the output CommD / CommR (roots of TreeD / TreeR) for the message.`,
},
{
Name: "EnablePoRepProof",
Type: "bool",

Comment: `EnablePoRepProof enables the computation of the porep proof

This task runs after the interactive-porep seed becomes available, which happens 150 epochs (75min) after the
precommit message lands on chain. This task should run on a machine with a GPU. Vanilla PoRep proofs are
requested from the machine which holds sector cache files, which most likely is the machine which ran the SDRTrees
task.

In lotus-miner this was Commit1 / Commit2.`,
},
{
Name: "PoRepProofMaxTasks",
Type: "int",

Comment: `The maximum amount of PoRepProof tasks that can run simultaneously. Note that the maximum number of tasks will
also be bounded by resources available on the machine.`,
},
{
Name: "EnableSendCommitMsg",
Type: "bool",

Comment: `EnableSendCommitMsg enables the sending of commit messages to the chain
from this lotus-provider instance.`,
},
{
Name: "EnableMoveStorage",
Type: "bool",

Comment: `EnableMoveStorage enables the move-into-long-term-storage task to run on this lotus-provider instance.
This task should only be enabled on nodes with long-term storage.

The MoveStorage task is the last task in the sealing pipeline. It moves the sealed sector data from the
SDRTrees machine into long-term storage. This task runs after the Finalize task.`,
},
{
Name: "MoveStorageMaxTasks",
Type: "int",

Comment: `The maximum amount of MoveStorage tasks that can run simultaneously. Note that the maximum number of tasks will
also be bounded by resources available on the machine. It is recommended that this value is set to a number which
uses all available network (or disk) bandwidth on the machine without causing bottlenecks.`,
},
{
Name: "EnableWebGui",
Type: "bool",

Comment: ``,
Comment: `EnableWebGui enables the web GUI on this lotus-provider instance. The UI has minimal local overhead, but it should
only need to be run on a single machine in the cluster.`,
},
{
Name: "GuiAddress",
@ -8,12 +8,18 @@ import (
"os"
"path"

"github.com/mitchellh/go-homedir"
"golang.org/x/xerrors"

"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

func StorageFromFile(path string, def *storiface.StorageConfig) (*storiface.StorageConfig, error) {
path, err := homedir.Expand(path)
if err != nil {
return nil, xerrors.Errorf("expanding storage config path: %w", err)
}

file, err := os.Open(path)
switch {
case os.IsNotExist(err):
@ -40,6 +46,11 @@ func StorageFromReader(reader io.Reader) (*storiface.StorageConfig, error) {
}

func WriteStorageFile(filePath string, config storiface.StorageConfig) error {
filePath, err := homedir.Expand(filePath)
if err != nil {
return xerrors.Errorf("expanding storage config path: %w", err)
}

b, err := json.MarshalIndent(config, "", "  ")
if err != nil {
return xerrors.Errorf("marshaling storage config: %w", err)
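
As a usage sketch (not taken from this diff), the round trip these two helpers implement is just homedir expansion plus JSON marshaling. The stand-in config type and the ~/.lotusprovider path below are illustrative assumptions, not the real storiface.StorageConfig:

	package main

	import (
		"encoding/json"
		"fmt"
		"os"
		"path/filepath"

		"github.com/mitchellh/go-homedir"
	)

	// storageConfig stands in for storiface.StorageConfig; the real type has more fields.
	type storageConfig struct {
		StoragePaths []struct{ Path string }
	}

	func main() {
		// Expand "~" the same way StorageFromFile/WriteStorageFile do above.
		p, err := homedir.Expand("~/.lotusprovider/storage.json") // hypothetical location
		if err != nil {
			panic(err)
		}

		cfg := storageConfig{StoragePaths: []struct{ Path string }{{Path: "/data/seal"}}}

		b, err := json.MarshalIndent(cfg, "", "  ")
		if err != nil {
			panic(err)
		}
		if err := os.MkdirAll(filepath.Dir(p), 0755); err != nil {
			panic(err)
		}
		if err := os.WriteFile(p, b, 0644); err != nil {
			panic(err)
		}
		fmt.Println("wrote", p)
	}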
@ -93,12 +93,108 @@ type JournalConfig struct {
}

type ProviderSubsystemsConfig struct {
// EnableWindowPost enables window post to be executed on this lotus-provider instance. Each machine in the cluster
// with WindowPoSt enabled will also participate in the window post scheduler. It is possible to have multiple
// machines with WindowPoSt enabled which will provide redundancy, and in case of multiple partitions per deadline,
// will allow for parallel processing of partitions.
//
// It is possible to have instances handling both WindowPoSt and WinningPoSt, which can provide redundancy without
// the need for additional machines. In setups like this it is generally recommended to run
// partitionsPerDeadline+1 machines.
EnableWindowPost bool
WindowPostMaxTasks int

// EnableWinningPost enables winning post to be executed on this lotus-provider instance.
// Each machine in the cluster with WinningPoSt enabled will also participate in the winning post scheduler.
// It is possible to mix machines with WindowPoSt and WinningPoSt enabled, for details see the EnableWindowPost
// documentation.
EnableWinningPost bool
WinningPostMaxTasks int

// EnableSealSDR enables SDR tasks to run. SDR is the long sequential computation
// creating 11 layer files in sector cache directory.
//
// SDR is the first task in the sealing pipeline. Its inputs are just the hash of the
// unsealed data (CommD), sector number, miner id, and the seal proof type.
// Its outputs are the 11 layer files in the sector cache directory.
//
// In lotus-miner this was run as part of PreCommit1.
EnableSealSDR bool

// The maximum amount of SDR tasks that can run simultaneously. Note that the maximum number of tasks will
// also be bounded by resources available on the machine.
SealSDRMaxTasks int

// EnableSealSDRTrees enables the SDR pipeline tree-building task to run.
// This task handles encoding of unsealed data into the last sdr layer and building
// of TreeR, TreeC and TreeD.
//
// This task runs after SDR.
// TreeD is first computed with optional input of unsealed data
// TreeR is computed from replica, which is first computed as field
// addition of the last SDR layer and the bottom layer of TreeD (which is the unsealed data)
// TreeC is computed from the 11 SDR layers
// The 3 trees will later be used to compute the PoRep proof.
//
// In case of SyntheticPoRep, challenges for PoRep will be pre-generated at this step, and trees and layers
// will be dropped. SyntheticPoRep works by pre-generating a very large set of challenges (~30GiB on disk)
// then using a small subset of them for the actual PoRep computation. This allows for significant scratch space
// savings between PreCommit and PoRep generation at the expense of more computation (generating challenges in this step)
//
// In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1).
// Note that nodes with SDRTrees enabled will also answer to Finalize tasks,
// which just remove unneeded tree data after PoRep is computed.
EnableSealSDRTrees bool

// The maximum amount of SealSDRTrees tasks that can run simultaneously. Note that the maximum number of tasks will
// also be bounded by resources available on the machine.
SealSDRTreesMaxTasks int

// FinalizeMaxTasks is the maximum amount of finalize tasks that can run simultaneously.
// The finalize task is enabled on all machines which also handle SDRTrees tasks. Finalize ALWAYS runs on whichever
// machine holds sector cache files, as it removes unneeded tree data after PoRep is computed.
// Finalize will run in parallel with the SubmitCommitMsg task.
FinalizeMaxTasks int

// EnableSendPrecommitMsg enables the sending of precommit messages to the chain
// from this lotus-provider instance.
// This runs after SDRTrees and uses the output CommD / CommR (roots of TreeD / TreeR) for the message.
EnableSendPrecommitMsg bool

// EnablePoRepProof enables the computation of the porep proof
//
// This task runs after the interactive-porep seed becomes available, which happens 150 epochs (75min) after the
// precommit message lands on chain. This task should run on a machine with a GPU. Vanilla PoRep proofs are
// requested from the machine which holds sector cache files, which most likely is the machine which ran the SDRTrees
// task.
//
// In lotus-miner this was Commit1 / Commit2.
EnablePoRepProof bool

// The maximum amount of PoRepProof tasks that can run simultaneously. Note that the maximum number of tasks will
// also be bounded by resources available on the machine.
PoRepProofMaxTasks int

// EnableSendCommitMsg enables the sending of commit messages to the chain
// from this lotus-provider instance.
EnableSendCommitMsg bool

// EnableMoveStorage enables the move-into-long-term-storage task to run on this lotus-provider instance.
// This task should only be enabled on nodes with long-term storage.
//
// The MoveStorage task is the last task in the sealing pipeline. It moves the sealed sector data from the
// SDRTrees machine into long-term storage. This task runs after the Finalize task.
EnableMoveStorage bool

// The maximum amount of MoveStorage tasks that can run simultaneously. Note that the maximum number of tasks will
// also be bounded by resources available on the machine. It is recommended that this value is set to a number which
// uses all available network (or disk) bandwidth on the machine without causing bottlenecks.
MoveStorageMaxTasks int

// EnableWebGui enables the web GUI on this lotus-provider instance. The UI has minimal local overhead, but it should
// only need to be run on a single machine in the cluster.
EnableWebGui bool

// The address that should listen for Web GUI requests.
GuiAddress string
}
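
The comments above describe a pipeline whose stages can be pinned to different machines. A minimal sketch of one plausible split (the role assignment is a hypothetical example, and the struct below is a local mirror of a few of the fields above, not the real config type):

	package main

	import "fmt"

	// subsystems mirrors a handful of ProviderSubsystemsConfig fields for illustration.
	type subsystems struct {
		EnableWindowPost   bool
		EnableSealSDR      bool
		EnableSealSDRTrees bool
		EnablePoRepProof   bool
		EnableMoveStorage  bool
	}

	func main() {
		// CPU-heavy box: runs only the long sequential SDR computation.
		sdrBox := subsystems{EnableSealSDR: true}

		// GPU box: builds trees, computes PoRep proofs, and serves window post.
		gpuBox := subsystems{EnableSealSDRTrees: true, EnablePoRepProof: true, EnableWindowPost: true}

		// Long-term storage box: only runs the final move-into-storage task.
		storeBox := subsystems{EnableMoveStorage: true}

		fmt.Println(sdrBox, gpuBox, storeBox)
	}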
@ -820,7 +820,13 @@ func StorageAuth(ctx helpers.MetricsCtx, ca v0api.Common) (sealer.StorageAuth, e
return sealer.StorageAuth(headers), nil
}

func StorageAuthWithURL(apiInfo string) func(ctx helpers.MetricsCtx, ca v0api.Common) (sealer.StorageAuth, error) {
func StorageAuthWithURL(apiInfo string) interface{} {
if strings.HasPrefix(apiInfo, "harmony:") {
return func(ctx helpers.MetricsCtx, ca MinerStorageService) (sealer.StorageAuth, error) {
return StorageAuth(ctx, ca)
}
}

return func(ctx helpers.MetricsCtx, ca v0api.Common) (sealer.StorageAuth, error) {
s := strings.Split(apiInfo, ":")
if len(s) != 2 {
@ -2,14 +2,26 @@ package modules

import (
"context"
"strings"

"go.uber.org/fx"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/types"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/helpers"
"github.com/filecoin-project/lotus/provider/lpmarket"
"github.com/filecoin-project/lotus/provider/lpmarket/fakelm"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
"github.com/filecoin-project/lotus/storage/sectorblocks"
)
|
|||||||
|
|
||||||
var _ sectorblocks.SectorBuilder = *new(MinerSealingService)
|
var _ sectorblocks.SectorBuilder = *new(MinerSealingService)
|
||||||
|
|
||||||
func connectMinerService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (api.StorageMiner, error) {
|
func harmonyApiInfoToConf(apiInfo string) (config.HarmonyDB, error) {
|
||||||
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (api.StorageMiner, error) {
|
hc := config.HarmonyDB{}
|
||||||
|
|
||||||
|
// apiInfo - harmony:layer:maddr:user:pass:dbname:host:port
|
||||||
|
|
||||||
|
parts := strings.Split(apiInfo, ":")
|
||||||
|
|
||||||
|
if len(parts) != 8 {
|
||||||
|
return config.HarmonyDB{}, xerrors.Errorf("invalid harmonydb info '%s'", apiInfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
hc.Username = parts[3]
|
||||||
|
hc.Password = parts[4]
|
||||||
|
hc.Database = parts[5]
|
||||||
|
hc.Hosts = []string{parts[6]}
|
||||||
|
hc.Port = parts[7]
|
||||||
|
|
||||||
|
return hc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func connectHarmony(apiInfo string, fapi v1api.FullNode, mctx helpers.MetricsCtx, lc fx.Lifecycle) (api.StorageMiner, error) {
|
||||||
|
log.Info("Connecting to harmonydb")
|
||||||
|
|
||||||
|
hc, err := harmonyApiInfoToConf(apiInfo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
db, err := harmonydb.NewFromConfig(hc)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("connecting to harmonydb: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := strings.Split(apiInfo, ":")
|
||||||
|
maddr, err := address.NewFromString(parts[2])
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("parsing miner address: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pin := lpmarket.NewPieceIngester(db, fapi)
|
||||||
|
|
||||||
|
si := paths.NewDBIndex(nil, db)
|
||||||
|
|
||||||
|
mid, err := address.IDFromAddress(maddr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("getting miner id: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
mi, err := fapi.StateMinerInfo(mctx, maddr, types.EmptyTSK)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("getting miner info: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
lp := fakelm.NewLMRPCProvider(si, fapi, maddr, abi.ActorID(mid), mi.SectorSize, pin, db, parts[1])
|
||||||
|
|
||||||
|
ast := api.StorageMinerStruct{}
|
||||||
|
|
||||||
|
ast.CommonStruct.Internal.AuthNew = lp.AuthNew
|
||||||
|
|
||||||
|
ast.Internal.ActorAddress = lp.ActorAddress
|
||||||
|
ast.Internal.WorkerJobs = lp.WorkerJobs
|
||||||
|
ast.Internal.SectorsStatus = lp.SectorsStatus
|
||||||
|
ast.Internal.SectorsList = lp.SectorsList
|
||||||
|
ast.Internal.SectorsSummary = lp.SectorsSummary
|
||||||
|
ast.Internal.SectorsListInStates = lp.SectorsListInStates
|
||||||
|
ast.Internal.StorageRedeclareLocal = lp.StorageRedeclareLocal
|
||||||
|
ast.Internal.ComputeDataCid = lp.ComputeDataCid
|
||||||
|
ast.Internal.SectorAddPieceToAny = func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 api.PieceDealInfo) (api.SectorOffset, error) {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
ast.Internal.StorageList = si.StorageList
|
||||||
|
ast.Internal.StorageDetach = si.StorageDetach
|
||||||
|
ast.Internal.StorageReportHealth = si.StorageReportHealth
|
||||||
|
ast.Internal.StorageDeclareSector = si.StorageDeclareSector
|
||||||
|
ast.Internal.StorageDropSector = si.StorageDropSector
|
||||||
|
ast.Internal.StorageFindSector = si.StorageFindSector
|
||||||
|
ast.Internal.StorageInfo = si.StorageInfo
|
||||||
|
ast.Internal.StorageBestAlloc = si.StorageBestAlloc
|
||||||
|
ast.Internal.StorageLock = si.StorageLock
|
||||||
|
ast.Internal.StorageTryLock = si.StorageTryLock
|
||||||
|
ast.Internal.StorageGetLocks = si.StorageGetLocks
|
||||||
|
|
||||||
|
return &ast, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func connectMinerService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, fapi v1api.FullNode) (api.StorageMiner, error) {
|
||||||
|
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, fapi v1api.FullNode) (api.StorageMiner, error) {
|
||||||
|
if strings.HasPrefix(apiInfo, "harmony:") {
|
||||||
|
return connectHarmony(apiInfo, fapi, mctx, lc)
|
||||||
|
}
|
||||||
|
|
||||||
ctx := helpers.LifecycleCtx(mctx, lc)
|
ctx := helpers.LifecycleCtx(mctx, lc)
|
||||||
info := cliutil.ParseApiInfo(apiInfo)
|
info := cliutil.ParseApiInfo(apiInfo)
|
||||||
addr, err := info.DialArgs("v0")
|
addr, err := info.DialArgs("v0")
|
||||||
@ -55,16 +157,16 @@ func connectMinerService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lif
}
}

func ConnectSealingService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerSealingService, error) {
func ConnectSealingService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, fapi v1api.FullNode) (MinerSealingService, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerSealingService, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, fapi v1api.FullNode) (MinerSealingService, error) {
log.Info("Connecting sealing service to miner")
return connectMinerService(apiInfo)(mctx, lc)
return connectMinerService(apiInfo)(mctx, lc, fapi)
}
}

func ConnectStorageService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerStorageService, error) {
func ConnectStorageService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, fapi v1api.FullNode) (MinerStorageService, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerStorageService, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, fapi v1api.FullNode) (MinerStorageService, error) {
log.Info("Connecting storage service to miner")
return connectMinerService(apiInfo)(mctx, lc)
return connectMinerService(apiInfo)(mctx, lc, fapi)
}
}
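
The harmonyApiInfoToConf comment above documents the connection-string layout as harmony:layer:maddr:user:pass:dbname:host:port. A self-contained demo of splitting it (all values below are made up; note that a simple colon split means no field can itself contain a colon):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// Layout: harmony:layer:maddr:user:pass:dbname:host:port
		apiInfo := "harmony:base:t01000:yugabyte:yugabyte:yugabyte:127.0.0.1:5433" // hypothetical

		parts := strings.Split(apiInfo, ":")
		if len(parts) != 8 {
			panic("invalid harmonydb info")
		}

		fmt.Println("layer:", parts[1], "miner:", parts[2], "host:", parts[6], "port:", parts[7])
	}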
@ -153,7 +153,7 @@ func MinerHandler(a api.StorageMiner, permissioned bool) (http.Handler, error) {
rootMux := mux.NewRouter()

// remote storage
{
if _, realImpl := a.(*impl.StorageMinerAPI); realImpl {
m := mux.NewRouter()
m.PathPrefix("/remote").HandlerFunc(a.(*impl.StorageMinerAPI).ServeRemote(permissioned))

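The change above guards the /remote routes with a comma-ok type assertion, so handlers that need the concrete *impl.StorageMinerAPI are only mounted when one is actually present (and not, say, the harmony-backed shim). A generic sketch of the pattern with made-up types:

	package main

	import "fmt"

	type storageAPI interface{ Name() string }

	type realAPI struct{}

	func (realAPI) Name() string { return "real" }

	type fakeAPI struct{}

	func (fakeAPI) Name() string { return "fake" }

	// mountRemote only wires routes needing the concrete type when we really have it.
	func mountRemote(a storageAPI) {
		if impl, ok := a.(realAPI); ok {
			fmt.Println("mounting /remote for", impl.Name())
			return
		}
		fmt.Println("skipping /remote for", a.Name())
	}

	func main() {
		mountRemote(realAPI{})
		mountRemote(fakeAPI{})
	}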
@ -20,14 +20,12 @@ import (
//var log = logging.Logger("provider")

func WindowPostScheduler(ctx context.Context, fc config.LotusProviderFees, pc config.ProvingConfig,
api api.FullNode, verif storiface.Verifier, lw *sealer.LocalWorker, sender *lpmessage.Sender,
api api.FullNode, verif storiface.Verifier, lw *sealer.LocalWorker, sender *lpmessage.Sender, chainSched *chainsched.ProviderChainSched,
as *multictladdr.MultiAddressSelector, addresses map[dtypes.MinerAddress]bool, db *harmonydb.DB,
stor paths.Store, idx paths.SectorIndex, max int) (*lpwindow.WdPostTask, *lpwindow.WdPostSubmitTask, *lpwindow.WdPostRecoverDeclareTask, error) {

chainSched := chainsched.New(api)

// todo config
ft := lpwindow.NewSimpleFaultTracker(stor, idx, 32, 5*time.Second, 300*time.Second)
ft := lpwindow.NewSimpleFaultTracker(stor, idx, pc.ParallelCheckLimit, time.Duration(pc.SingleCheckTimeout), time.Duration(pc.PartitionCheckTimeout))

computeTask, err := lpwindow.NewWdPostTask(db, api, ft, lw, verif, chainSched, addresses, max)
if err != nil {
@ -44,7 +42,5 @@ func WindowPostScheduler(ctx context.Context, fc config.LotusProviderFees, pc co
return nil, nil, nil, err
}

go chainSched.Run(ctx)

return computeTask, submitTask, recoverTask, nil
}
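
The scheduler is now injected rather than constructed and run inside WindowPostScheduler, so several task groups can share one chain subscription. A self-contained sketch of that inversion, with simplified stand-in types (not the real chainsched API):

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	// chainSched is a toy stand-in for a shared chain scheduler: one chain
	// subscription fanned out to many handlers.
	type chainSched struct{ handlers []func(height int) }

	func (s *chainSched) AddHandler(h func(height int)) { s.handlers = append(s.handlers, h) }

	func (s *chainSched) Run(ctx context.Context) {
		for height := 0; ; height++ { // pretend each tick is a new tipset
			select {
			case <-ctx.Done():
				return
			case <-time.After(10 * time.Millisecond):
				for _, h := range s.handlers {
					h(height)
				}
			}
		}
	}

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
		defer cancel()

		sched := &chainSched{} // constructed once by the caller...
		sched.AddHandler(func(h int) { fmt.Println("wdpost sees height", h) })
		sched.AddHandler(func(h int) { fmt.Println("commit-msg sees height", h) })
		sched.Run(ctx) // ...and run once, instead of per task group
	}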
@ -66,13 +66,13 @@ func (s *ProviderChainSched) Run(ctx context.Context) {
}

gotCur = false
log.Info("restarting window post scheduler")
log.Info("restarting chain scheduler")
}

select {
case changes, ok := <-notifs:
if !ok {
log.Warn("window post scheduler notifs channel closed")
log.Warn("chain notifs channel closed")
notifs = nil
continue
}
@ -124,7 +124,7 @@ func (s *ProviderChainSched) Run(ctx context.Context) {

func (s *ProviderChainSched) update(ctx context.Context, revert, apply *types.TipSet) {
if apply == nil {
log.Error("no new tipset in window post ProviderChainSched.update")
log.Error("no new tipset in ProviderChainSched.update")
return
}
421 provider/lpffi/sdr_funcs.go Normal file
@ -0,0 +1,421 @@
package lpffi

import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"

"github.com/KarpelesLab/reflink"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"

ffi "github.com/filecoin-project/filecoin-ffi"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-state-types/abi"
proof2 "github.com/filecoin-project/go-state-types/proof"

"github.com/filecoin-project/lotus/provider/lpproof"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer/proofpaths"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

var log = logging.Logger("lpffi")

/*
type ExternPrecommit2 func(ctx context.Context, sector storiface.SectorRef, cache, sealed string, pc1out storiface.PreCommit1Out) (sealedCID cid.Cid, unsealedCID cid.Cid, err error)

type ExternalSealer struct {
PreCommit2 ExternPrecommit2
}
*/
type SealCalls struct {
sectors *storageProvider

/*// externCalls contain overrides for calling alternative sealing logic
externCalls ExternalSealer*/
}

func NewSealCalls(st paths.Store, ls *paths.Local, si paths.SectorIndex) *SealCalls {
return &SealCalls{
sectors: &storageProvider{
storage: st,
localStore: ls,
sindex: si,
},
}
}

type storageProvider struct {
storage paths.Store
localStore *paths.Local
sindex paths.SectorIndex
}

func (l *storageProvider) AcquireSector(ctx context.Context, sector storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
paths, storageIDs, err := l.storage.AcquireSector(ctx, sector, existing, allocate, sealing, storiface.AcquireMove)
if err != nil {
return storiface.SectorPaths{}, nil, err
}

releaseStorage, err := l.localStore.Reserve(ctx, sector, allocate, storageIDs, storiface.FSOverheadSeal)
if err != nil {
return storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err)
}

log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths)

return paths, func() {
releaseStorage()

for _, fileType := range storiface.PathTypes {
if fileType&allocate == 0 {
continue
}

sid := storiface.PathByType(storageIDs, fileType)
if err := l.sindex.StorageDeclareSector(ctx, storiface.ID(sid), sector.ID, fileType, true); err != nil {
log.Errorf("declare sector error: %+v", err)
}
}
}, nil
}

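AcquireSector returns the paths together with a release closure that both frees the space reservation and declares the newly allocated files in the index. The shape of that pattern, reduced to a self-contained sketch with made-up resource names:

	package main

	import "fmt"

	// acquire reserves a resource and hands back a cleanup func; callers pair it
	// with defer so declaration/release always runs, mirroring AcquireSector above.
	func acquire(name string) (string, func(), error) {
		fmt.Println("reserving", name)
		release := func() {
			fmt.Println("declaring + releasing", name)
		}
		return "/scratch/" + name, release, nil
	}

	func main() {
		path, release, err := acquire("sector-1")
		if err != nil {
			panic(err)
		}
		defer release()

		fmt.Println("working in", path)
	}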
func (sb *SealCalls) GenerateSDR(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, commKcid cid.Cid) error {
paths, releaseSector, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTNone, storiface.FTCache, storiface.PathSealing)
if err != nil {
return xerrors.Errorf("acquiring sector paths: %w", err)
}
defer releaseSector()

// prepare SDR params
commp, err := commcid.CIDToDataCommitmentV1(commKcid)
if err != nil {
return xerrors.Errorf("computing commK: %w", err)
}

replicaID, err := sector.ProofType.ReplicaId(sector.ID.Miner, sector.ID.Number, ticket, commp)
if err != nil {
return xerrors.Errorf("computing replica id: %w", err)
}

// generate new sector key
err = ffi.GenerateSDR(
sector.ProofType,
paths.Cache,
replicaID,
)
if err != nil {
return xerrors.Errorf("generating SDR %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
}

return nil
}

func (sb *SealCalls) TreeD(ctx context.Context, sector storiface.SectorRef, size abi.PaddedPieceSize, data io.Reader, unpaddedData bool) (cid.Cid, error) {
maybeUns := storiface.FTNone
// todo sectors with data

paths, releaseSector, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache, maybeUns, storiface.PathSealing)
if err != nil {
return cid.Undef, xerrors.Errorf("acquiring sector paths: %w", err)
}
defer releaseSector()

return lpproof.BuildTreeD(data, unpaddedData, filepath.Join(paths.Cache, proofpaths.TreeDName), size)
}

func (sb *SealCalls) TreeRC(ctx context.Context, sector storiface.SectorRef, unsealed cid.Cid) (cid.Cid, cid.Cid, error) {
p1o, err := sb.makePhase1Out(unsealed, sector.ProofType)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("make phase1 output: %w", err)
}

paths, releaseSector, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache, storiface.FTSealed, storiface.PathSealing)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("acquiring sector paths: %w", err)
}
defer releaseSector()

{
// create sector-sized file at paths.Sealed; PC2 transforms it into a sealed sector in-place
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("getting sector size: %w", err)
}

{
// copy TreeD prefix to sealed sector, SealPreCommitPhase2 will mutate it in place into the sealed sector

// first try reflink + truncate, that should be way faster
err := reflink.Always(filepath.Join(paths.Cache, proofpaths.TreeDName), paths.Sealed)
if err == nil {
err = os.Truncate(paths.Sealed, int64(ssize))
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("truncating reflinked sealed file: %w", err)
}
} else {
log.Errorw("reflink treed -> sealed failed, falling back to slow copy, use single scratch btrfs or xfs filesystem", "error", err, "sector", sector, "cache", paths.Cache, "sealed", paths.Sealed)

// fallback to slow copy, copy ssize bytes from treed to sealed
dst, err := os.OpenFile(paths.Sealed, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("opening sealed sector file: %w", err)
}
src, err := os.Open(filepath.Join(paths.Cache, proofpaths.TreeDName))
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("opening treed sector file: %w", err)
}

_, err = io.CopyN(dst, src, int64(ssize))
derr := dst.Close()
_ = src.Close()
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("copying treed -> sealed: %w", err)
}
if derr != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("closing sealed file: %w", derr)
}
}
}
}

return ffi.SealPreCommitPhase2(p1o, paths.Cache, paths.Sealed)
}

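The copy step above is worth calling out: reflink.Always clones the file with a copy-on-write reflink (cheap on btrfs/XFS when source and destination share a filesystem), and only on failure does it fall back to a byte copy. A stripped-down, self-contained sketch of the same pattern (file names are made up):

	package main

	import (
		"io"
		"os"

		"github.com/KarpelesLab/reflink"
	)

	// cloneOrCopy tries a copy-on-write reflink first, then falls back to
	// copying the first n bytes by hand.
	func cloneOrCopy(src, dst string, n int64) error {
		if err := reflink.Always(src, dst); err == nil {
			return os.Truncate(dst, n) // keep only the n-byte prefix
		}

		s, err := os.Open(src)
		if err != nil {
			return err
		}
		defer s.Close()

		d, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
		if err != nil {
			return err
		}
		defer d.Close()

		_, err = io.CopyN(d, s, n)
		return err
	}

	func main() {
		if err := cloneOrCopy("tree-d.dat", "sealed.dat", 2048); err != nil { // hypothetical paths
			panic(err)
		}
	}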
func (sb *SealCalls) GenerateSynthPoRep() {
panic("todo")
}

func (sb *SealCalls) PoRepSnark(ctx context.Context, sn storiface.SectorRef, sealed, unsealed cid.Cid, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness) ([]byte, error) {
vproof, err := sb.sectors.storage.GeneratePoRepVanillaProof(ctx, sn, sealed, unsealed, ticket, seed)
if err != nil {
return nil, xerrors.Errorf("failed to generate vanilla proof: %w", err)
}

proof, err := ffi.SealCommitPhase2(vproof, sn.ID.Number, sn.ID.Miner)
if err != nil {
return nil, xerrors.Errorf("computing seal proof failed: %w", err)
}

ok, err := ffi.VerifySeal(proof2.SealVerifyInfo{
SealProof: sn.ProofType,
SectorID: sn.ID,
DealIDs: nil,
Randomness: ticket,
InteractiveRandomness: seed,
Proof: proof,
SealedCID: sealed,
UnsealedCID: unsealed,
})
if err != nil {
return nil, xerrors.Errorf("failed to verify proof: %w", err)
}
if !ok {
return nil, xerrors.Errorf("porep failed to validate")
}

return proof, nil
}

func (sb *SealCalls) makePhase1Out(unsCid cid.Cid, spt abi.RegisteredSealProof) ([]byte, error) {
commd, err := commcid.CIDToDataCommitmentV1(unsCid)
if err != nil {
return nil, xerrors.Errorf("make uns cid: %w", err)
}

type Config struct {
ID string `json:"id"`
Path string `json:"path"`
RowsToDiscard int `json:"rows_to_discard"`
Size int `json:"size"`
}

type Labels struct {
H *string `json:"_h"` // proofs want this..
Labels []Config `json:"labels"`
}

var phase1Output struct {
CommD [32]byte `json:"comm_d"`
Config Config `json:"config"` // TreeD
Labels map[string]*Labels `json:"labels"`
RegisteredProof string `json:"registered_proof"`
}

copy(phase1Output.CommD[:], commd)

phase1Output.Config.ID = "tree-d"
phase1Output.Config.Path = "/placeholder"
phase1Output.Labels = map[string]*Labels{}

switch spt {
case abi.RegisteredSealProof_StackedDrg2KiBV1_1, abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep:
phase1Output.Config.RowsToDiscard = 0
phase1Output.Config.Size = 127
phase1Output.Labels["StackedDrg2KiBV1"] = &Labels{}
phase1Output.RegisteredProof = "StackedDrg2KiBV1_1"

for i := 0; i < 2; i++ {
phase1Output.Labels["StackedDrg2KiBV1"].Labels = append(phase1Output.Labels["StackedDrg2KiBV1"].Labels, Config{
ID: fmt.Sprintf("layer-%d", i+1),
Path: "/placeholder",
RowsToDiscard: 0,
Size: 64,
})
}
case abi.RegisteredSealProof_StackedDrg512MiBV1_1:
phase1Output.Config.RowsToDiscard = 0
phase1Output.Config.Size = 33554431
phase1Output.Labels["StackedDrg512MiBV1"] = &Labels{}
phase1Output.RegisteredProof = "StackedDrg512MiBV1_1"

for i := 0; i < 2; i++ {
phase1Output.Labels["StackedDrg512MiBV1"].Labels = append(phase1Output.Labels["StackedDrg512MiBV1"].Labels, Config{
ID: fmt.Sprintf("layer-%d", i+1),
Path: "placeholder",
RowsToDiscard: 0,
Size: 16777216,
})
}

case abi.RegisteredSealProof_StackedDrg32GiBV1_1:
phase1Output.Config.RowsToDiscard = 0
phase1Output.Config.Size = 2147483647
phase1Output.Labels["StackedDrg32GiBV1"] = &Labels{}
phase1Output.RegisteredProof = "StackedDrg32GiBV1_1"

for i := 0; i < 11; i++ {
phase1Output.Labels["StackedDrg32GiBV1"].Labels = append(phase1Output.Labels["StackedDrg32GiBV1"].Labels, Config{
ID: fmt.Sprintf("layer-%d", i+1),
Path: "/placeholder",
RowsToDiscard: 0,
Size: 1073741824,
})
}

case abi.RegisteredSealProof_StackedDrg64GiBV1_1:
phase1Output.Config.RowsToDiscard = 0
phase1Output.Config.Size = 4294967295
phase1Output.Labels["StackedDrg64GiBV1"] = &Labels{}
phase1Output.RegisteredProof = "StackedDrg64GiBV1_1"

for i := 0; i < 11; i++ {
phase1Output.Labels["StackedDrg64GiBV1"].Labels = append(phase1Output.Labels["StackedDrg64GiBV1"].Labels, Config{
ID: fmt.Sprintf("layer-%d", i+1),
Path: "/placeholder",
RowsToDiscard: 0,
Size: 2147483648,
})
}

default:
panic("proof type not handled")
}

return json.Marshal(phase1Output)
}

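The Size constants in the switch are not arbitrary: TreeD is a binary merkle tree over 32-byte nodes, so a sector of S bytes has S/32 leaves and 2*(S/32)-1 tree nodes, while each SDR layer holds S/32 labels. A quick self-contained check of the numbers used above:

	package main

	import "fmt"

	func main() {
		// S/32 leaves; a full binary tree over them has 2*(S/32)-1 nodes.
		for _, ssize := range []uint64{2 << 10, 512 << 20, 32 << 30, 64 << 30} {
			leaves := ssize / 32
			fmt.Printf("sector %d: tree-d size %d, layer size %d\n", ssize, 2*leaves-1, leaves)
		}
		// Output matches the Config.Size / per-layer Size values in makePhase1Out:
		// 127/64, 33554431/16777216, 2147483647/1073741824, 4294967295/2147483648.
	}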
func (sb *SealCalls) LocalStorage(ctx context.Context) ([]storiface.StoragePath, error) {
return sb.sectors.localStore.Local(ctx)
}

func (sb *SealCalls) FinalizeSector(ctx context.Context, sector storiface.SectorRef, keepUnsealed bool) error {
alloc := storiface.FTNone
if keepUnsealed {
// note: in lotus-provider we don't write the unsealed file in any of the previous stages, it's only written here from tree-d
alloc = storiface.FTUnsealed
}

sectorPaths, releaseSector, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache, alloc, storiface.PathSealing)
if err != nil {
return xerrors.Errorf("acquiring sector paths: %w", err)
}
defer releaseSector()

ssize, err := sector.ProofType.SectorSize()
if err != nil {
return xerrors.Errorf("getting sector size: %w", err)
}

if keepUnsealed {
// tree-d contains exactly the unsealed data in its prefix, so
// * we move it to a temp file
// * we truncate the temp file to the sector size
// * we move the temp file to the unsealed location

// move tree-d to temp file
tempUnsealed := filepath.Join(sectorPaths.Cache, storiface.SectorName(sector.ID))
if err := os.Rename(filepath.Join(sectorPaths.Cache, proofpaths.TreeDName), tempUnsealed); err != nil {
return xerrors.Errorf("moving tree-d to temp file: %w", err)
}

// truncate the temp (unsealed) file to sector size
if err := os.Truncate(tempUnsealed, int64(ssize)); err != nil {
return xerrors.Errorf("truncating unsealed file to sector size: %w", err)
}

// move temp file to unsealed location
if err := paths.Move(tempUnsealed, sectorPaths.Unsealed); err != nil {
return xerrors.Errorf("move temp unsealed sector to final location (%s -> %s): %w", tempUnsealed, sectorPaths.Unsealed, err)
}
}

if err := ffi.ClearCache(uint64(ssize), sectorPaths.Cache); err != nil {
return xerrors.Errorf("clearing cache: %w", err)
}

return nil
}

func (sb *SealCalls) MoveStorage(ctx context.Context, sector storiface.SectorRef) error {
// only move the unsealed file if it still exists and needs moving
moveUnsealed := storiface.FTUnsealed
{
found, unsealedPathType, err := sb.sectorStorageType(ctx, sector, storiface.FTUnsealed)
if err != nil {
return xerrors.Errorf("checking cache storage type: %w", err)
}

if !found || unsealedPathType == storiface.PathStorage {
moveUnsealed = storiface.FTNone
}
}

toMove := storiface.FTCache | storiface.FTSealed | moveUnsealed

err := sb.sectors.storage.MoveStorage(ctx, sector, toMove)
if err != nil {
return xerrors.Errorf("moving storage: %w", err)
}

for _, fileType := range toMove.AllSet() {
if err := sb.sectors.storage.RemoveCopies(ctx, sector.ID, fileType); err != nil {
return xerrors.Errorf("rm copies (t:%s, s:%v): %w", fileType, sector, err)
}
}

return nil
}

func (sb *SealCalls) sectorStorageType(ctx context.Context, sector storiface.SectorRef, ft storiface.SectorFileType) (sectorFound bool, ptype storiface.PathType, err error) {
stores, err := sb.sectors.sindex.StorageFindSector(ctx, sector.ID, ft, 0, false)
if err != nil {
return false, "", xerrors.Errorf("finding sector: %w", err)
}
if len(stores) == 0 {
return false, "", nil
}

for _, store := range stores {
if store.CanSeal {
return true, storiface.PathSealing, nil
}
}

return true, storiface.PathStorage, nil
}
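
MoveStorage assembles the set of files to relocate as a bitmask (storiface.SectorFileType is a bit-flag type), which makes "conditionally include unsealed" a simple OR with FTNone. A self-contained sketch of that idiom with toy flags:

	package main

	import "fmt"

	type fileType int // toy stand-in for storiface.SectorFileType

	const (
		ftUnsealed fileType = 1 << iota // 1
		ftSealed                        // 2
		ftCache                         // 4
	)

	const ftNone fileType = 0

	// allSet lists the individual flags present in the mask, like SectorFileType.AllSet().
	func (t fileType) allSet() []fileType {
		var out []fileType
		for _, f := range []fileType{ftUnsealed, ftSealed, ftCache} {
			if t&f != 0 {
				out = append(out, f)
			}
		}
		return out
	}

	func main() {
		moveUnsealed := ftNone // would be ftUnsealed if the file exists and needs moving
		toMove := ftCache | ftSealed | moveUnsealed
		fmt.Println(toMove.allSet()) // [2 4]: sealed and cache only
	}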
137 provider/lpmarket/deal_ingest.go Normal file
@ -0,0 +1,137 @@
package lpmarket

import (
"context"
"encoding/json"
"net/http"
"net/url"

"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-padreader"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/provider/lpseal"
)

type Ingester interface {
AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error)
}

type PieceIngesterApi interface {
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)
StateMinerAllocated(ctx context.Context, a address.Address, key types.TipSetKey) (*bitfield.BitField, error)
StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error)
}

type PieceIngester struct {
db *harmonydb.DB
api PieceIngesterApi
}

func NewPieceIngester(db *harmonydb.DB, api PieceIngesterApi) *PieceIngester {
return &PieceIngester{db: db, api: api}
}

func (p *PieceIngester) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
mi, err := p.api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return api.SectorOffset{}, err
}

if piece.DealProposal.PieceSize != abi.PaddedPieceSize(mi.SectorSize) {
return api.SectorOffset{}, xerrors.Errorf("only full sector pieces supported for now")
}

// check raw size
if piece.DealProposal.PieceSize != padreader.PaddedSize(uint64(rawSize)).Padded() {
return api.SectorOffset{}, xerrors.Errorf("raw size doesn't match padded piece size")
}

// add the initial piece to a sector
nv, err := p.api.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("getting network version: %w", err)
}

synth := false // todo synthetic porep config
spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, mi.WindowPoStProofType, synth)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("getting seal proof type: %w", err)
}

mid, err := address.IDFromAddress(maddr)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("getting miner ID: %w", err)
}

num, err := lpseal.AllocateSectorNumbers(ctx, p.api, p.db, maddr, 1, func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) {
if len(numbers) != 1 {
return false, xerrors.Errorf("expected one sector number")
}
n := numbers[0]

_, err := tx.Exec("INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3)", mid, n, spt)
if err != nil {
return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err)
}

dataHdrJson, err := json.Marshal(header)
if err != nil {
return false, xerrors.Errorf("json.Marshal(header): %w", err)
}

dealProposalJson, err := json.Marshal(piece.DealProposal)
if err != nil {
return false, xerrors.Errorf("json.Marshal(piece.DealProposal): %w", err)
}

_, err = tx.Exec(`INSERT INTO sectors_sdr_initial_pieces (sp_id,
sector_number,
piece_index,

piece_cid,
piece_size,

data_url,
data_headers,
data_raw_size,
data_delete_on_finalize,

f05_publish_cid,
f05_deal_id,
f05_deal_proposal,
f05_deal_start_epoch,
f05_deal_end_epoch) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`,
mid, n, 0,
piece.DealProposal.PieceCID, piece.DealProposal.PieceSize,
source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed,
piece.PublishCid, piece.DealID, dealProposalJson, piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch)
if err != nil {
return false, xerrors.Errorf("inserting into sectors_sdr_initial_pieces: %w", err)
}

return true, nil
})
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("allocating sector numbers: %w", err)
}

if len(num) != 1 {
return api.SectorOffset{}, xerrors.Errorf("expected one sector number")
}

// After we insert the piece/sector_pipeline entries, the lpseal poller will take it from here

return api.SectorOffset{
Sector: num[0],
Offset: 0,
}, nil
}
33 provider/lpmarket/fakelm/iface.go Normal file
@ -0,0 +1,33 @@
package fakelm

import (
"context"

"github.com/google/uuid"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

// MinimalLMApi is a subset of the LotusMiner API that is exposed by lotus-provider
// for consumption by boost
type MinimalLMApi interface {
ActorAddress(context.Context) (address.Address, error)

WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error)

SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error)

SectorsList(context.Context) ([]abi.SectorNumber, error)
SectorsSummary(ctx context.Context) (map[api.SectorState]int, error)

SectorsListInStates(context.Context, []api.SectorState) ([]abi.SectorNumber, error)

StorageRedeclareLocal(context.Context, *storiface.ID, bool) error

ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error)
SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error)
}
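
One cheap way to keep a shim like LMRPCProvider in sync with an interface like MinimalLMApi is a compile-time assertion; whether this PR adds one is not visible here, so the following is just the general pattern with made-up types:

	package main

	import "fmt"

	type greeter interface{ Greet() string }

	type impl struct{}

	func (impl) Greet() string { return "hi" }

	// The blank-identifier assignment fails to compile the moment impl stops
	// satisfying greeter — no test needed.
	var _ greeter = impl{}

	func main() { fmt.Println(impl{}.Greet()) }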
367 provider/lpmarket/fakelm/lmimpl.go Normal file
@ -0,0 +1,367 @@
package fakelm

import (
"context"
"encoding/base64"
"net/http"
"net/url"

"github.com/BurntSushi/toml"
"github.com/gbrlsnchs/jwt/v3"
"github.com/google/uuid"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/provider/lpmarket"
"github.com/filecoin-project/lotus/storage/paths"
sealing "github.com/filecoin-project/lotus/storage/pipeline"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

type LMRPCProvider struct {
si paths.SectorIndex
full api.FullNode

maddr address.Address // lotus-miner RPC is single-actor
minerID abi.ActorID

ssize abi.SectorSize

pi lpmarket.Ingester
db *harmonydb.DB
confLayer string
}

func NewLMRPCProvider(si paths.SectorIndex, full api.FullNode, maddr address.Address, minerID abi.ActorID, ssize abi.SectorSize, pi lpmarket.Ingester, db *harmonydb.DB, confLayer string) *LMRPCProvider {
return &LMRPCProvider{
si: si,
full: full,
maddr: maddr,
minerID: minerID,
ssize: ssize,
pi: pi,
db: db,
confLayer: confLayer,
}
}

func (l *LMRPCProvider) ActorAddress(ctx context.Context) (address.Address, error) {
return l.maddr, nil
}

func (l *LMRPCProvider) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) {
// correct enough
return map[uuid.UUID][]storiface.WorkerJob{}, nil
}

func (l *LMRPCProvider) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) {
si, err := l.si.StorageFindSector(ctx, abi.SectorID{Miner: l.minerID, Number: sid}, storiface.FTSealed|storiface.FTCache, 0, false)
if err != nil {
return api.SectorInfo{}, err
}

var ssip []struct {
PieceCID *string `db:"piece_cid"`
DealID *int64 `db:"f05_deal_id"`
}

err = l.db.Select(ctx, &ssip, "SELECT ssip.piece_cid, ssip.f05_deal_id FROM sectors_sdr_pipeline p LEFT JOIN sectors_sdr_initial_pieces ssip ON p.sp_id = ssip.sp_id AND p.sector_number = ssip.sector_number WHERE p.sp_id = $1 AND p.sector_number = $2", l.minerID, sid)
if err != nil {
return api.SectorInfo{}, err
}

var deals []abi.DealID
if len(ssip) > 0 {
for _, d := range ssip {
if d.DealID != nil {
deals = append(deals, abi.DealID(*d.DealID))
}
}
} else {
osi, err := l.full.StateSectorGetInfo(ctx, l.maddr, sid, types.EmptyTSK)
if err != nil {
return api.SectorInfo{}, err
}

if osi != nil {
deals = osi.DealIDs
}
}

spt, err := miner.SealProofTypeFromSectorSize(l.ssize, network.Version20, false) // good enough, just need this for ssize anyways
if err != nil {
return api.SectorInfo{}, err
}

if len(si) == 0 {
state := api.SectorState(sealing.UndefinedSectorState)
if len(ssip) > 0 {
state = api.SectorState(sealing.PreCommit1)
}

return api.SectorInfo{
SectorID: sid,
State: state,
CommD: nil,
CommR: nil,
Proof: nil,
Deals: deals,
Pieces: nil,
Ticket: api.SealTicket{},
Seed: api.SealSeed{},
PreCommitMsg: nil,
CommitMsg: nil,
Retries: 0,
ToUpgrade: false,
ReplicaUpdateMessage: nil,
LastErr: "",
Log: nil,
SealProof: spt,
Activation: 0,
Expiration: 0,
DealWeight: big.Zero(),
VerifiedDealWeight: big.Zero(),
InitialPledge: big.Zero(),
OnTime: 0,
Early: 0,
}, nil
}

var state = api.SectorState(sealing.Proving)
if !si[0].CanStore {
state = api.SectorState(sealing.PreCommit2)
}

// todo improve this with on-chain info
return api.SectorInfo{
SectorID: sid,
State: state,
CommD: nil,
CommR: nil,
Proof: nil,
Deals: deals,
Pieces: nil,
Ticket: api.SealTicket{},
Seed: api.SealSeed{},
PreCommitMsg: nil,
CommitMsg: nil,
Retries: 0,
ToUpgrade: false,
ReplicaUpdateMessage: nil,
LastErr: "",
Log: nil,

SealProof: spt,
Activation: 0,
Expiration: 0,
DealWeight: big.Zero(),
VerifiedDealWeight: big.Zero(),
InitialPledge: big.Zero(),
OnTime: 0,
Early: 0,
}, nil
}

func (l *LMRPCProvider) SectorsList(ctx context.Context) ([]abi.SectorNumber, error) {
decls, err := l.si.StorageList(ctx)
if err != nil {
return nil, err
}

var out []abi.SectorNumber
for _, decl := range decls {
for _, s := range decl {
if s.Miner != l.minerID {
continue
}

out = append(out, s.SectorID.Number)
}
}

return out, nil
}

type sectorParts struct {
sealed, unsealed, cache bool
inStorage bool
}

func (l *LMRPCProvider) SectorsSummary(ctx context.Context) (map[api.SectorState]int, error) {
decls, err := l.si.StorageList(ctx)
if err != nil {
return nil, err
}

states := map[abi.SectorID]sectorParts{}
for si, decll := range decls {
sinfo, err := l.si.StorageInfo(ctx, si)
if err != nil {
return nil, err
}

for _, decl := range decll {
if decl.Miner != l.minerID {
continue
}

state := states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}]
state.sealed = state.sealed || decl.Has(storiface.FTSealed)
state.unsealed = state.unsealed || decl.Has(storiface.FTUnsealed)
state.cache = state.cache || decl.Has(storiface.FTCache)
state.inStorage = state.inStorage || sinfo.CanStore
states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] = state
}
}

out := map[api.SectorState]int{}
for _, state := range states {
switch {
case state.sealed && state.inStorage:
out[api.SectorState(sealing.Proving)]++
default:
// not even close to correct, but good enough for now
out[api.SectorState(sealing.PreCommit1)]++
}
}

return out, nil
}

func (l *LMRPCProvider) SectorsListInStates(ctx context.Context, want []api.SectorState) ([]abi.SectorNumber, error) {
decls, err := l.si.StorageList(ctx)
if err != nil {
return nil, err
}

wantProving, wantPrecommit1 := false, false
for _, s := range want {
switch s {
case api.SectorState(sealing.Proving):
wantProving = true
case api.SectorState(sealing.PreCommit1):
wantPrecommit1 = true
}
}

states := map[abi.SectorID]sectorParts{}

for si, decll := range decls {
sinfo, err := l.si.StorageInfo(ctx, si)
if err != nil {
return nil, err
}

for _, decl := range decll {
if decl.Miner != l.minerID {
continue
}

state := states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}]
state.sealed = state.sealed || decl.Has(storiface.FTSealed)
|
||||||
|
state.unsealed = state.unsealed || decl.Has(storiface.FTUnsealed)
|
||||||
|
state.cache = state.cache || decl.Has(storiface.FTCache)
|
||||||
|
state.inStorage = state.inStorage || sinfo.CanStore
|
||||||
|
states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] = state
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var out []abi.SectorNumber
|
||||||
|
|
||||||
|
for id, state := range states {
|
||||||
|
switch {
|
||||||
|
case state.sealed && state.inStorage:
|
||||||
|
if wantProving {
|
||||||
|
out = append(out, id.Number)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// not even close to correct, but good enough for now
|
||||||
|
if wantPrecommit1 {
|
||||||
|
out = append(out, id.Number)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LMRPCProvider) StorageRedeclareLocal(ctx context.Context, id *storiface.ID, b bool) error {
|
||||||
|
// so this rescans and redeclares sectors on lotus-miner; whyyy is boost even calling this?
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LMRPCProvider) IsUnsealed(ctx context.Context, sectorNum abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) {
|
||||||
|
sectorID := abi.SectorID{Miner: l.minerID, Number: sectorNum}
|
||||||
|
|
||||||
|
si, err := l.si.StorageFindSector(ctx, sectorID, storiface.FTUnsealed, 0, false)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// yes, yes, technically sectors can be partially unsealed, but that is never done in practice
|
||||||
|
// and can't even be easily done with the current implementation
|
||||||
|
return len(si) > 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LMRPCProvider) ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) {
|
||||||
|
return abi.PieceInfo{}, xerrors.Errorf("not supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LMRPCProvider) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) {
|
||||||
|
if d.DealProposal.PieceSize != abi.PaddedPieceSize(l.ssize) {
|
||||||
|
return api.SectorOffset{}, xerrors.Errorf("only full-sector pieces are supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
return api.SectorOffset{}, xerrors.Errorf("not supported, use AllocatePieceToSector")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LMRPCProvider) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece api.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
|
||||||
|
return l.pi.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LMRPCProvider) AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) {
|
||||||
|
var cs []struct {
|
||||||
|
Config string
|
||||||
|
}
|
||||||
|
|
||||||
|
err := l.db.Select(ctx, &cs, "select config from harmony_config where title = $1", l.confLayer)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(cs) == 0 {
|
||||||
|
return nil, xerrors.Errorf("no harmony config found")
|
||||||
|
}
|
||||||
|
|
||||||
|
lp := config.DefaultLotusProvider()
|
||||||
|
if _, err := toml.Decode(cs[0].Config, lp); err != nil {
|
||||||
|
return nil, xerrors.Errorf("decode harmony config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
type jwtPayload struct {
|
||||||
|
Allow []auth.Permission
|
||||||
|
}
|
||||||
|
|
||||||
|
p := jwtPayload{
|
||||||
|
Allow: perms,
|
||||||
|
}
|
||||||
|
|
||||||
|
sk, err := base64.StdEncoding.DecodeString(lp.Apis.StorageRPCSecret)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("decode secret: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return jwt.Sign(&p, jwt.NewHS256(sk))
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ MinimalLMApi = &LMRPCProvider{}
|
@ -390,7 +390,7 @@ func (s *Sender) Send(ctx context.Context, msg *types.Message, mss *api.MessageS
 		break
 	}

-	log.Infow("sent message", "cid", sigCid, "task_id", taskAdder, "send_error", sendErr, "poll_loops", pollLoops)
+	log.Infow("sent message", "cid", sigCid, "task_id", sendTaskID, "send_error", sendErr, "poll_loops", pollLoops)

 	return sigCid, sendErr
 }

provider/lpmessage/watch.go (new file, 214 lines)
@ -0,0 +1,214 @@
package lpmessage

import (
    "context"
    "encoding/json"
    "sync/atomic"

    "github.com/ipfs/go-cid"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/lib/harmony/harmonydb"
    "github.com/filecoin-project/lotus/lib/harmony/harmonytask"
    "github.com/filecoin-project/lotus/provider/chainsched"
)

const MinConfidence = 6

type MessageWaiterApi interface {
    StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
    ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
    ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
    StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
    ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
}

type MessageWatcher struct {
    db  *harmonydb.DB
    ht  *harmonytask.TaskEngine
    api MessageWaiterApi

    stopping, stopped chan struct{}

    updateCh chan struct{}
    bestTs   atomic.Pointer[types.TipSetKey]
}

func NewMessageWatcher(db *harmonydb.DB, ht *harmonytask.TaskEngine, pcs *chainsched.ProviderChainSched, api MessageWaiterApi) (*MessageWatcher, error) {
    mw := &MessageWatcher{
        db:       db,
        ht:       ht,
        api:      api,
        stopping: make(chan struct{}),
        stopped:  make(chan struct{}),
        updateCh: make(chan struct{}),
    }
    go mw.run()
    if err := pcs.AddHandler(mw.processHeadChange); err != nil {
        return nil, err
    }
    return mw, nil
}

func (mw *MessageWatcher) run() {
    defer close(mw.stopped)

    for {
        select {
        case <-mw.stopping:
            // todo cleanup assignments
            return
        case <-mw.updateCh:
            mw.update()
        }
    }
}

func (mw *MessageWatcher) update() {
    ctx := context.Background()

    tsk := *mw.bestTs.Load()

    ts, err := mw.api.ChainGetTipSet(ctx, tsk)
    if err != nil {
        log.Errorf("failed to get tipset: %+v", err)
        return
    }

    lbts, err := mw.api.ChainGetTipSetByHeight(ctx, ts.Height()-MinConfidence, tsk)
    if err != nil {
        log.Errorf("failed to get tipset: %+v", err)
        return
    }
    lbtsk := lbts.Key()

    machineID := mw.ht.ResourcesAvailable().MachineID

    // first if we see pending messages with null owner, assign them to ourselves
    {
        n, err := mw.db.Exec(ctx, `UPDATE message_waits SET waiter_machine_id = $1 WHERE waiter_machine_id IS NULL AND executed_tsk_cid IS NULL`, machineID)
        if err != nil {
            log.Errorf("failed to assign pending messages: %+v", err)
            return
        }
        if n > 0 {
            log.Debugw("assigned pending messages to ourselves", "assigned", n)
        }
    }

    // get messages assigned to us
    var msgs []struct {
        Cid   string `db:"signed_message_cid"`
        From  string `db:"from_key"`
        Nonce uint64 `db:"nonce"`

        FromAddr address.Address `db:"-"`
    }

    // really large limit in case of things getting stuck and backlogging severely
    err = mw.db.Select(ctx, &msgs, `SELECT signed_message_cid, from_key, nonce FROM message_waits
        JOIN message_sends ON signed_message_cid = signed_cid
        WHERE waiter_machine_id = $1 LIMIT 10000`, machineID)
    if err != nil {
        log.Errorf("failed to get assigned messages: %+v", err)
        return
    }

    // get address/nonce set to check
    toCheck := make(map[address.Address]uint64)

    for i := range msgs {
        msgs[i].FromAddr, err = address.NewFromString(msgs[i].From)
        if err != nil {
            log.Errorf("failed to parse from address: %+v", err)
            return
        }
        toCheck[msgs[i].FromAddr] = 0
    }

    // get the nonce for each address
    for addr := range toCheck {
        act, err := mw.api.StateGetActor(ctx, addr, lbtsk)
        if err != nil {
            log.Errorf("failed to get actor: %+v", err)
            return
        }

        toCheck[addr] = act.Nonce
    }

    // check if any of the messages we have assigned to us are now on chain, and have been for MinConfidence epochs
    for _, msg := range msgs {
        if msg.Nonce > toCheck[msg.FromAddr] {
            continue // definitely not on chain yet
        }

        look, err := mw.api.StateSearchMsg(ctx, lbtsk, cid.MustParse(msg.Cid), api.LookbackNoLimit, false)
        if err != nil {
            log.Errorf("failed to search for message: %+v", err)
            return
        }

        if look == nil {
            continue // not on chain yet (or not executed yet)
        }

        tskCid, err := look.TipSet.Cid()
        if err != nil {
            log.Errorf("failed to get tipset cid: %+v", err)
            return
        }

        emsg, err := mw.api.ChainGetMessage(ctx, look.Message)
        if err != nil {
            log.Errorf("failed to get message: %+v", err)
            return
        }

        execMsg, err := json.Marshal(emsg)
        if err != nil {
            log.Errorf("failed to marshal message: %+v", err)
            return
        }

        // record in db
        _, err = mw.db.Exec(ctx, `UPDATE message_waits SET
            waiter_machine_id = NULL,
            executed_tsk_cid = $1, executed_tsk_epoch = $2,
            executed_msg_cid = $3, executed_msg_data = $4,
            executed_rcpt_exitcode = $5, executed_rcpt_return = $6, executed_rcpt_gas_used = $7
            WHERE signed_message_cid = $8`, tskCid, look.Height,
            look.Message, execMsg,
            look.Receipt.ExitCode, look.Receipt.Return, look.Receipt.GasUsed,
            msg.Cid)
        if err != nil {
            log.Errorf("failed to update message wait: %+v", err)
            return
        }
    }
}

func (mw *MessageWatcher) Stop(ctx context.Context) error {
    close(mw.stopping)
    select {
    case <-mw.stopped:
    case <-ctx.Done():
        return ctx.Err()
    }

    return nil
}

func (mw *MessageWatcher) processHeadChange(ctx context.Context, revert *types.TipSet, apply *types.TipSet) error {
    best := apply.Key()
    mw.bestTs.Store(&best)
    select {
    case mw.updateCh <- struct{}{}:
    default:
    }
    return nil
}

provider/lpproof/treed_build.go (new file, 283 lines)
@ -0,0 +1,283 @@
package lpproof

import (
    "io"
    "math/bits"
    "os"
    "runtime"
    "sync"
    "time"

    "github.com/hashicorp/go-multierror"
    "github.com/ipfs/go-cid"
    pool "github.com/libp2p/go-buffer-pool"
    "github.com/minio/sha256-simd"
    "golang.org/x/xerrors"

    commcid "github.com/filecoin-project/go-fil-commcid"
    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/storage/sealer/fr32"
)

const nodeSize = 32
const threadChunkSize = 1 << 20

func hashChunk(data [][]byte) {
    l1Nodes := len(data[0]) / nodeSize / 2

    d := sha256.New()

    sumBuf := make([]byte, nodeSize)

    for i := 0; i < l1Nodes; i++ {
        levels := bits.TrailingZeros(^uint(i)) + 1

        inNode := i * 2 // at level 0
        outNode := i

        for l := 0; l < levels; l++ {
            d.Reset()
            inNodeData := data[l][inNode*nodeSize : (inNode+2)*nodeSize]
            d.Write(inNodeData)
            copy(data[l+1][outNode*nodeSize:(outNode+1)*nodeSize], d.Sum(sumBuf[:0]))
            // set top bits to 00
            data[l+1][outNode*nodeSize+nodeSize-1] &= 0x3f

            inNode--
            inNode >>= 1
            outNode >>= 1
        }
    }
}

func BuildTreeD(data io.Reader, unpaddedData bool, outPath string, size abi.PaddedPieceSize) (_ cid.Cid, err error) {
    out, err := os.Create(outPath)
    if err != nil {
        return cid.Undef, err
    }
    defer func() {
        cerr := out.Close()
        if cerr != nil {
            err = multierror.Append(err, cerr)
        }
    }()

    outSize := treeSize(size)

    // allocate space for the tree
    err = out.Truncate(int64(outSize))
    if err != nil {
        return cid.Undef, err
    }

    // setup buffers
    maxThreads := int64(size) / threadChunkSize
    if maxThreads > int64(runtime.NumCPU())*15/10 {
        maxThreads = int64(runtime.NumCPU()) * 15 / 10
    }
    if maxThreads < 1 {
        maxThreads = 1
    }

    // allocate buffers
    var bufLk sync.Mutex
    workerBuffers := make([][][]byte, maxThreads) // [worker][level][levelSize]

    for i := range workerBuffers {
        workerBuffer := make([][]byte, 1)

        bottomBufSize := int64(threadChunkSize)
        if bottomBufSize > int64(size) {
            bottomBufSize = int64(size)
        }
        workerBuffer[0] = pool.Get(int(bottomBufSize))

        // append levels until we get to a 32 byte level
        for len(workerBuffer[len(workerBuffer)-1]) > 32 {
            newLevel := pool.Get(len(workerBuffer[len(workerBuffer)-1]) / 2)
            workerBuffer = append(workerBuffer, newLevel)
        }
        workerBuffers[i] = workerBuffer
    }

    // prepare apex buffer
    var apexBuf [][]byte
    {
        apexBottomSize := uint64(size) / uint64(len(workerBuffers[0][0]))
        if apexBottomSize == 0 {
            apexBottomSize = 1
        }

        apexBuf = make([][]byte, 1)
        apexBuf[0] = pool.Get(int(apexBottomSize * nodeSize))
        for len(apexBuf[len(apexBuf)-1]) > 32 {
            newLevel := pool.Get(len(apexBuf[len(apexBuf)-1]) / 2)
            apexBuf = append(apexBuf, newLevel)
        }
    }

    // defer free pool buffers
    defer func() {
        for _, workerBuffer := range workerBuffers {
            for _, level := range workerBuffer {
                pool.Put(level)
            }
        }
        for _, level := range apexBuf {
            pool.Put(level)
        }
    }()

    // start processing
    var processed uint64
    var workWg sync.WaitGroup
    var errLock sync.Mutex
    var oerr error

    for processed < uint64(size) {
        // get a buffer
        bufLk.Lock()
        if len(workerBuffers) == 0 {
            bufLk.Unlock()
            time.Sleep(50 * time.Microsecond)
            continue
        }

        // pop last
        workBuffer := workerBuffers[len(workerBuffers)-1]
        workerBuffers = workerBuffers[:len(workerBuffers)-1]

        bufLk.Unlock()

        // before reading check that we didn't get a write error
        errLock.Lock()
        if oerr != nil {
            errLock.Unlock()
            return cid.Undef, oerr
        }
        errLock.Unlock()

        // read data into the bottom level
        // note: the bottom level will never be too big; data is power of two
        // size, and if it's smaller than a single buffer, we only have one
        // smaller buffer

        processedSize := uint64(len(workBuffer[0]))
        if unpaddedData {
            workBuffer[0] = workBuffer[0][:abi.PaddedPieceSize(len(workBuffer[0])).Unpadded()]
        }

        _, err := io.ReadFull(data, workBuffer[0])
        if err != nil && err != io.EOF {
            return cid.Undef, err
        }

        // start processing
        workWg.Add(1)
        go func(startOffset uint64) {
            defer workWg.Done()

            if unpaddedData {
                paddedBuf := pool.Get(int(abi.UnpaddedPieceSize(len(workBuffer[0])).Padded()))
                fr32.PadSingle(workBuffer[0], paddedBuf)
                pool.Put(workBuffer[0])
                workBuffer[0] = paddedBuf
            }
            hashChunk(workBuffer)

            // persist apex
            {
                apexHash := workBuffer[len(workBuffer)-1]
                hashPos := startOffset / uint64(len(workBuffer[0])) * nodeSize

                copy(apexBuf[0][hashPos:hashPos+nodeSize], apexHash)
            }

            // write results
            offsetInLayer := startOffset
            for layer, layerData := range workBuffer {

                // layerOff is outSize:bits[most significant bit - layer]
                layerOff := layerOffset(uint64(size), layer)
                dataOff := offsetInLayer + layerOff
                offsetInLayer /= 2

                _, werr := out.WriteAt(layerData, int64(dataOff))
                if werr != nil {
                    errLock.Lock()
                    oerr = multierror.Append(oerr, werr)
                    errLock.Unlock()
                    return
                }
            }

            // return buffer
            bufLk.Lock()
            workerBuffers = append(workerBuffers, workBuffer)
            bufLk.Unlock()
        }(processed)

        processed += processedSize
    }

    workWg.Wait()

    if oerr != nil {
        return cid.Undef, oerr
    }

    threadLayers := bits.Len(uint(len(workerBuffers[0][0])) / nodeSize)

    if len(apexBuf) > 0 {
        // hash the apex
        hashChunk(apexBuf)

        // write apex
        for apexLayer, layerData := range apexBuf {
            if apexLayer == 0 {
                continue
            }
            layer := apexLayer + threadLayers - 1

            layerOff := layerOffset(uint64(size), layer)
            _, werr := out.WriteAt(layerData, int64(layerOff))
            if werr != nil {
                return cid.Undef, xerrors.Errorf("write apex: %w", werr)
            }
        }
    }

    var commp [32]byte
    copy(commp[:], apexBuf[len(apexBuf)-1])

    commCid, err := commcid.DataCommitmentV1ToCID(commp[:])
    if err != nil {
        return cid.Undef, err
    }

    return commCid, nil
}

func treeSize(data abi.PaddedPieceSize) uint64 {
    bytesToAlloc := uint64(data)

    // append bytes until we get to nodeSize
    for todo := bytesToAlloc; todo > nodeSize; todo /= 2 {
        bytesToAlloc += todo / 2
    }

    return bytesToAlloc
}

func layerOffset(size uint64, layer int) uint64 {
    allOnes := uint64(0xffff_ffff_ffff_ffff)

    // get 'layer' bits set to 1
    layerOnes := allOnes >> uint64(64-layer)

    // shift layerOnes to the left such that the highest bit is at the same position as the highest bit in size (which is power-of-two)
    sizeBitPos := bits.Len64(size) - 1
    layerOnes <<= sizeBitPos - (layer - 1)
    return layerOnes
}

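A note on the bit trick in layerOffset above: for a power-of-two size 2^k, the offset of layer l is the prefix sum of all earlier (halving) layer sizes, and that sum is exactly a run of l one-bits ending at bit position k, which is what the shifted all-ones mask produces. As an identity (editorial, not part of the diff):

\mathrm{layerOffset}(2^k, \ell) = \sum_{j=0}^{\ell-1} 2^{k-j} = 2^{k+1} - 2^{k-\ell+1} = \left(2^{\ell}-1\right)\cdot 2^{\,k-\ell+1}

so shifting the l-bit all-ones mask left by k-(l-1) positions, as the function does, computes the sum directly. TestTreeLayerOffset in the test file below checks the same identity for size = 128.
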
provider/lpproof/treed_build_test.go (new file, 516 lines)
@ -0,0 +1,516 @@
package lpproof

import (
    "bufio"
    "bytes"
    "crypto/rand"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "runtime"
    "testing"

    pool "github.com/libp2p/go-buffer-pool"
    "github.com/stretchr/testify/require"

    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader"
)

func TestTreeSize(t *testing.T) {
    require.Equal(t, uint64(32), treeSize(abi.PaddedPieceSize(32)))
    require.Equal(t, uint64(64+32), treeSize(abi.PaddedPieceSize(64)))
    require.Equal(t, uint64(128+64+32), treeSize(abi.PaddedPieceSize(128)))
    require.Equal(t, uint64(256+128+64+32), treeSize(abi.PaddedPieceSize(256)))
}

func TestTreeLayerOffset(t *testing.T) {
    require.Equal(t, uint64(0), layerOffset(128, 0))
    require.Equal(t, uint64(128), layerOffset(128, 1))
    require.Equal(t, uint64(128+64), layerOffset(128, 2))
    require.Equal(t, uint64(128+64+32), layerOffset(128, 3))
}

func TestHashChunk(t *testing.T) {
    chunk := make([]byte, 64)
    chunk[0] = 0x01

    out := make([]byte, 32)

    data := [][]byte{chunk, out}
    hashChunk(data)

    // 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d
    // d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f
    expect := []byte{
        0x16, 0xab, 0xab, 0x34, 0x1f, 0xb7, 0xf3, 0x70,
        0xe2, 0x7e, 0x4d, 0xad, 0xcf, 0x81, 0x76, 0x6d,
        0xd0, 0xdf, 0xd0, 0xae, 0x64, 0x46, 0x94, 0x77,
        0xbb, 0x2c, 0xf6, 0x61, 0x49, 0x38, 0xb2, 0x2f,
    }

    require.Equal(t, expect, out)
}

func TestHashChunk2L(t *testing.T) {
    data0 := make([]byte, 128)
    data0[0] = 0x01

    l1 := make([]byte, 64)
    l2 := make([]byte, 32)

    data := [][]byte{data0, l1, l2}
    hashChunk(data)

    // 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d
    // d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f
    expectL1Left := []byte{
        0x16, 0xab, 0xab, 0x34, 0x1f, 0xb7, 0xf3, 0x70,
        0xe2, 0x7e, 0x4d, 0xad, 0xcf, 0x81, 0x76, 0x6d,
        0xd0, 0xdf, 0xd0, 0xae, 0x64, 0x46, 0x94, 0x77,
        0xbb, 0x2c, 0xf6, 0x61, 0x49, 0x38, 0xb2, 0x2f,
    }

    // f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b
    // 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b
    expectL1Rest := []byte{
        0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30,
        0x27, 0x98, 0xef, 0x6e, 0xd3, 0x09, 0x97, 0x9b,
        0x43, 0x00, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8,
        0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x0b,
    }

    require.Equal(t, expectL1Left, l1[:32])
    require.Equal(t, expectL1Rest, l1[32:])

    // 0d d6 da e4 1c 2f 75 55 01 29 59 4f b6 44 e4 a8
    // 42 cf af b3 16 a2 d5 93 21 e3 88 fe 84 a1 ec 2f
    expectL2 := []byte{
        0x0d, 0xd6, 0xda, 0xe4, 0x1c, 0x2f, 0x75, 0x55,
        0x01, 0x29, 0x59, 0x4f, 0xb6, 0x44, 0xe4, 0xa8,
        0x42, 0xcf, 0xaf, 0xb3, 0x16, 0xa2, 0xd5, 0x93,
        0x21, 0xe3, 0x88, 0xfe, 0x84, 0xa1, 0xec, 0x2f,
    }

    require.Equal(t, expectL2, l2)
}

func Test2K(t *testing.T) {
    data := make([]byte, 2048)
    data[0] = 0x01

    tempFile := filepath.Join(t.TempDir(), "tree.dat")

    commd, err := BuildTreeD(bytes.NewReader(data), false, tempFile, 2048)
    require.NoError(t, err)
    fmt.Println(commd)

    // dump tree.dat
    dat, err := os.ReadFile(tempFile)
    require.NoError(t, err)

    for i, b := range dat {
        // 32 values per line
        if i%32 == 0 {
            fmt.Println()

            // line offset hexdump style
            fmt.Printf("%04x: ", i)
        }
        fmt.Printf("%02x ", b)
    }
    fmt.Println()

    require.Equal(t, "baga6ea4seaqovgk4kr4eoifujh6jfmdqvw3m6zrvyjqzu6s6abkketui6jjoydi", commd.String())
}

const expectD8M = `00000000: 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00000020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
00800000: 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f
00800020: f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b
*
00c00000: 0d d6 da e4 1c 2f 75 55 01 29 59 4f b6 44 e4 a8 42 cf af b3 16 a2 d5 93 21 e3 88 fe 84 a1 ec 2f
00c00020: 37 31 bb 99 ac 68 9f 66 ee f5 97 3e 4a 94 da 18 8f 4d dc ae 58 07 24 fc 6f 3f d6 0d fd 48 83 33
*
00e00000: 11 b1 c4 80 05 21 d5 e5 83 4a de b3 70 7c 74 15 9f f3 37 b0 96 16 3c 94 31 16 73 40 e7 b1 17 1d
00e00020: 64 2a 60 7e f8 86 b0 04 bf 2c 19 78 46 3a e1 d4 69 3a c0 f4 10 eb 2d 1b 7a 47 fe 20 5e 5e 75 0f
*
00f00000: ec 69 25 55 9b cc 52 84 0a 22 38 5b 2b 6b 35 b4 50 14 50 04 28 f4 59 fe c1 23 01 0f e7 ef 18 1c
00f00020: 57 a2 38 1a 28 65 2b f4 7f 6b ef 7a ca 67 9b e4 ae de 58 71 ab 5c f3 eb 2c 08 11 44 88 cb 85 26
*
00f80000: 3d d2 eb 19 3e e2 f0 47 34 87 bf 4b 83 aa 3a bd a9 c8 4e fa e5 52 6d 8a fd 61 2d 5d 9e 3d 79 34
00f80020: 1f 7a c9 59 55 10 e0 9e a4 1c 46 0b 17 64 30 bb 32 2c d6 fb 41 2e c5 7c b1 7d 98 9a 43 10 37 2f
*
00fc0000: ea 99 5c 54 78 47 20 b4 49 fc 92 b0 70 ad b6 cf 66 35 c2 61 9a 7a 5e 00 54 a2 4e 88 f2 52 ec 0d
00fc0020: fc 7e 92 82 96 e5 16 fa ad e9 86 b2 8f 92 d4 4a 4f 24 b9 35 48 52 23 37 6a 79 90 27 bc 18 f8 33
*
00fe0000: b9 97 02 8b 06 d7 2e 96 07 86 79 58 e1 5f 8d 07 b7 ae 37 ab 29 ab 3f a9 de fe c9 8e aa 37 6e 28
00fe0020: 08 c4 7b 38 ee 13 bc 43 f4 1b 91 5c 0e ed 99 11 a2 60 86 b3 ed 62 40 1b f9 d5 8b 8d 19 df f6 24
*
00ff0000: a0 c4 4f 7b a4 4c d2 3c 2e bf 75 98 7b e8 98 a5 63 80 73 b2 f9 11 cf ee ce 14 5a 77 58 0c 6c 12
00ff0020: b2 e4 7b fb 11 fa cd 94 1f 62 af 5c 75 0f 3e a5 cc 4d f5 17 d5 c4 f1 6d b2 b4 d7 7b ae c1 a3 2f
*
00ff8000: 89 2d 2b 00 a5 c1 54 10 94 ca 65 de 21 3b bd 45 90 14 15 ed d1 10 17 cd 29 f3 ed 75 73 02 a0 3f
00ff8020: f9 22 61 60 c8 f9 27 bf dc c4 18 cd f2 03 49 31 46 00 8e ae fb 7d 02 19 4d 5e 54 81 89 00 51 08
*
00ffc000: 22 48 54 8b ba a5 8f e2 db 0b 07 18 c1 d7 20 1f ed 64 c7 8d 7d 22 88 36 b2 a1 b2 f9 42 0b ef 3c
00ffc020: 2c 1a 96 4b b9 0b 59 eb fe 0f 6d a2 9a d6 5a e3 e4 17 72 4a 8f 7c 11 74 5a 40 ca c1 e5 e7 40 11
*
00ffe000: 1c 6a 48 08 3e 17 49 90 ef c0 56 ec b1 44 75 1d e2 76 d8 a5 1c 3d 93 d7 4c 81 92 48 ab 78 cc 30
00ffe020: fe e3 78 ce f1 64 04 b1 99 ed e0 b1 3e 11 b6 24 ff 9d 78 4f bb ed 87 8d 83 29 7e 79 5e 02 4f 02
*
00fff000: 0a b4 26 38 1b 72 cd 3b b3 e3 c7 82 18 fe 1f 18 3b 3a 19 db c4 d9 26 94 30 03 cd 01 b6 d1 8d 0b
00fff020: 8e 9e 24 03 fa 88 4c f6 23 7f 60 df 25 f8 3e e4 0d ca 9e d8 79 eb 6f 63 52 d1 50 84 f5 ad 0d 3f
*
00fff800: 16 0d 87 17 1b e7 ae e4 20 a3 54 24 cf df 4f fe a2 fd 7b 94 58 89 58 f3 45 11 57 fc 39 8f 34 26
00fff820: 75 2d 96 93 fa 16 75 24 39 54 76 e3 17 a9 85 80 f0 09 47 af b7 a3 05 40 d6 25 a9 29 1c c1 2a 07
*
00fffc00: 1f 40 60 11 da 08 f8 09 80 63 97 dc 1c 57 b9 87 83 37 5a 59 5d d6 81 42 6c 1e cd d4 3c ab e3 3c
00fffc20: 70 22 f6 0f 7e f6 ad fa 17 11 7a 52 61 9e 30 ce a8 2c 68 07 5a df 1c 66 77 86 ec 50 6e ef 2d 19
*
00fffe00: 51 4e dd 2f 6f 8f 6d fd 54 b0 d1 20 7b b7 06 df 85 c5 a3 19 0e af 38 72 37 20 c5 07 56 67 7f 14
00fffe20: d9 98 87 b9 73 57 3a 96 e1 13 93 64 52 36 c1 7b 1f 4c 70 34 d7 23 c7 a9 9f 70 9b b4 da 61 16 2b
*
00ffff00: 5a 1d 84 74 85 a3 4b 28 08 93 a9 cf b2 8b 54 44 67 12 8b eb c0 22 bd de c1 04 be ca b4 f4 81 31
00ffff20: d0 b5 30 db b0 b4 f2 5c 5d 2f 2a 28 df ee 80 8b 53 41 2a 02 93 1f 18 c4 99 f5 a2 54 08 6b 13 26
*
00ffff80: c5 fb f3 f9 4c c2 2b 3c 51 ad c1 ea af e9 4b a0 9f b2 73 f3 73 d2 10 1f 12 0b 11 c6 85 21 66 2f
00ffffa0: 84 c0 42 1b a0 68 5a 01 bf 79 5a 23 44 06 4f e4 24 bd 52 a9 d2 43 77 b3 94 ff 4c 4b 45 68 e8 11
00ffffc0: 23 40 4a 88 80 f9 cb c7 20 39 cb 86 14 35 9c 28 34 84 55 70 fe 95 19 0b bd 4d 93 41 42 e8 25 2c
`

func Test8MiB(t *testing.T) {
    data := make([]byte, 8<<20)
    data[0] = 0x01

    tempFile := filepath.Join(t.TempDir(), "tree.dat")

    commd, err := BuildTreeD(bytes.NewReader(data), false, tempFile, 8<<20)
    require.NoError(t, err)
    fmt.Println(commd)

    // dump tree.dat
    dat, err := os.ReadFile(tempFile)
    require.NoError(t, err)

    actualD := hexPrint32LDedup(bytes.NewReader(dat))
    fmt.Println(actualD)

    require.EqualValues(t, expectD8M, actualD)
    require.Equal(t, "baga6ea4seaqcgqckrcapts6hea44xbqugwocqneekvyp5fizbo6u3e2biluckla", commd.String())
}

func Test8MiBUnpad(t *testing.T) {
    data := make([]byte, abi.PaddedPieceSize(8<<20).Unpadded())
    data[0] = 0x01

    tempFile := filepath.Join(t.TempDir(), "tree.dat")

    commd, err := BuildTreeD(bytes.NewReader(data), true, tempFile, 8<<20)
    require.NoError(t, err)
    fmt.Println(commd)

    // dump tree.dat
    dat, err := os.ReadFile(tempFile)
    require.NoError(t, err)

    actualD := hexPrint32LDedup(bytes.NewReader(dat))
    fmt.Println(actualD)

    require.EqualValues(t, expectD8M, actualD)
    require.Equal(t, "baga6ea4seaqcgqckrcapts6hea44xbqugwocqneekvyp5fizbo6u3e2biluckla", commd.String())
}

/*func Test32Golden(t *testing.T) {
    datFile, err := os.Open("../../seal/cac/sc-02-data-tree-d.dat")
    require.NoError(t, err)

    bufReader := bufio.NewReaderSize(datFile, 1<<20)

    actualD := hexPrint32LDedup(bufReader)
    fmt.Println(actualD)
}
*/

var expect32Null = `00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
800000000: f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b
*
c00000000: 37 31 bb 99 ac 68 9f 66 ee f5 97 3e 4a 94 da 18 8f 4d dc ae 58 07 24 fc 6f 3f d6 0d fd 48 83 33
*
e00000000: 64 2a 60 7e f8 86 b0 04 bf 2c 19 78 46 3a e1 d4 69 3a c0 f4 10 eb 2d 1b 7a 47 fe 20 5e 5e 75 0f
*
f00000000: 57 a2 38 1a 28 65 2b f4 7f 6b ef 7a ca 67 9b e4 ae de 58 71 ab 5c f3 eb 2c 08 11 44 88 cb 85 26
*
f80000000: 1f 7a c9 59 55 10 e0 9e a4 1c 46 0b 17 64 30 bb 32 2c d6 fb 41 2e c5 7c b1 7d 98 9a 43 10 37 2f
*
fc0000000: fc 7e 92 82 96 e5 16 fa ad e9 86 b2 8f 92 d4 4a 4f 24 b9 35 48 52 23 37 6a 79 90 27 bc 18 f8 33
*
fe0000000: 08 c4 7b 38 ee 13 bc 43 f4 1b 91 5c 0e ed 99 11 a2 60 86 b3 ed 62 40 1b f9 d5 8b 8d 19 df f6 24
*
ff0000000: b2 e4 7b fb 11 fa cd 94 1f 62 af 5c 75 0f 3e a5 cc 4d f5 17 d5 c4 f1 6d b2 b4 d7 7b ae c1 a3 2f
*
ff8000000: f9 22 61 60 c8 f9 27 bf dc c4 18 cd f2 03 49 31 46 00 8e ae fb 7d 02 19 4d 5e 54 81 89 00 51 08
*
ffc000000: 2c 1a 96 4b b9 0b 59 eb fe 0f 6d a2 9a d6 5a e3 e4 17 72 4a 8f 7c 11 74 5a 40 ca c1 e5 e7 40 11
*
ffe000000: fe e3 78 ce f1 64 04 b1 99 ed e0 b1 3e 11 b6 24 ff 9d 78 4f bb ed 87 8d 83 29 7e 79 5e 02 4f 02
*
fff000000: 8e 9e 24 03 fa 88 4c f6 23 7f 60 df 25 f8 3e e4 0d ca 9e d8 79 eb 6f 63 52 d1 50 84 f5 ad 0d 3f
*
fff800000: 75 2d 96 93 fa 16 75 24 39 54 76 e3 17 a9 85 80 f0 09 47 af b7 a3 05 40 d6 25 a9 29 1c c1 2a 07
*
fffc00000: 70 22 f6 0f 7e f6 ad fa 17 11 7a 52 61 9e 30 ce a8 2c 68 07 5a df 1c 66 77 86 ec 50 6e ef 2d 19
*
fffe00000: d9 98 87 b9 73 57 3a 96 e1 13 93 64 52 36 c1 7b 1f 4c 70 34 d7 23 c7 a9 9f 70 9b b4 da 61 16 2b
*
ffff00000: d0 b5 30 db b0 b4 f2 5c 5d 2f 2a 28 df ee 80 8b 53 41 2a 02 93 1f 18 c4 99 f5 a2 54 08 6b 13 26
*
ffff80000: 84 c0 42 1b a0 68 5a 01 bf 79 5a 23 44 06 4f e4 24 bd 52 a9 d2 43 77 b3 94 ff 4c 4b 45 68 e8 11
*
ffffc0000: 65 f2 9e 5d 98 d2 46 c3 8b 38 8c fc 06 db 1f 6b 02 13 03 c5 a2 89 00 0b dc e8 32 a9 c3 ec 42 1c
*
ffffe0000: a2 24 75 08 28 58 50 96 5b 7e 33 4b 31 27 b0 c0 42 b1 d0 46 dc 54 40 21 37 62 7c d8 79 9c e1 3a
*
fffff0000: da fd ab 6d a9 36 44 53 c2 6d 33 72 6b 9f ef e3 43 be 8f 81 64 9e c0 09 aa d3 fa ff 50 61 75 08
*
fffff8000: d9 41 d5 e0 d6 31 4a 99 5c 33 ff bd 4f be 69 11 8d 73 d4 e5 fd 2c d3 1f 0f 7c 86 eb dd 14 e7 06
*
fffffc000: 51 4c 43 5c 3d 04 d3 49 a5 36 5f bd 59 ff c7 13 62 91 11 78 59 91 c1 a3 c5 3a f2 20 79 74 1a 2f
*
fffffe000: ad 06 85 39 69 d3 7d 34 ff 08 e0 9f 56 93 0a 4a d1 9a 89 de f6 0c bf ee 7e 1d 33 81 c1 e7 1c 37
*
ffffff000: 39 56 0e 7b 13 a9 3b 07 a2 43 fd 27 20 ff a7 cb 3e 1d 2e 50 5a b3 62 9e 79 f4 63 13 51 2c da 06
*
ffffff800: cc c3 c0 12 f5 b0 5e 81 1a 2b bf dd 0f 68 33 b8 42 75 b4 7b f2 29 c0 05 2a 82 48 4f 3c 1a 5b 3d
*
ffffffc00: 7d f2 9b 69 77 31 99 e8 f2 b4 0b 77 91 9d 04 85 09 ee d7 68 e2 c7 29 7b 1f 14 37 03 4f c3 c6 2c
*
ffffffe00: 66 ce 05 a3 66 75 52 cf 45 c0 2b cc 4e 83 92 91 9b de ac 35 de 2f f5 62 71 84 8e 9f 7b 67 51 07
*
fffffff00: d8 61 02 18 42 5a b5 e9 5b 1c a6 23 9d 29 a2 e4 20 d7 06 a9 6f 37 3e 2f 9c 9a 91 d7 59 d1 9b 01
*
fffffff80: 6d 36 4b 1e f8 46 44 1a 5a 4a 68 86 23 14 ac c0 a4 6f 01 67 17 e5 34 43 e8 39 ee df 83 c2 85 3c
*
fffffffc0: 07 7e 5f de 35 c5 0a 93 03 a5 50 09 e3 49 8a 4e be df f3 9c 42 b7 10 b7 30 d8 ec 7a c7 af a6 3e
*
`

func Test32G(t *testing.T) {
    if os.Getenv("LOTUS_TEST_LARGE_SECTORS") != "1" {
        t.Skip("skipping large sector test without env LOTUS_TEST_LARGE_SECTORS=1")
    }

    data := nullreader.NewNullReader(abi.PaddedPieceSize(32 << 30).Unpadded())

    tempFile := filepath.Join(t.TempDir(), "tree.dat")

    commd, err := BuildTreeD(data, true, tempFile, 32<<30)
    require.NoError(t, err)
    fmt.Println(commd)

    // dump tree.dat
    datFile, err := os.Open(tempFile)
    require.NoError(t, err)
    defer func() {
        require.NoError(t, datFile.Close())
    }()

    actualD := hexPrint32LDedup(bufio.NewReaderSize(datFile, 1<<20))
    fmt.Println(actualD)

    require.EqualValues(t, expect32Null, actualD)
    require.Equal(t, "baga6ea4seaqao7s73y24kcutaosvacpdjgfe5pw76ooefnyqw4ynr3d2y6x2mpq", commd.String())
}

func hexPrint32LDedup(r io.Reader) string {
    var prevLine []byte
    var outStr string
    var duplicateLine bool
    buffer := make([]byte, 32)
    offset := 0

    for {
        n, err := r.Read(buffer)
        if err == io.EOF {
            break
        }
        if err != nil {
            // Handle the error according to your application's requirements
            fmt.Println("Error reading:", err)
            break
        }

        if string(prevLine) == string(buffer) {
            // Mark as duplicate and skip processing
            duplicateLine = true
        } else {
            if duplicateLine {
                // Output a marker for the previous duplicate line
                outStr += "*\n"
                duplicateLine = false
            }
            // Convert to hex and output
            outStr += fmt.Sprintf("%08x: %s\n", offset, toHex(buffer))

            // Update prevLine
            if len(prevLine) != 32 {
                prevLine = make([]byte, 32)
            }
            copy(prevLine, buffer)
        }

        offset += n
    }

    // If the last line was a duplicate, ensure we mark it
    if duplicateLine {
        outStr += "*\n"
    }

    return outStr
}

func toHex(data []byte) string {
    var hexStr string
    for _, b := range data {
        hexStr += fmt.Sprintf("%02x ", b)
    }
    return hexStr
}

func BenchmarkHashChunk(b *testing.B) {
    const benchSize = 1024 * 1024

    // Generate 1 MiB of random data
    randomData := make([]byte, benchSize)
    if _, err := rand.Read(randomData); err != nil {
        b.Fatalf("Failed to generate random data: %v", err)
    }

    // Prepare data structure for hashChunk
    data := make([][]byte, 1)
    data[0] = randomData

    // append levels until we get to a 32 byte level
    for len(data[len(data)-1]) > 32 {
        newLevel := make([]byte, len(data[len(data)-1])/2)
        data = append(data, newLevel)
    }

    b.SetBytes(benchSize) // Set the number of bytes for the benchmark

    b.ResetTimer() // Start the timer after setup

    for i := 0; i < b.N; i++ {
        hashChunk(data)
        // Use the result in some way to avoid compiler optimization
        _ = data[1]
    }
}

func BenchmarkBuildTreeD512M(b *testing.B) {
    const dataSize = 512 * 1024 * 1024 // 512 MiB

    // Generate 512 MiB of random data
    data := make([]byte, dataSize)
    if _, err := rand.Read(data); err != nil {
        b.Fatalf("Failed to generate random data: %v", err)
    }

    // preallocate NumCPU+1 1MiB/512k/256k/...
    // with Pool.Get / Pool.Put, so that they are in the pool
    {
        nc := runtime.NumCPU()
        bufs := [][]byte{}
        for i := 0; i < nc+1; i++ {
            for sz := 1 << 20; sz > 32; sz >>= 1 {
                b := pool.Get(sz)
                bufs = append(bufs, b)
            }
        }
        for _, b := range bufs {
            pool.Put(b)
        }
    }

    /*if b.N == 1 {
        b.N = 10
    }*/

    b.SetBytes(int64(dataSize)) // Set the number of bytes for the benchmark

    for i := 0; i < b.N; i++ {
        // Create a temporary file for each iteration
        tempFile, err := os.CreateTemp("", "tree.dat")
        if err != nil {
            b.Fatalf("Failed to create temporary file: %v", err)
        }
        tempFilePath := tempFile.Name()
        err = tempFile.Close()
        if err != nil {
            b.Fatalf("Failed to close temporary file: %v", err)
        }

        b.StartTimer() // Start the timer for the BuildTreeD operation

        _, err = BuildTreeD(bytes.NewReader(data), false, tempFilePath, dataSize)
        if err != nil {
            b.Fatalf("BuildTreeD failed: %v", err)
        }

        b.StopTimer() // Stop the timer after BuildTreeD completes

        // Clean up the temporary file
        err = os.Remove(tempFilePath)
        if err != nil {
            b.Fatalf("Failed to remove temporary file: %v", err)
        }
    }
}

func TestLayerOffset(t *testing.T) {
    {
        size := uint64(2048)

        require.Equal(t, uint64(0), layerOffset(size, 0))
        require.Equal(t, size, layerOffset(size, 1))
        require.Equal(t, size+(size/2), layerOffset(size, 2))
        require.Equal(t, size+(size/2)+(size/4), layerOffset(size, 3))
        require.Equal(t, size+(size/2)+(size/4)+(size/8), layerOffset(size, 4))
        require.Equal(t, size+(size/2)+(size/4)+(size/8)+(size/16), layerOffset(size, 5))
    }

    {
        size := uint64(32 << 30)
        maxLayers := 30

        for i := 0; i <= maxLayers; i++ {
            var expect uint64
            for j := 0; j < i; j++ {
                expect += size >> uint64(j)
            }

            fmt.Printf("layer %d: %d\n", i, expect)
            require.Equal(t, expect, layerOffset(size, i))
        }
    }

    {
        size := uint64(64 << 30)
        maxLayers := 31

        for i := 0; i <= maxLayers; i++ {
            var expect uint64
            for j := 0; j < i; j++ {
                expect += size >> uint64(j)
            }

            fmt.Printf("layer %d: %d\n", i, expect)
            require.Equal(t, expect, layerOffset(size, i))
        }
    }
}

provider/lpseal/README.md (new file, 28 lines)
@ -0,0 +1,28 @@
# Lotus-Provider Sealer

## Overview

The lotus-provider sealer is a collection of harmony tasks and a common poller
which implement the sealing functionality of the Filecoin protocol.

## Pipeline Tasks

* SDR pipeline
  * `SDR` - Generate SDR layers
  * `SDRTrees` - Generate tree files (TreeD, TreeR, TreeC)
  * `PreCommitSubmit` - Submit precommit message to the network
  * `PoRep` - Generate PoRep proof
  * `CommitSubmit` - Submit commit message to the network

## Poller

The poller is a background process running on every node which runs any of the
SDR pipeline tasks. It periodically checks the state of sectors in the SDR pipeline
and schedules any tasks to run which will move the sector along the pipeline.

## Error Handling

* Pipeline tasks are expected to always finish successfully as harmonytask tasks.
  If a sealing task encounters an error, it should mark the sector pipeline entry
  as failed and exit without erroring. The poller will then figure out a recovery
  strategy for the sector.

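To make the error-handling rule above concrete, here is a minimal hedged sketch of what a task's Do method could look like under these conventions. The sectors_sdr_pipeline columns (failed, failed_reason, task_id_sdr) and the harmonydb.DB.Exec call match this diff; the SDRTask struct, the runSDR helper, and the exact harmonytask Do signature are assumptions for illustration, not the actual task implementation:

package lpseal

import (
    "context"

    "github.com/filecoin-project/lotus/lib/harmony/harmonydb"
    "github.com/filecoin-project/lotus/lib/harmony/harmonytask"
)

// SDRTask is a hypothetical stand-in; only the DB handle matters for this sketch.
type SDRTask struct {
    db *harmonydb.DB
}

// runSDR is a hypothetical stand-in for the actual sealing work.
func (t *SDRTask) runSDR(ctx context.Context) error { return nil }

// Do returns done=true even when sealing fails: the failure is recorded in the
// pipeline row and the poller owns recovery from there. Only serious errors
// (e.g. the DB being unreachable) are returned to harmonytask for retry.
func (t *SDRTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
    ctx := context.Background()

    if serr := t.runSDR(ctx); serr != nil {
        // Mark the sector pipeline entry as failed and release the task slot.
        _, uerr := t.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
            SET failed = TRUE, failed_reason = $1, task_id_sdr = NULL
            WHERE task_id_sdr = $2`, serr.Error(), taskID)
        if uerr != nil {
            return false, uerr
        }
        return true, nil
    }

    return true, nil
}

The poller below then sees failed = TRUE on its next pass and skips the sector until a recovery strategy is applied.
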
285
provider/lpseal/poller.go
Normal file
285
provider/lpseal/poller.go
Normal file
@ -0,0 +1,285 @@
|
|||||||
|
package lpseal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
logging "github.com/ipfs/go-log/v2"
|
||||||
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
|
||||||
|
"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
|
||||||
|
"github.com/filecoin-project/lotus/lib/promise"
|
||||||
|
)
|
||||||
|
|
||||||
|
var log = logging.Logger("lpseal")
|
||||||
|
|
||||||
|
const (
|
||||||
|
pollerSDR = iota
|
||||||
|
pollerTrees
|
||||||
|
pollerPrecommitMsg
|
||||||
|
pollerPoRep
|
||||||
|
pollerCommitMsg
|
||||||
|
pollerFinalize
|
||||||
|
pollerMoveStorage
|
||||||
|
|
||||||
|
numPollers
|
||||||
|
)
|
||||||
|
|
||||||
|
const sealPollerInterval = 10 * time.Second
|
||||||
|
const seedEpochConfidence = 3
|
||||||
|
|
||||||
|
type SealPollerAPI interface {
|
||||||
|
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error)
|
||||||
|
StateSectorGetInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
|
||||||
|
ChainHead(context.Context) (*types.TipSet, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type SealPoller struct {
|
||||||
|
db *harmonydb.DB
|
||||||
|
api SealPollerAPI
|
||||||
|
|
||||||
|
pollers [numPollers]promise.Promise[harmonytask.AddTaskFunc]
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPoller(db *harmonydb.DB, api SealPollerAPI) *SealPoller {
|
||||||
|
return &SealPoller{
|
||||||
|
db: db,
|
||||||
|
api: api,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SealPoller) RunPoller(ctx context.Context) {
|
||||||
|
ticker := time.NewTicker(sealPollerInterval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
if err := s.poll(ctx); err != nil {
|
||||||
|
log.Errorw("polling failed", "error", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
NOTE: TaskIDs are ONLY set while the tasks are executing or waiting to execute.
|
||||||
|
This means that there are ~4 states each task can be in:
|
||||||
|
* Not run, and dependencies not solved (dependencies are 'After' fields of previous stages), task is null, After is false
|
||||||
|
* Not run, and dependencies solved, task is null, After is false
|
||||||
|
* Running or queued, task is set, After is false
|
||||||
|
* Finished, task is null, After is true
|
||||||
|
*/
|
||||||
|
|
||||||
|
type pollTask struct {
|
||||||
|
SpID int64 `db:"sp_id"`
|
||||||
|
SectorNumber int64 `db:"sector_number"`
|
||||||
|
|
||||||
|
TaskSDR *int64 `db:"task_id_sdr"`
|
||||||
|
AfterSDR bool `db:"after_sdr"`
|
||||||
|
|
||||||
|
TaskTreeD *int64 `db:"task_id_tree_d"`
|
||||||
|
AfterTreeD bool `db:"after_tree_d"`
|
||||||
|
|
||||||
|
TaskTreeC *int64 `db:"task_id_tree_c"`
|
||||||
|
AfterTreeC bool `db:"after_tree_c"`
|
||||||
|
|
||||||
|
TaskTreeR *int64 `db:"task_id_tree_r"`
|
||||||
|
AfterTreeR bool `db:"after_tree_r"`
|
||||||
|
|
||||||
|
TaskPrecommitMsg *int64 `db:"task_id_precommit_msg"`
|
||||||
|
AfterPrecommitMsg bool `db:"after_precommit_msg"`
|
||||||
|
|
||||||
|
AfterPrecommitMsgSuccess bool `db:"after_precommit_msg_success"`
|
||||||
|
SeedEpoch *int64 `db:"seed_epoch"`
|
||||||
|
|
||||||
|
TaskPoRep *int64 `db:"task_id_porep"`
|
||||||
|
PoRepProof []byte `db:"porep_proof"`
|
||||||
|
AfterPoRep bool `db:"after_porep"`
|
||||||
|
|
||||||
|
TaskFinalize *int64 `db:"task_id_finalize"`
|
||||||
|
AfterFinalize bool `db:"after_finalize"`
|
||||||
|
|
||||||
|
TaskMoveStorage *int64 `db:"task_id_move_storage"`
|
||||||
|
AfterMoveStorage bool `db:"after_move_storage"`
|
||||||
|
|
||||||
|
TaskCommitMsg *int64 `db:"task_id_commit_msg"`
|
||||||
|
AfterCommitMsg bool `db:"after_commit_msg"`
|
||||||
|
|
||||||
|
AfterCommitMsgSuccess bool `db:"after_commit_msg_success"`
|
||||||
|
|
||||||
|
Failed bool `db:"failed"`
|
||||||
|
FailedReason string `db:"failed_reason"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SealPoller) poll(ctx context.Context) error {
|
||||||
|
var tasks []pollTask
|
||||||
|
|
||||||
|
err := s.db.Select(ctx, &tasks, `SELECT
|
||||||
|
sp_id, sector_number,
|
||||||
|
task_id_sdr, after_sdr,
|
||||||
|
task_id_tree_d, after_tree_d,
|
||||||
|
task_id_tree_c, after_tree_c,
|
||||||
|
task_id_tree_r, after_tree_r,
|
||||||
|
task_id_precommit_msg, after_precommit_msg,
|
||||||
|
after_precommit_msg_success, seed_epoch,
|
||||||
|
task_id_porep, porep_proof, after_porep,
|
||||||
|
task_id_finalize, after_finalize,
|
||||||
|
task_id_move_storage, after_move_storage,
|
||||||
|
task_id_commit_msg, after_commit_msg,
|
||||||
|
after_commit_msg_success,
|
||||||
|
failed, failed_reason
|
||||||
|
FROM sectors_sdr_pipeline WHERE after_commit_msg_success != TRUE OR after_move_storage != TRUE`)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, task := range tasks {
|
||||||
|
task := task
|
||||||
|
if task.Failed {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
ts, err := s.api.ChainHead(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("getting chain head: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.pollStartSDR(ctx, task)
|
||||||
|
s.pollStartSDRTrees(ctx, task)
|
||||||
|
s.pollStartPrecommitMsg(ctx, task)
|
||||||
|
s.mustPoll(s.pollPrecommitMsgLanded(ctx, task))
|
||||||
|
s.pollStartPoRep(ctx, task, ts)
|
||||||
|
s.pollStartFinalize(ctx, task, ts)
|
||||||
|
s.pollStartMoveStorage(ctx, task)
|
||||||
|
s.pollStartCommitMsg(ctx, task)
|
||||||
|
s.mustPoll(s.pollCommitMsgLanded(ctx, task))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SealPoller) pollStartSDR(ctx context.Context, task pollTask) {
|
||||||
|
if !task.AfterSDR && task.TaskSDR == nil && s.pollers[pollerSDR].IsSet() {
|
||||||
|
s.pollers[pollerSDR].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
|
||||||
|
n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_sdr = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_sdr IS NULL`, id, task.SpID, task.SectorNumber)
|
||||||
|
if err != nil {
|
||||||
|
return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
|
||||||
|
}
|
||||||
|
if n != 1 {
|
||||||
|
return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t pollTask) afterSDR() bool {
|
||||||
|
return t.AfterSDR
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SealPoller) pollStartSDRTrees(ctx context.Context, task pollTask) {
	if !task.AfterTreeD && !task.AfterTreeC && !task.AfterTreeR &&
		task.TaskTreeD == nil && task.TaskTreeC == nil && task.TaskTreeR == nil &&
		s.pollers[pollerTrees].IsSet() && task.AfterSDR {

		s.pollers[pollerTrees].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
			n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_tree_d = $1, task_id_tree_c = $1, task_id_tree_r = $1
			WHERE sp_id = $2 AND sector_number = $3 AND after_sdr = TRUE AND task_id_tree_d IS NULL AND task_id_tree_c IS NULL AND task_id_tree_r IS NULL`, id, task.SpID, task.SectorNumber)
			if err != nil {
				return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
			}
			if n != 1 {
				return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
			}

			return true, nil
		})
	}
}

func (t pollTask) afterTrees() bool {
	return t.AfterTreeD && t.AfterTreeC && t.AfterTreeR && t.afterSDR()
}

func (t pollTask) afterPrecommitMsg() bool {
	return t.AfterPrecommitMsg && t.afterTrees()
}

func (t pollTask) afterPrecommitMsgSuccess() bool {
	return t.AfterPrecommitMsgSuccess && t.afterPrecommitMsg()
}

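// pollStartPoRep schedules the PoRep proving task once the precommit message has landed and the chain has advanced past the seed epoch by seedEpochConfidence.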
func (s *SealPoller) pollStartPoRep(ctx context.Context, task pollTask, ts *types.TipSet) {
	if s.pollers[pollerPoRep].IsSet() && task.afterPrecommitMsgSuccess() && task.SeedEpoch != nil &&
		task.TaskPoRep == nil && !task.AfterPoRep &&
		ts.Height() >= abi.ChainEpoch(*task.SeedEpoch+seedEpochConfidence) {

		s.pollers[pollerPoRep].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
			n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_porep = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_porep IS NULL`, id, task.SpID, task.SectorNumber)
			if err != nil {
				return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
			}
			if n != 1 {
				return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
			}

			return true, nil
		})
	}
}

func (t pollTask) afterPoRep() bool {
	return t.AfterPoRep && t.afterPrecommitMsgSuccess()
}

func (s *SealPoller) pollStartFinalize(ctx context.Context, task pollTask, ts *types.TipSet) {
	if s.pollers[pollerFinalize].IsSet() && task.afterPoRep() && !task.AfterFinalize && task.TaskFinalize == nil {
		s.pollers[pollerFinalize].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
			n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_finalize = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_finalize IS NULL`, id, task.SpID, task.SectorNumber)
			if err != nil {
				return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
			}
			if n != 1 {
				return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
			}

			return true, nil
		})
	}
}

func (t pollTask) afterFinalize() bool {
	return t.AfterFinalize && t.afterPoRep()
}

func (s *SealPoller) pollStartMoveStorage(ctx context.Context, task pollTask) {
	if s.pollers[pollerMoveStorage].IsSet() && task.afterFinalize() && !task.AfterMoveStorage && task.TaskMoveStorage == nil {
		s.pollers[pollerMoveStorage].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
			n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_move_storage = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_move_storage IS NULL`, id, task.SpID, task.SectorNumber)
			if err != nil {
				return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
			}
			if n != 1 {
				return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
			}

			return true, nil
		})
	}
}

func (s *SealPoller) mustPoll(err error) {
	if err != nil {
		log.Errorw("poller operation failed", "error", err)
	}
}
108 provider/lpseal/poller_commit_msg.go Normal file
@ -0,0 +1,108 @@
package lpseal

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/exitcode"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
)

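// pollStartCommitMsg schedules sending the commit message once a PoRep proof is available.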
func (s *SealPoller) pollStartCommitMsg(ctx context.Context, task pollTask) {
	if task.afterPoRep() && len(task.PoRepProof) > 0 && task.TaskCommitMsg == nil && !task.AfterCommitMsg && s.pollers[pollerCommitMsg].IsSet() {
		s.pollers[pollerCommitMsg].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
			n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_commit_msg = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_commit_msg IS NULL`, id, task.SpID, task.SectorNumber)
			if err != nil {
				return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
			}
			if n != 1 {
				return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
			}

			return true, nil
		})
	}
}

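// pollCommitMsgLanded checks whether the commit message has executed on chain; on success it flags after_commit_msg_success, on failure it dispatches to pollCommitMsgFail.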
func (s *SealPoller) pollCommitMsgLanded(ctx context.Context, task pollTask) error {
	if task.AfterCommitMsg && !task.AfterCommitMsgSuccess && s.pollers[pollerCommitMsg].IsSet() {
		var execResult []dbExecResult

		err := s.db.Select(ctx, &execResult, `SELECT spipeline.precommit_msg_cid, spipeline.commit_msg_cid, executed_tsk_cid, executed_tsk_epoch, executed_msg_cid, executed_rcpt_exitcode, executed_rcpt_gas_used
	FROM sectors_sdr_pipeline spipeline
	JOIN message_waits ON spipeline.commit_msg_cid = message_waits.signed_message_cid
	WHERE sp_id = $1 AND sector_number = $2 AND executed_tsk_epoch IS NOT NULL`, task.SpID, task.SectorNumber)
		if err != nil {
			log.Errorw("failed to query message_waits", "error", err)
		}

		if len(execResult) > 0 {
			maddr, err := address.NewIDAddress(uint64(task.SpID))
			if err != nil {
				return err
			}

			if exitcode.ExitCode(execResult[0].ExecutedRcptExitCode) != exitcode.Ok {
				return s.pollCommitMsgFail(ctx, task, execResult[0])
			}

			si, err := s.api.StateSectorGetInfo(ctx, maddr, abi.SectorNumber(task.SectorNumber), types.EmptyTSK)
			if err != nil {
				return xerrors.Errorf("get sector info: %w", err)
			}

			if si == nil {
				log.Errorw("todo handle missing sector info (not found after cron)", "sp", task.SpID, "sector", task.SectorNumber, "exec_epoch", execResult[0].ExecutedTskEpoch, "exec_tskcid", execResult[0].ExecutedTskCID, "msg_cid", execResult[0].ExecutedMsgCID)
				// todo handle missing sector info (not found after cron)
			} else {
				// yay!

				_, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET
				after_commit_msg_success = TRUE, commit_msg_tsk = $1
				WHERE sp_id = $2 AND sector_number = $3 AND after_commit_msg_success = FALSE`,
					execResult[0].ExecutedTskCID, task.SpID, task.SectorNumber)
				if err != nil {
					return xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
				}
			}
		}
	}

	return nil
}

func (s *SealPoller) pollCommitMsgFail(ctx context.Context, task pollTask, execResult dbExecResult) error {
	switch exitcode.ExitCode(execResult.ExecutedRcptExitCode) {
	case exitcode.SysErrInsufficientFunds:
		fallthrough
	case exitcode.SysErrOutOfGas:
		// just retry
		return s.pollRetryCommitMsgSend(ctx, task, execResult)
	default:
		return xerrors.Errorf("commit message failed with exit code %s", exitcode.ExitCode(execResult.ExecutedRcptExitCode))
	}
}

func (s *SealPoller) pollRetryCommitMsgSend(ctx context.Context, task pollTask, execResult dbExecResult) error {
	if execResult.CommitMsgCID == nil {
		return xerrors.Errorf("commit msg cid was nil")
	}

	// make the pipeline entry seem like commit send didn't happen, next poll loop will retry

	_, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET
	commit_msg_cid = NULL, task_id_commit_msg = NULL
	WHERE commit_msg_cid = $1 AND sp_id = $2 AND sector_number = $3 AND after_commit_msg_success = FALSE`,
		*execResult.CommitMsgCID, task.SpID, task.SectorNumber)
	if err != nil {
		return xerrors.Errorf("update sectors_sdr_pipeline to retry commit msg send: %w", err)
	}

	return nil
}
119 provider/lpseal/poller_precommit_msg.go Normal file
@ -0,0 +1,119 @@
package lpseal

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/exitcode"

	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
)

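// pollStartPrecommitMsg schedules sending the precommit message once both TreeD and TreeR are done.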
func (s *SealPoller) pollStartPrecommitMsg(ctx context.Context, task pollTask) {
	if task.TaskPrecommitMsg == nil && !task.AfterPrecommitMsg && task.afterTrees() && s.pollers[pollerPrecommitMsg].IsSet() {
		s.pollers[pollerPrecommitMsg].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
			n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_precommit_msg = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_precommit_msg IS NULL AND after_tree_r = TRUE AND after_tree_d = TRUE`, id, task.SpID, task.SectorNumber)
			if err != nil {
				return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
			}
			if n != 1 {
				return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
			}

			return true, nil
		})
	}
}

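// dbExecResult mirrors the columns selected when joining sectors_sdr_pipeline with message_waits.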
type dbExecResult struct {
	PrecommitMsgCID *string `db:"precommit_msg_cid"`
	CommitMsgCID    *string `db:"commit_msg_cid"`

	ExecutedTskCID   string `db:"executed_tsk_cid"`
	ExecutedTskEpoch int64  `db:"executed_tsk_epoch"`
	ExecutedMsgCID   string `db:"executed_msg_cid"`

	ExecutedRcptExitCode int64 `db:"executed_rcpt_exitcode"`
	ExecutedRcptGasUsed  int64 `db:"executed_rcpt_gas_used"`
}

func (s *SealPoller) pollPrecommitMsgLanded(ctx context.Context, task pollTask) error {
	if task.AfterPrecommitMsg && !task.AfterPrecommitMsgSuccess {
		var execResult []dbExecResult

		err := s.db.Select(ctx, &execResult, `SELECT spipeline.precommit_msg_cid, spipeline.commit_msg_cid, executed_tsk_cid, executed_tsk_epoch, executed_msg_cid, executed_rcpt_exitcode, executed_rcpt_gas_used
	FROM sectors_sdr_pipeline spipeline
	JOIN message_waits ON spipeline.precommit_msg_cid = message_waits.signed_message_cid
	WHERE sp_id = $1 AND sector_number = $2 AND executed_tsk_epoch IS NOT NULL`, task.SpID, task.SectorNumber)
		if err != nil {
			log.Errorw("failed to query message_waits", "error", err)
		}

		if len(execResult) > 0 {
			if exitcode.ExitCode(execResult[0].ExecutedRcptExitCode) != exitcode.Ok {
				return s.pollPrecommitMsgFail(ctx, task, execResult[0])
			}

			maddr, err := address.NewIDAddress(uint64(task.SpID))
			if err != nil {
				return err
			}

			pci, err := s.api.StateSectorPreCommitInfo(ctx, maddr, abi.SectorNumber(task.SectorNumber), types.EmptyTSK)
			if err != nil {
				return xerrors.Errorf("get precommit info: %w", err)
			}

			if pci != nil {
				randHeight := pci.PreCommitEpoch + policy.GetPreCommitChallengeDelay()

				_, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET
				seed_epoch = $1, precommit_msg_tsk = $2, after_precommit_msg_success = TRUE
				WHERE sp_id = $3 AND sector_number = $4 AND seed_epoch IS NULL`,
					randHeight, execResult[0].ExecutedTskCID, task.SpID, task.SectorNumber)
				if err != nil {
					return xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
				}
			} // todo handle missing precommit info (eg expired precommit)

		}
	}

	return nil
}

func (s *SealPoller) pollPrecommitMsgFail(ctx context.Context, task pollTask, execResult dbExecResult) error {
	switch exitcode.ExitCode(execResult.ExecutedRcptExitCode) {
	case exitcode.SysErrInsufficientFunds:
		fallthrough
	case exitcode.SysErrOutOfGas:
		// just retry
		return s.pollRetryPrecommitMsgSend(ctx, task, execResult)
	default:
		return xerrors.Errorf("precommit message failed with exit code %s", exitcode.ExitCode(execResult.ExecutedRcptExitCode))
	}
}

func (s *SealPoller) pollRetryPrecommitMsgSend(ctx context.Context, task pollTask, execResult dbExecResult) error {
	if execResult.PrecommitMsgCID == nil {
		return xerrors.Errorf("precommit msg cid was nil")
	}

	// make the pipeline entry seem like precommit send didn't happen, next poll loop will retry

	_, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET
	precommit_msg_cid = NULL, task_id_precommit_msg = NULL
	WHERE precommit_msg_cid = $1 AND sp_id = $2 AND sector_number = $3 AND after_precommit_msg_success = FALSE`,
		*execResult.PrecommitMsgCID, task.SpID, task.SectorNumber)
	if err != nil {
		return xerrors.Errorf("update sectors_sdr_pipeline to retry precommit msg send: %w", err)
	}

	return nil
}
127 provider/lpseal/sector_num_alloc.go Normal file
@ -0,0 +1,127 @@
package lpseal

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
	rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)

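// AllocAPI is the subset of the chain API needed for sector number allocation.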
type AllocAPI interface {
	StateMinerAllocated(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error)
}

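// AllocateSectorNumbers reserves count sector numbers for the given miner. It merges the
// on-chain allocated bitfield with the numbers tracked in the database, picks the lowest
// free numbers, persists the updated bitfield, and runs the optional callbacks inside the
// same transaction so callers can atomically record their own state.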
func AllocateSectorNumbers(ctx context.Context, a AllocAPI, db *harmonydb.DB, maddr address.Address, count int, txcb ...func(*harmonydb.Tx, []abi.SectorNumber) (bool, error)) ([]abi.SectorNumber, error) {
	chainAlloc, err := a.StateMinerAllocated(ctx, maddr, types.EmptyTSK)
	if err != nil {
		return nil, xerrors.Errorf("getting on-chain allocated sector numbers: %w", err)
	}

	mid, err := address.IDFromAddress(maddr)
	if err != nil {
		return nil, xerrors.Errorf("getting miner id: %w", err)
	}

	var res []abi.SectorNumber

	comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
		res = nil // reset result in case of retry

		// query from db, if exists unmarshal to bitfield
		var dbAllocated bitfield.BitField
		var rawJson []byte

		err = tx.QueryRow("SELECT COALESCE(allocated, '[0]') from sectors_allocated_numbers sa FULL OUTER JOIN (SELECT 1) AS d ON TRUE WHERE sp_id = $1 OR sp_id IS NULL", mid).Scan(&rawJson)
		if err != nil {
			return false, xerrors.Errorf("querying allocated sector numbers: %w", err)
		}

		if rawJson != nil {
			err = dbAllocated.UnmarshalJSON(rawJson)
			if err != nil {
				return false, xerrors.Errorf("unmarshaling allocated sector numbers: %w", err)
			}
		}

		merged, err := bitfield.MergeBitFields(*chainAlloc, dbAllocated)
		if err != nil {
			return false, xerrors.Errorf("merging allocated sector numbers: %w", err)
		}

		allAssignable, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{Runs: []rlepluslazy.Run{
			{
				Val: true,
				Len: abi.MaxSectorNumber,
			},
		}})
		if err != nil {
			return false, xerrors.Errorf("creating assignable sector numbers: %w", err)
		}

		inverted, err := bitfield.SubtractBitField(allAssignable, merged)
		if err != nil {
			return false, xerrors.Errorf("subtracting allocated sector numbers: %w", err)
		}

		toAlloc, err := inverted.Slice(0, uint64(count))
		if err != nil {
			return false, xerrors.Errorf("getting slice of allocated sector numbers: %w", err)
		}

		err = toAlloc.ForEach(func(u uint64) error {
			res = append(res, abi.SectorNumber(u))
			return nil
		})
		if err != nil {
			return false, xerrors.Errorf("iterating allocated sector numbers: %w", err)
		}

		toPersist, err := bitfield.MergeBitFields(merged, toAlloc)
		if err != nil {
			return false, xerrors.Errorf("merging allocated sector numbers: %w", err)
		}

		rawJson, err = toPersist.MarshalJSON()
		if err != nil {
			return false, xerrors.Errorf("marshaling allocated sector numbers: %w", err)
		}

		_, err = tx.Exec("INSERT INTO sectors_allocated_numbers(sp_id, allocated) VALUES($1, $2) ON CONFLICT(sp_id) DO UPDATE SET allocated = $2", mid, rawJson)
		if err != nil {
			return false, xerrors.Errorf("persisting allocated sector numbers: %w", err)
		}

		for i, f := range txcb {
			commit, err = f(tx, res)
			if err != nil {
				return false, xerrors.Errorf("executing tx callback %d: %w", i, err)
			}

			if !commit {
				return false, nil
			}
		}

		return true, nil
	}, harmonydb.OptionRetry())

	if err != nil {
		return nil, xerrors.Errorf("allocating sector numbers: %w", err)
	}
	if !comm {
		return nil, xerrors.Errorf("allocating sector numbers: commit failed")
	}

	return res, nil
}
152 provider/lpseal/task_finalize.go Normal file
@ -0,0 +1,152 @@
package lpseal

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
	"github.com/filecoin-project/lotus/lib/harmony/resources"
	"github.com/filecoin-project/lotus/provider/lpffi"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

type FinalizeTask struct {
	max int
	sp  *SealPoller
	sc  *lpffi.SealCalls
	db  *harmonydb.DB
}

func NewFinalizeTask(max int, sp *SealPoller, sc *lpffi.SealCalls, db *harmonydb.DB) *FinalizeTask {
	return &FinalizeTask{
		max: max,
		sp:  sp,
		sc:  sc,
		db:  db,
	}
}

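// Do finalizes the single sector bound to this task ID and marks after_finalize in the pipeline.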
func (f *FinalizeTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
	var tasks []struct {
		SpID         int64 `db:"sp_id"`
		SectorNumber int64 `db:"sector_number"`
		RegSealProof int64 `db:"reg_seal_proof"`
	}

	ctx := context.Background()

	err = f.db.Select(ctx, &tasks, `
		SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_finalize = $1`, taskID)
	if err != nil {
		return false, xerrors.Errorf("getting task: %w", err)
	}

	if len(tasks) != 1 {
		return false, xerrors.Errorf("expected one task")
	}
	task := tasks[0]

	var keepUnsealed bool

	if err := f.db.QueryRow(ctx, `SELECT COALESCE(BOOL_OR(NOT data_delete_on_finalize), FALSE) FROM sectors_sdr_initial_pieces WHERE sp_id = $1 AND sector_number = $2`, task.SpID, task.SectorNumber).Scan(&keepUnsealed); err != nil {
		return false, err
	}

	sector := storiface.SectorRef{
		ID: abi.SectorID{
			Miner:  abi.ActorID(task.SpID),
			Number: abi.SectorNumber(task.SectorNumber),
		},
		ProofType: abi.RegisteredSealProof(task.RegSealProof),
	}

	err = f.sc.FinalizeSector(ctx, sector, keepUnsealed)
	if err != nil {
		return false, xerrors.Errorf("finalizing sector: %w", err)
	}

	// set after_finalize
	_, err = f.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET after_finalize = TRUE WHERE task_id_finalize = $1`, taskID)
	if err != nil {
		return false, xerrors.Errorf("updating task: %w", err)
	}

	return true, nil
}

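// CanAccept only accepts finalize tasks whose cache files live on storage paths local to this node.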
func (f *FinalizeTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
	var tasks []struct {
		TaskID       harmonytask.TaskID `db:"task_id_finalize"`
		SpID         int64              `db:"sp_id"`
		SectorNumber int64              `db:"sector_number"`
		StorageID    string             `db:"storage_id"`
	}

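	// The query below hardcodes sector_filetype = 4 (the cache file type); fail loudly if the constant ever drifts.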
	if 4 != storiface.FTCache {
		panic("storiface.FTCache != 4")
	}

	ctx := context.Background()

	indIDs := make([]int64, len(ids))
	for i, id := range ids {
		indIDs[i] = int64(id)
	}

	err := f.db.Select(ctx, &tasks, `
		SELECT p.task_id_finalize, p.sp_id, p.sector_number, l.storage_id FROM sectors_sdr_pipeline p
			INNER JOIN sector_location l ON p.sp_id = l.miner_id AND p.sector_number = l.sector_num
			WHERE task_id_finalize = ANY ($1) AND l.sector_filetype = 4
`, indIDs)
	if err != nil {
		return nil, xerrors.Errorf("getting tasks: %w", err)
	}

	ls, err := f.sc.LocalStorage(ctx)
	if err != nil {
		return nil, xerrors.Errorf("getting local storage: %w", err)
	}

	acceptables := map[harmonytask.TaskID]bool{}

	for _, t := range ids {
		acceptables[t] = true
	}

	for _, t := range tasks {
		if _, ok := acceptables[t.TaskID]; !ok {
			continue
		}

		for _, l := range ls {
			if string(l.ID) == t.StorageID {
				return &t.TaskID, nil
			}
		}
	}

	return nil, nil
}

func (f *FinalizeTask) TypeDetails() harmonytask.TaskTypeDetails {
	return harmonytask.TaskTypeDetails{
		Max:  f.max,
		Name: "Finalize",
		Cost: resources.Resources{
			Cpu: 1,
			Gpu: 0,
			Ram: 100 << 20,
		},
		MaxFailures: 10,
	}
}

func (f *FinalizeTask) Adder(taskFunc harmonytask.AddTaskFunc) {
	f.sp.pollers[pollerFinalize].Set(taskFunc)
}

var _ harmonytask.TaskInterface = &FinalizeTask{}
155 provider/lpseal/task_movestorage.go Normal file
@ -0,0 +1,155 @@
package lpseal

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
	"github.com/filecoin-project/lotus/lib/harmony/resources"
	"github.com/filecoin-project/lotus/provider/lpffi"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

type MoveStorageTask struct {
	sp *SealPoller
	sc *lpffi.SealCalls
	db *harmonydb.DB

	max int
}

func NewMoveStorageTask(sp *SealPoller, sc *lpffi.SealCalls, db *harmonydb.DB, max int) *MoveStorageTask {
	return &MoveStorageTask{
		max: max,
		sp:  sp,
		sc:  sc,
		db:  db,
	}
}

func (m *MoveStorageTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
	var tasks []struct {
		SpID         int64 `db:"sp_id"`
		SectorNumber int64 `db:"sector_number"`
		RegSealProof int64 `db:"reg_seal_proof"`
	}

	ctx := context.Background()

	err = m.db.Select(ctx, &tasks, `
		SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_move_storage = $1`, taskID)
	if err != nil {
		return false, xerrors.Errorf("getting task: %w", err)
	}
	if len(tasks) != 1 {
		return false, xerrors.Errorf("expected one task")
	}
	task := tasks[0]

	sector := storiface.SectorRef{
		ID: abi.SectorID{
			Miner:  abi.ActorID(task.SpID),
			Number: abi.SectorNumber(task.SectorNumber),
		},
		ProofType: abi.RegisteredSealProof(task.RegSealProof),
	}

	err = m.sc.MoveStorage(ctx, sector)
	if err != nil {
		return false, xerrors.Errorf("moving storage: %w", err)
	}

	_, err = m.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET after_move_storage = true WHERE task_id_move_storage = $1`, taskID)
	if err != nil {
		return false, xerrors.Errorf("updating task: %w", err)
	}

	return true, nil
}

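// CanAccept currently takes the first offered task as long as this node has any storage path that
// can store; the commented-out block below sketches smarter storage-aware matching.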
func (m *MoveStorageTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
	ctx := context.Background()
	/*
		var tasks []struct {
			TaskID       harmonytask.TaskID `db:"task_id_finalize"`
			SpID         int64              `db:"sp_id"`
			SectorNumber int64              `db:"sector_number"`
			StorageID    string             `db:"storage_id"`
		}

		indIDs := make([]int64, len(ids))
		for i, id := range ids {
			indIDs[i] = int64(id)
		}
		err := m.db.Select(ctx, &tasks, `
			select p.task_id_move_storage, p.sp_id, p.sector_number, l.storage_id from sectors_sdr_pipeline p
				inner join sector_location l on p.sp_id=l.miner_id and p.sector_number=l.sector_num
				where task_id_move_storage in ($1) and l.sector_filetype=4`, indIDs)
		if err != nil {
			return nil, xerrors.Errorf("getting tasks: %w", err)
		}

		ls, err := m.sc.LocalStorage(ctx)
		if err != nil {
			return nil, xerrors.Errorf("getting local storage: %w", err)
		}

		acceptables := map[harmonytask.TaskID]bool{}

		for _, t := range ids {
			acceptables[t] = true
		}

		for _, t := range tasks {

		}

		todo some smarts:
		* yield a schedule cycle(s) if we have moves already in progress
	*/

	////
	ls, err := m.sc.LocalStorage(ctx)
	if err != nil {
		return nil, xerrors.Errorf("getting local storage: %w", err)
	}
	var haveStorage bool
	for _, l := range ls {
		if l.CanStore {
			haveStorage = true
			break
		}
	}

	if !haveStorage {
		return nil, nil
	}

	id := ids[0]
	return &id, nil
}

func (m *MoveStorageTask) TypeDetails() harmonytask.TaskTypeDetails {
	return harmonytask.TaskTypeDetails{
		Max:  m.max,
		Name: "MoveStorage",
		Cost: resources.Resources{
			Cpu: 1,
			Gpu: 0,
			Ram: 128 << 20,
		},
		MaxFailures: 10,
	}
}

func (m *MoveStorageTask) Adder(taskFunc harmonytask.AddTaskFunc) {
	m.sp.pollers[pollerMoveStorage].Set(taskFunc)
}

var _ harmonytask.TaskInterface = &MoveStorageTask{}
164 provider/lpseal/task_porep.go Normal file
@ -0,0 +1,164 @@
package lpseal

import (
	"bytes"
	"context"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
	"github.com/filecoin-project/lotus/lib/harmony/resources"
	"github.com/filecoin-project/lotus/provider/lpffi"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

type PoRepAPI interface {
	ChainHead(context.Context) (*types.TipSet, error)
	StateGetRandomnessFromBeacon(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte, types.TipSetKey) (abi.Randomness, error)
}

type PoRepTask struct {
	db  *harmonydb.DB
	api PoRepAPI
	sp  *SealPoller
	sc  *lpffi.SealCalls

	max int
}

func NewPoRepTask(db *harmonydb.DB, api PoRepAPI, sp *SealPoller, sc *lpffi.SealCalls, maxPoRep int) *PoRepTask {
	return &PoRepTask{
		db:  db,
		api: api,
		sp:  sp,
		sc:  sc,
		max: maxPoRep,
	}
}

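// Do loads the sector parameters, fetches the interactive challenge randomness at the seed epoch,
// computes the PoRep snark, and stores the proof in the pipeline.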
func (p *PoRepTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
	ctx := context.Background()

	var sectorParamsArr []struct {
		SpID         int64                   `db:"sp_id"`
		SectorNumber int64                   `db:"sector_number"`
		RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"`
		TicketEpoch  abi.ChainEpoch          `db:"ticket_epoch"`
		TicketValue  []byte                  `db:"ticket_value"`
		SeedEpoch    abi.ChainEpoch          `db:"seed_epoch"`
		SealedCID    string                  `db:"tree_r_cid"`
		UnsealedCID  string                  `db:"tree_d_cid"`
	}

	err = p.db.Select(ctx, &sectorParamsArr, `
		SELECT sp_id, sector_number, reg_seal_proof, ticket_epoch, ticket_value, seed_epoch, tree_r_cid, tree_d_cid
		FROM sectors_sdr_pipeline
		WHERE task_id_porep = $1`, taskID)
	if err != nil {
		return false, err
	}
	if len(sectorParamsArr) != 1 {
		return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr))
	}
	sectorParams := sectorParamsArr[0]

	sealed, err := cid.Parse(sectorParams.SealedCID)
	if err != nil {
		return false, xerrors.Errorf("failed to parse sealed cid: %w", err)
	}

	unsealed, err := cid.Parse(sectorParams.UnsealedCID)
	if err != nil {
		return false, xerrors.Errorf("failed to parse unsealed cid: %w", err)
	}

	ts, err := p.api.ChainHead(ctx)
	if err != nil {
		return false, xerrors.Errorf("failed to get chain head: %w", err)
	}

	maddr, err := address.NewIDAddress(uint64(sectorParams.SpID))
	if err != nil {
		return false, xerrors.Errorf("failed to create miner address: %w", err)
	}

	buf := new(bytes.Buffer)
	if err := maddr.MarshalCBOR(buf); err != nil {
		return false, xerrors.Errorf("failed to marshal miner address: %w", err)
	}

	rand, err := p.api.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, sectorParams.SeedEpoch, buf.Bytes(), ts.Key())
	if err != nil {
		return false, xerrors.Errorf("failed to get randomness for computing seal proof: %w", err)
	}

	sr := storiface.SectorRef{
		ID: abi.SectorID{
			Miner:  abi.ActorID(sectorParams.SpID),
			Number: abi.SectorNumber(sectorParams.SectorNumber),
		},
		ProofType: sectorParams.RegSealProof,
	}

	// COMPUTE THE PROOF!

	proof, err := p.sc.PoRepSnark(ctx, sr, sealed, unsealed, sectorParams.TicketValue, abi.InteractiveSealRandomness(rand))
	if err != nil {
		return false, xerrors.Errorf("failed to compute seal proof: %w", err)
	}

	// store success!
	n, err := p.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
		SET after_porep = TRUE, seed_value = $3, porep_proof = $4
		WHERE sp_id = $1 AND sector_number = $2`,
		sectorParams.SpID, sectorParams.SectorNumber, []byte(rand), proof)
	if err != nil {
		return false, xerrors.Errorf("store sdr success: updating pipeline: %w", err)
	}
	if n != 1 {
		return false, xerrors.Errorf("store sdr success: updated %d rows", n)
	}

	return true, nil
}

func (p *PoRepTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
	// todo sort by priority

	id := ids[0]
	return &id, nil
}

func (p *PoRepTask) TypeDetails() harmonytask.TaskTypeDetails {
	res := harmonytask.TaskTypeDetails{
		Max:  p.max,
		Name: "PoRep",
		Cost: resources.Resources{
			Cpu:       1,
			Gpu:       1,
			Ram:       50 << 30, // todo correct value
			MachineID: 0,
		},
		MaxFailures: 5,
		Follows:     nil,
	}

	if isDevnet {
		res.Cost.Ram = 1 << 30
	}

	return res
}

func (p *PoRepTask) Adder(taskFunc harmonytask.AddTaskFunc) {
	p.sp.pollers[pollerPoRep].Set(taskFunc)
}

var _ harmonytask.TaskInterface = &PoRepTask{}
220 provider/lpseal/task_sdr.go Normal file
@ -0,0 +1,220 @@
package lpseal

import (
	"bytes"
	"context"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-commp-utils/nonffi"
	"github.com/filecoin-project/go-commp-utils/zerocomm"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
	"github.com/filecoin-project/lotus/lib/harmony/resources"
	"github.com/filecoin-project/lotus/provider/lpffi"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

var isDevnet = build.BlockDelaySecs < 30

type SDRAPI interface {
	ChainHead(context.Context) (*types.TipSet, error)
	StateGetRandomnessFromTickets(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte, types.TipSetKey) (abi.Randomness, error)
}

type SDRTask struct {
	api SDRAPI
	db  *harmonydb.DB
	sp  *SealPoller

	sc *lpffi.SealCalls

	max int
}

func NewSDRTask(api SDRAPI, db *harmonydb.DB, sp *SealPoller, sc *lpffi.SealCalls, maxSDR int) *SDRTask {
	return &SDRTask{
		api: api,
		db:  db,
		sp:  sp,
		sc:  sc,
		max: maxSDR,
	}
}

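// Do computes CommD from the sector's pieces (or the zero commitment for an empty sector),
// derives a sealing ticket, and runs SDR layer generation.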
func (s *SDRTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
	ctx := context.Background()

	var sectorParamsArr []struct {
		SpID         int64                   `db:"sp_id"`
		SectorNumber int64                   `db:"sector_number"`
		RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"`
	}

	err = s.db.Select(ctx, &sectorParamsArr, `
		SELECT sp_id, sector_number, reg_seal_proof
		FROM sectors_sdr_pipeline
		WHERE task_id_sdr = $1`, taskID)
	if err != nil {
		return false, xerrors.Errorf("getting sector params: %w", err)
	}

	if len(sectorParamsArr) != 1 {
		return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr))
	}
	sectorParams := sectorParamsArr[0]

	var pieces []struct {
		PieceIndex int64  `db:"piece_index"`
		PieceCID   string `db:"piece_cid"`
		PieceSize  int64  `db:"piece_size"`
	}

	err = s.db.Select(ctx, &pieces, `
		SELECT piece_index, piece_cid, piece_size
		FROM sectors_sdr_initial_pieces
		WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber)
	if err != nil {
		return false, xerrors.Errorf("getting pieces: %w", err)
	}

	ssize, err := sectorParams.RegSealProof.SectorSize()
	if err != nil {
		return false, xerrors.Errorf("getting sector size: %w", err)
	}

	var commd cid.Cid

	if len(pieces) > 0 {
		pieceInfos := make([]abi.PieceInfo, len(pieces))
		for i, p := range pieces {
			c, err := cid.Parse(p.PieceCID)
			if err != nil {
				return false, xerrors.Errorf("parsing piece cid: %w", err)
			}

			pieceInfos[i] = abi.PieceInfo{
				Size:     abi.PaddedPieceSize(p.PieceSize),
				PieceCID: c,
			}
		}

		commd, err = nonffi.GenerateUnsealedCID(sectorParams.RegSealProof, pieceInfos)
		if err != nil {
			return false, xerrors.Errorf("computing CommD: %w", err)
		}
	} else {
		commd = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded())
	}

	sref := storiface.SectorRef{
		ID: abi.SectorID{
			Miner:  abi.ActorID(sectorParams.SpID),
			Number: abi.SectorNumber(sectorParams.SectorNumber),
		},
		ProofType: sectorParams.RegSealProof,
	}

	// get ticket
	maddr, err := address.NewIDAddress(uint64(sectorParams.SpID))
	if err != nil {
		return false, xerrors.Errorf("getting miner address: %w", err)
	}

	// FAIL: api may be down
	// FAIL-RESP: rely on harmony retry
	ticket, ticketEpoch, err := s.getTicket(ctx, maddr)
	if err != nil {
		return false, xerrors.Errorf("getting ticket: %w", err)
	}

	// do the SDR!!

	// FAIL: storage may not have enough space
	// FAIL-RESP: rely on harmony retry

	// LATEFAIL: compute error in sdr
	// LATEFAIL-RESP: Check in Trees task should catch this; Will retry computing
	//                Trees; After one retry, it should return the sector to the
	//                SDR stage; max number of retries should be configurable

	err = s.sc.GenerateSDR(ctx, sref, ticket, commd)
	if err != nil {
		return false, xerrors.Errorf("generating sdr: %w", err)
	}

	// store success!
	n, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
		SET after_sdr = true, ticket_epoch = $3, ticket_value = $4
		WHERE sp_id = $1 AND sector_number = $2`,
		sectorParams.SpID, sectorParams.SectorNumber, ticketEpoch, []byte(ticket))
	if err != nil {
		return false, xerrors.Errorf("store sdr success: updating pipeline: %w", err)
	}
	if n != 1 {
		return false, xerrors.Errorf("store sdr success: updated %d rows", n)
	}

	return true, nil
}

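// getTicket derives the sealing ticket from chain randomness at head minus SealRandomnessLookback.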
func (s *SDRTask) getTicket(ctx context.Context, maddr address.Address) (abi.SealRandomness, abi.ChainEpoch, error) {
	ts, err := s.api.ChainHead(ctx)
	if err != nil {
		return nil, 0, xerrors.Errorf("getting chain head: %w", err)
	}

	ticketEpoch := ts.Height() - policy.SealRandomnessLookback
	buf := new(bytes.Buffer)
	if err := maddr.MarshalCBOR(buf); err != nil {
		return nil, 0, xerrors.Errorf("marshaling miner address: %w", err)
	}

	rand, err := s.api.StateGetRandomnessFromTickets(ctx, crypto.DomainSeparationTag_SealRandomness, ticketEpoch, buf.Bytes(), ts.Key())
	if err != nil {
		return nil, 0, xerrors.Errorf("getting randomness from tickets: %w", err)
	}

	return abi.SealRandomness(rand), ticketEpoch, nil
}

func (s *SDRTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
	// todo check storage (reserve too?)

	id := ids[0]
	return &id, nil
}

func (s *SDRTask) TypeDetails() harmonytask.TaskTypeDetails {
	res := harmonytask.TaskTypeDetails{
		Max:  s.max,
		Name: "SDR",
		Cost: resources.Resources{ // todo offset for prefetch?
			Cpu: 4, // todo multicore sdr
			Gpu: 0,
			Ram: 54 << 30,
		},
		MaxFailures: 2,
		Follows:     nil,
	}

	if isDevnet {
		res.Cost.Ram = 1 << 30
	}

	return res
}

func (s *SDRTask) Adder(taskFunc harmonytask.AddTaskFunc) {
	s.sp.pollers[pollerSDR].Set(taskFunc)
}

var _ harmonytask.TaskInterface = &SDRTask{}
178 provider/lpseal/task_submit_commit.go Normal file
@ -0,0 +1,178 @@
package lpseal

import (
	"bytes"
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/builtin"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
	"github.com/filecoin-project/lotus/lib/harmony/resources"
	"github.com/filecoin-project/lotus/provider/lpmessage"
	"github.com/filecoin-project/lotus/provider/multictladdr"
	"github.com/filecoin-project/lotus/storage/ctladdr"
)

type SubmitCommitAPI interface {
	ChainHead(context.Context) (*types.TipSet, error)
	StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)
	StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (big.Int, error)
	StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error)
	ctladdr.NodeApi
}

type SubmitCommitTask struct {
	sp  *SealPoller
	db  *harmonydb.DB
	api SubmitCommitAPI

	sender *lpmessage.Sender
	as     *multictladdr.MultiAddressSelector

	maxFee types.FIL
}

func NewSubmitCommitTask(sp *SealPoller, db *harmonydb.DB, api SubmitCommitAPI, sender *lpmessage.Sender, as *multictladdr.MultiAddressSelector, maxFee types.FIL) *SubmitCommitTask {
	return &SubmitCommitTask{
		sp:     sp,
		db:     db,
		api:    api,
		sender: sender,
		as:     as,

		maxFee: maxFee,
	}
}

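// Do builds ProveCommitSector params for the sector bound to this task, computes the collateral
// still owed (net of the precommit deposit), sends the message, and records its CID.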
func (s *SubmitCommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
	ctx := context.Background()

	var sectorParamsArr []struct {
		SpID         int64  `db:"sp_id"`
		SectorNumber int64  `db:"sector_number"`
		Proof        []byte `db:"porep_proof"`
	}

	err = s.db.Select(ctx, &sectorParamsArr, `
		SELECT sp_id, sector_number, porep_proof
		FROM sectors_sdr_pipeline
		WHERE task_id_commit_msg = $1`, taskID)
	if err != nil {
		return false, xerrors.Errorf("getting sector params: %w", err)
	}

	if len(sectorParamsArr) != 1 {
		return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr))
	}
	sectorParams := sectorParamsArr[0]

	maddr, err := address.NewIDAddress(uint64(sectorParams.SpID))
	if err != nil {
		return false, xerrors.Errorf("getting miner address: %w", err)
	}

	params := miner.ProveCommitSectorParams{
		SectorNumber: abi.SectorNumber(sectorParams.SectorNumber),
		Proof:        sectorParams.Proof,
	}

	enc := new(bytes.Buffer)
	if err := params.MarshalCBOR(enc); err != nil {
		return false, xerrors.Errorf("could not serialize commit params: %w", err)
	}

	ts, err := s.api.ChainHead(ctx)
	if err != nil {
		return false, xerrors.Errorf("getting chain head: %w", err)
	}

	mi, err := s.api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
	if err != nil {
		return false, xerrors.Errorf("getting miner info: %w", err)
	}

	pci, err := s.api.StateSectorPreCommitInfo(ctx, maddr, abi.SectorNumber(sectorParams.SectorNumber), ts.Key())
	if err != nil {
		return false, xerrors.Errorf("getting precommit info: %w", err)
	}
	if pci == nil {
		return false, xerrors.Errorf("precommit info not found on chain")
	}

	collateral, err := s.api.StateMinerInitialPledgeCollateral(ctx, maddr, pci.Info, ts.Key())
	if err != nil {
		return false, xerrors.Errorf("getting initial pledge collateral: %w", err)
	}

	collateral = big.Sub(collateral, pci.PreCommitDeposit)
	if collateral.LessThan(big.Zero()) {
		collateral = big.Zero()
	}

	a, _, err := s.as.AddressFor(ctx, s.api, maddr, mi, api.CommitAddr, collateral, big.Zero())
	if err != nil {
		return false, xerrors.Errorf("getting address for commit: %w", err)
	}

	msg := &types.Message{
		To:     maddr,
		From:   a,
		Method: builtin.MethodsMiner.ProveCommitSector, // todo ddo provecommit3
		Params: enc.Bytes(),
		Value:  collateral, // todo config for pulling from miner balance!!
	}

	mss := &api.MessageSendSpec{
		MaxFee: abi.TokenAmount(s.maxFee),
	}

	mcid, err := s.sender.Send(ctx, msg, mss, "commit")
	if err != nil {
		return false, xerrors.Errorf("pushing message to mpool: %w", err)
	}

	_, err = s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET commit_msg_cid = $1, after_commit_msg = TRUE WHERE sp_id = $2 AND sector_number = $3`, mcid, sectorParams.SpID, sectorParams.SectorNumber)
	if err != nil {
		return false, xerrors.Errorf("updating commit_msg_cid: %w", err)
	}

	_, err = s.db.Exec(ctx, `INSERT INTO message_waits (signed_message_cid) VALUES ($1)`, mcid)
	if err != nil {
		return false, xerrors.Errorf("inserting into message_waits: %w", err)
	}

	return true, nil
}

func (s *SubmitCommitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
	id := ids[0]
	return &id, nil
}

func (s *SubmitCommitTask) TypeDetails() harmonytask.TaskTypeDetails {
	return harmonytask.TaskTypeDetails{
		Max:  128,
		Name: "CommitSubmit",
		Cost: resources.Resources{
			Cpu: 0,
			Gpu: 0,
			Ram: 1 << 20,
		},
		MaxFailures: 16,
	}
}

func (s *SubmitCommitTask) Adder(taskFunc harmonytask.AddTaskFunc) {
	s.sp.pollers[pollerCommitMsg].Set(taskFunc)
}

var _ harmonytask.TaskInterface = &SubmitCommitTask{}
214 provider/lpseal/task_submit_precommit.go Normal file
@ -0,0 +1,214 @@
package lpseal

import (
	"bytes"
	"context"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/builtin"
	miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
	"github.com/filecoin-project/lotus/lib/harmony/resources"
	"github.com/filecoin-project/lotus/provider/lpmessage"
	"github.com/filecoin-project/lotus/provider/multictladdr"
	"github.com/filecoin-project/lotus/storage/ctladdr"
)

type SubmitPrecommitTaskApi interface {
	StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (big.Int, error)
	StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)
	ctladdr.NodeApi
}

type SubmitPrecommitTask struct {
	sp     *SealPoller
	db     *harmonydb.DB
	api    SubmitPrecommitTaskApi
	sender *lpmessage.Sender
	as     *multictladdr.MultiAddressSelector

	maxFee types.FIL
}

func NewSubmitPrecommitTask(sp *SealPoller, db *harmonydb.DB, api SubmitPrecommitTaskApi, sender *lpmessage.Sender, as *multictladdr.MultiAddressSelector, maxFee types.FIL) *SubmitPrecommitTask {
	return &SubmitPrecommitTask{
		sp:     sp,
		db:     db,
		api:    api,
		sender: sender,
		as:     as,

		maxFee: maxFee,
	}
}

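// Do builds PreCommitSectorBatch2 params for the sector bound to this task, attaches any deal
// data, sends the precommit message, and records its CID.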
func (s *SubmitPrecommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
	ctx := context.Background()

	var sectorParamsArr []struct {
		SpID         int64                   `db:"sp_id"`
		SectorNumber int64                   `db:"sector_number"`
		RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"`
		TicketEpoch  abi.ChainEpoch          `db:"ticket_epoch"`
		SealedCID    string                  `db:"tree_r_cid"`
		UnsealedCID  string                  `db:"tree_d_cid"`
	}

	err = s.db.Select(ctx, &sectorParamsArr, `
		SELECT sp_id, sector_number, reg_seal_proof, ticket_epoch, tree_r_cid, tree_d_cid
		FROM sectors_sdr_pipeline
		WHERE task_id_precommit_msg = $1`, taskID)
	if err != nil {
		return false, xerrors.Errorf("getting sector params: %w", err)
	}

	if len(sectorParamsArr) != 1 {
		return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr))
	}
	sectorParams := sectorParamsArr[0]

	maddr, err := address.NewIDAddress(uint64(sectorParams.SpID))
	if err != nil {
		return false, xerrors.Errorf("getting miner address: %w", err)
	}

	sealedCID, err := cid.Parse(sectorParams.SealedCID)
	if err != nil {
		return false, xerrors.Errorf("parsing sealed CID: %w", err)
	}

	unsealedCID, err := cid.Parse(sectorParams.UnsealedCID)
	if err != nil {
		return false, xerrors.Errorf("parsing unsealed CID: %w", err)
	}

	params := miner.PreCommitSectorBatchParams2{}

	expiration := sectorParams.TicketEpoch + miner12.MaxSectorExpirationExtension

	params.Sectors = append(params.Sectors, miner.SectorPreCommitInfo{
		SealProof:     sectorParams.RegSealProof,
		SectorNumber:  abi.SectorNumber(sectorParams.SectorNumber),
		SealedCID:     sealedCID,
		SealRandEpoch: sectorParams.TicketEpoch,
		Expiration:    expiration,
	})

	{
		var pieces []struct {
			PieceIndex int64  `db:"piece_index"`
			PieceCID   string `db:"piece_cid"`
			PieceSize  int64  `db:"piece_size"`

			F05DealID       int64 `db:"f05_deal_id"`
			F05DealEndEpoch int64 `db:"f05_deal_end_epoch"`
		}

		err = s.db.Select(ctx, &pieces, `
			SELECT piece_index, piece_cid, piece_size, f05_deal_id, f05_deal_end_epoch
			FROM sectors_sdr_initial_pieces
			WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber)
		if err != nil {
			return false, xerrors.Errorf("getting pieces: %w", err)
		}

		if len(pieces) > 1 {
			return false, xerrors.Errorf("too many pieces") // todo support multiple pieces
		}

		if len(pieces) > 0 {
			params.Sectors[0].UnsealedCid = &unsealedCID
			params.Sectors[0].Expiration = abi.ChainEpoch(pieces[0].F05DealEndEpoch)

			for _, p := range pieces {
				params.Sectors[0].DealIDs = append(params.Sectors[0].DealIDs, abi.DealID(p.F05DealID))
			}
		}
	}

	var pbuf bytes.Buffer
	if err := params.MarshalCBOR(&pbuf); err != nil {
		return false, xerrors.Errorf("serializing params: %w", err)
	}

	collateral, err := s.api.StateMinerPreCommitDepositForPower(ctx, maddr, params.Sectors[0], types.EmptyTSK)
	if err != nil {
		return false, xerrors.Errorf("getting precommit deposit: %w", err)
	}

	mi, err := s.api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
	if err != nil {
		return false, xerrors.Errorf("getting miner info: %w", err)
	}

	a, _, err := s.as.AddressFor(ctx, s.api, maddr, mi, api.PreCommitAddr, collateral, big.Zero())
	if err != nil {
		return false, xerrors.Errorf("getting address for precommit: %w", err)
	}

	msg := &types.Message{
		To:     maddr,
		From:   a,
		Method: builtin.MethodsMiner.PreCommitSectorBatch2,
		Params: pbuf.Bytes(),
		Value:  collateral, // todo config for pulling from miner balance!!
	}

	mss := &api.MessageSendSpec{
		MaxFee: abi.TokenAmount(s.maxFee),
	}

	mcid, err := s.sender.Send(ctx, msg, mss, "precommit")
	if err != nil {
		return false, xerrors.Errorf("sending message: %w", err)
	}

	// set precommit_msg_cid
	_, err = s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
		SET precommit_msg_cid = $1, after_precommit_msg = TRUE
		WHERE task_id_precommit_msg = $2`, mcid, taskID)
	if err != nil {
		return false, xerrors.Errorf("updating precommit_msg_cid: %w", err)
	}

	_, err = s.db.Exec(ctx, `INSERT INTO message_waits (signed_message_cid) VALUES ($1)`, mcid)
	if err != nil {
		return false, xerrors.Errorf("inserting into message_waits: %w", err)
	}
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SubmitPrecommitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
|
||||||
|
id := ids[0]
|
||||||
|
return &id, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SubmitPrecommitTask) TypeDetails() harmonytask.TaskTypeDetails {
|
||||||
|
return harmonytask.TaskTypeDetails{
|
||||||
|
Max: 1024,
|
||||||
|
Name: "PreCommitSubmit",
|
||||||
|
Cost: resources.Resources{
|
||||||
|
Cpu: 0,
|
||||||
|
Gpu: 0,
|
||||||
|
Ram: 1 << 20,
|
||||||
|
},
|
||||||
|
MaxFailures: 16,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SubmitPrecommitTask) Adder(taskFunc harmonytask.AddTaskFunc) {
|
||||||
|
s.sp.pollers[pollerPrecommitMsg].Set(taskFunc)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ harmonytask.TaskInterface = &SubmitPrecommitTask{}
|
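The four methods above (Do, CanAccept, TypeDetails, Adder) are the entire scheduling contract the tasks in this change implement. A minimal sketch of a conforming task, assuming only the harmonytask signatures used above (ExampleTask and its bodies are hypothetical):

// ExampleTask is a hypothetical, minimal harmonytask.TaskInterface implementation.
type ExampleTask struct{}

func (e *ExampleTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (bool, error) {
	// Report done=true only once results are durably persisted.
	return true, nil
}

func (e *ExampleTask) CanAccept(ids []harmonytask.TaskID, _ *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
	id := ids[0] // accept the first offered task
	return &id, nil
}

func (e *ExampleTask) TypeDetails() harmonytask.TaskTypeDetails {
	return harmonytask.TaskTypeDetails{
		Max:         1,
		Name:        "Example",
		Cost:        resources.Resources{Ram: 1 << 20}, // hypothetical budget
		MaxFailures: 1,
	}
}

func (e *ExampleTask) Adder(add harmonytask.AddTaskFunc) {}

var _ harmonytask.TaskInterface = &ExampleTask{}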
provider/lpseal/task_trees.go (new file, 256 lines)
@@ -0,0 +1,256 @@
package lpseal

import (
	"context"
	"io"
	"net/http"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-commp-utils/nonffi"
	"github.com/filecoin-project/go-commp-utils/zerocomm"
	"github.com/filecoin-project/go-padreader"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
	"github.com/filecoin-project/lotus/lib/harmony/resources"
	"github.com/filecoin-project/lotus/provider/lpffi"
	"github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

type TreesTask struct {
	sp *SealPoller
	db *harmonydb.DB
	sc *lpffi.SealCalls

	max int
}

func NewTreesTask(sp *SealPoller, db *harmonydb.DB, sc *lpffi.SealCalls, maxTrees int) *TreesTask {
	return &TreesTask{
		sp: sp,
		db: db,
		sc: sc,

		max: maxTrees,
	}
}

func (t *TreesTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
	ctx := context.Background()

	var sectorParamsArr []struct {
		SpID         int64                   `db:"sp_id"`
		SectorNumber int64                   `db:"sector_number"`
		RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"`
	}

	err = t.db.Select(ctx, &sectorParamsArr, `
		SELECT sp_id, sector_number, reg_seal_proof
		FROM sectors_sdr_pipeline
		WHERE task_id_tree_r = $1 AND task_id_tree_c = $1 AND task_id_tree_d = $1`, taskID)
	if err != nil {
		return false, xerrors.Errorf("getting sector params: %w", err)
	}

	if len(sectorParamsArr) != 1 {
		return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr))
	}
	sectorParams := sectorParamsArr[0]

	var pieces []struct {
		PieceIndex int64  `db:"piece_index"`
		PieceCID   string `db:"piece_cid"`
		PieceSize  int64  `db:"piece_size"`

		DataUrl     *string `db:"data_url"`
		DataHeaders *[]byte `db:"data_headers"`
		DataRawSize *int64  `db:"data_raw_size"`
	}

	err = t.db.Select(ctx, &pieces, `
		SELECT piece_index, piece_cid, piece_size, data_url, data_headers, data_raw_size
		FROM sectors_sdr_initial_pieces
		WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber)
	if err != nil {
		return false, xerrors.Errorf("getting pieces: %w", err)
	}

	ssize, err := sectorParams.RegSealProof.SectorSize()
	if err != nil {
		return false, xerrors.Errorf("getting sector size: %w", err)
	}

	var commd cid.Cid
	var dataReader io.Reader
	var unpaddedData bool

	if len(pieces) > 0 {
		pieceInfos := make([]abi.PieceInfo, len(pieces))
		pieceReaders := make([]io.Reader, len(pieces))

		for i, p := range pieces {
			// make pieceInfo
			c, err := cid.Parse(p.PieceCID)
			if err != nil {
				return false, xerrors.Errorf("parsing piece cid: %w", err)
			}

			pieceInfos[i] = abi.PieceInfo{
				Size:     abi.PaddedPieceSize(p.PieceSize),
				PieceCID: c,
			}

			// make pieceReader
			if p.DataUrl != nil {
				pieceReaders[i], _ = padreader.New(&UrlPieceReader{
					Url:     *p.DataUrl,
					RawSize: *p.DataRawSize,
				}, uint64(*p.DataRawSize))
			} else { // padding piece (w/o fr32 padding, added in TreeD)
				pieceReaders[i] = nullreader.NewNullReader(abi.PaddedPieceSize(p.PieceSize).Unpadded())
			}
		}

		commd, err = nonffi.GenerateUnsealedCID(sectorParams.RegSealProof, pieceInfos)
		if err != nil {
			return false, xerrors.Errorf("computing CommD: %w", err)
		}

		dataReader = io.MultiReader(pieceReaders...)
		unpaddedData = true
	} else {
		commd = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded())
		dataReader = nullreader.NewNullReader(abi.UnpaddedPieceSize(ssize))
		unpaddedData = false // nullreader includes fr32 zero bits
	}

	sref := storiface.SectorRef{
		ID: abi.SectorID{
			Miner:  abi.ActorID(sectorParams.SpID),
			Number: abi.SectorNumber(sectorParams.SectorNumber),
		},
		ProofType: sectorParams.RegSealProof,
	}

	// D
	treeUnsealed, err := t.sc.TreeD(ctx, sref, abi.PaddedPieceSize(ssize), dataReader, unpaddedData)
	if err != nil {
		return false, xerrors.Errorf("computing tree d: %w", err)
	}

	// R / C
	sealed, unsealed, err := t.sc.TreeRC(ctx, sref, commd)
	if err != nil {
		return false, xerrors.Errorf("computing tree r and c: %w", err)
	}

	if unsealed != treeUnsealed {
		return false, xerrors.Errorf("tree-d and tree-r/c unsealed CIDs disagree")
	}

	// todo synth porep

	// todo porep challenge check

	n, err := t.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
		SET after_tree_r = true, after_tree_c = true, after_tree_d = true, tree_r_cid = $3, tree_d_cid = $4
		WHERE sp_id = $1 AND sector_number = $2`,
		sectorParams.SpID, sectorParams.SectorNumber, sealed, unsealed)
	if err != nil {
		return false, xerrors.Errorf("store sdr-trees success: updating pipeline: %w", err)
	}
	if n != 1 {
		return false, xerrors.Errorf("store sdr-trees success: updated %d rows", n)
	}

	return true, nil
}

func (t *TreesTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
	// todo reserve storage

	id := ids[0]
	return &id, nil
}

func (t *TreesTask) TypeDetails() harmonytask.TaskTypeDetails {
	return harmonytask.TaskTypeDetails{
		Max:  t.max,
		Name: "SDRTrees",
		Cost: resources.Resources{
			Cpu: 1,
			Gpu: 1,
			Ram: 8000 << 20, // todo
		},
		MaxFailures: 3,
		Follows:     nil,
	}
}

func (t *TreesTask) Adder(taskFunc harmonytask.AddTaskFunc) {
	t.sp.pollers[pollerTrees].Set(taskFunc)
}

type UrlPieceReader struct {
	Url     string
	RawSize int64 // the exact number of bytes read, if we read more or less that's an error

	readSoFar int64
	active    io.ReadCloser // auto-closed on EOF
}

func (u *UrlPieceReader) Read(p []byte) (n int, err error) {
	// Check if we have already read the required amount of data
	if u.readSoFar >= u.RawSize {
		return 0, io.EOF
	}

	// If 'active' is nil, initiate the HTTP request
	if u.active == nil {
		resp, err := http.Get(u.Url)
		if err != nil {
			return 0, err
		}

		// Set 'active' to the response body
		u.active = resp.Body
	}

	// Calculate the maximum number of bytes we can read without exceeding RawSize
	toRead := u.RawSize - u.readSoFar
	if int64(len(p)) > toRead {
		p = p[:toRead]
	}

	n, err = u.active.Read(p)

	// Update the number of bytes read so far
	u.readSoFar += int64(n)

	// If the number of bytes read exceeds RawSize, return an error
	if u.readSoFar > u.RawSize {
		return n, xerrors.New("read beyond the specified RawSize")
	}

	// If EOF is reached, close the reader
	if err == io.EOF {
		cerr := u.active.Close()
		if cerr != nil {
			log.Errorf("error closing http piece reader: %s", cerr)
		}

		// if we're below the RawSize, return an unexpected EOF error
		if u.readSoFar < u.RawSize {
			log.Errorw("unexpected EOF", "readSoFar", u.readSoFar, "rawSize", u.RawSize, "url", u.Url)
			return n, io.ErrUnexpectedEOF
		}
	}

	return n, err
}

var _ harmonytask.TaskInterface = &TreesTask{}
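UrlPieceReader hands raw bytes to go-padreader, which pads them out to the piece's unpadded size before tree building. A hedged sketch mirroring the padreader.New call in TreesTask.Do above; the URL and byte count are hypothetical:

rawSize := int64(1016)
paddedReader, _ := padreader.New(&UrlPieceReader{
	Url:     "http://example.com/piece", // hypothetical piece source
	RawSize: rawSize,                    // the server must deliver exactly this many bytes
}, uint64(rawSize))
// paddedReader yields the fetched bytes zero-padded to the piece boundary,
// suitable as one of the readers multiplexed into TreeD's data stream.
data, err := io.ReadAll(paddedReader)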
provider/lpseal/task_trees_test.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package lpseal

import (
	"io"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestUrlPieceReader_Read tests various scenarios of reading data from UrlPieceReader
func TestUrlPieceReader_Read(t *testing.T) {
	// Create a test server
	testData := "This is a test string."
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, err := io.WriteString(w, testData)
		require.NoError(t, err)
	}))
	defer ts.Close()

	tests := []struct {
		name        string
		rawSize     int64
		expected    string
		expectError bool
		expectEOF   bool
	}{
		{"ReadExact", int64(len(testData)), testData, false, true},
		{"ReadLess", 10, testData[:10], false, false},
		{"ReadMore", int64(len(testData)) + 10, "", true, false},
	}

	for _, tt := range tests {
		tt := tt

		t.Run(tt.name, func(t *testing.T) {
			reader := UrlPieceReader{
				Url:     ts.URL,
				RawSize: tt.rawSize,
			}
			buffer, err := io.ReadAll(&reader)
			if err != nil {
				if (err != io.EOF && !tt.expectError) || (err == io.EOF && !tt.expectEOF) {
					t.Errorf("Read() error = %v, expectError %v, expectEOF %v", err, tt.expectError, tt.expectEOF)
				}
			} else {
				if got := string(buffer); got != tt.expected {
					t.Errorf("Read() got = %v, expected %v", got, tt.expected)
				}
			}
		})
	}
}

// TestUrlPieceReader_Read_Error tests the error handling of UrlPieceReader
func TestUrlPieceReader_Read_Error(t *testing.T) {
	// Simulate a server that returns an error
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "error", http.StatusInternalServerError)
	}))
	defer ts.Close()

	reader := UrlPieceReader{
		Url:     ts.URL,
		RawSize: 100,
	}
	buffer := make([]byte, 200)

	_, err := reader.Read(buffer)
	if err == nil {
		t.Errorf("Expected an error, but got nil")
	}
}
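The table-driven cases above cover exact, short, and over-long RawSize values against a local httptest server. Assuming the package layout matches the paths in this diff, they can presumably be run in isolation with:

// hypothetical invocation
//   go test ./provider/lpseal/ -run TestUrlPieceReader -v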
provider/lpweb/hapi/robust_rpc.go (new file, 93 lines)
@@ -0,0 +1,93 @@
package hapi

import (
	"context"
	"time"

	"github.com/filecoin-project/lotus/api/client"
	cliutil "github.com/filecoin-project/lotus/cli/util"
)

func (a *app) watchRpc() {
	ticker := time.NewTicker(watchInterval)
	for {
		err := a.updateRpc(context.TODO())
		if err != nil {
			log.Errorw("updating rpc info", "error", err)
		}
		select {
		case <-ticker.C:
		}
	}
}

type minimalApiInfo struct {
	Apis struct {
		ChainApiInfo []string
	}
}

func (a *app) updateRpc(ctx context.Context) error {
	rpcInfos := map[string]minimalApiInfo{} // config name -> api info
	confNameToAddr := map[string]string{}   // config name -> api address

	err := forEachConfig[minimalApiInfo](a, func(name string, info minimalApiInfo) error {
		if len(info.Apis.ChainApiInfo) == 0 {
			return nil
		}

		rpcInfos[name] = info

		for _, addr := range info.Apis.ChainApiInfo {
			ai := cliutil.ParseApiInfo(addr)
			confNameToAddr[name] = ai.Addr
		}

		return nil
	})
	if err != nil {
		return err
	}

	apiInfos := map[string][]byte{} // api address -> token

	// for dedup by address
	for _, info := range rpcInfos {
		ai := cliutil.ParseApiInfo(info.Apis.ChainApiInfo[0])
		apiInfos[ai.Addr] = ai.Token
	}

	a.rpcInfoLk.Lock()

	// todo improve this shared rpc logic
	if a.workingApi == nil {
		for addr, token := range apiInfos {
			ai := cliutil.APIInfo{
				Addr:  addr,
				Token: token,
			}

			da, err := ai.DialArgs("v1")
			if err != nil {
				continue
			}

			ah := ai.AuthHeader()

			v1api, closer, err := client.NewFullNodeRPCV1(ctx, da, ah)
			if err != nil {
				continue
			}
			go func() {
				<-ctx.Done()
				closer()
			}()

			a.workingApi = v1api
		}
	}

	a.rpcInfoLk.Unlock()

	return nil
}
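updateRpc turns each configured ChainApiInfo entry (a token:multiaddr string) into a dialed v1 client. A hedged sketch of that dial path in isolation, with a hypothetical token and address:

func dialFullNode(ctx context.Context) error {
	// token:multiaddr format as configured in ChainApiInfo; values hypothetical
	ai := cliutil.ParseApiInfo("TOKEN:/ip4/127.0.0.1/tcp/1234/http")
	da, err := ai.DialArgs("v1")
	if err != nil {
		return err
	}
	v1, closer, err := client.NewFullNodeRPCV1(ctx, da, ai.AuthHeader())
	if err != nil {
		return err
	}
	defer closer()
	_, err = v1.ChainHead(ctx) // any simple call confirms the connection works
	return err
}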
@@ -25,10 +25,17 @@ func Routes(r *mux.Router, deps *deps.Deps) error {
 		t: t,
 	}
+
+	go a.watchRpc()
+	go a.watchActor()
+
 	r.HandleFunc("/simpleinfo/actorsummary", a.actorSummary)
 	r.HandleFunc("/simpleinfo/machines", a.indexMachines)
 	r.HandleFunc("/simpleinfo/tasks", a.indexTasks)
 	r.HandleFunc("/simpleinfo/taskhistory", a.indexTasksHistory)
+	r.HandleFunc("/simpleinfo/pipeline-porep", a.indexPipelinePorep)
+
+	// pipeline-porep page
+	r.HandleFunc("/simpleinfo/pipeline-porep/sectors", a.pipelinePorepSectors)
 	return nil
 }
@@ -8,6 +8,9 @@ import (
 	"sync"
 	"time"
 
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/lotus/api/v1api"
 	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
 )
@@ -15,6 +18,9 @@ type app struct {
 	db *harmonydb.DB
 	t  *template.Template
 
+	rpcInfoLk  sync.Mutex
+	workingApi v1api.FullNode
+
 	actorInfoLk sync.Mutex
 	actorInfos  []actorInfo
 }
@@ -77,11 +83,22 @@ func (a *app) indexTasksHistory(w http.ResponseWriter, r *http.Request) {
 	a.executeTemplate(w, "cluster_task_history", s)
 }
 
+func (a *app) indexPipelinePorep(w http.ResponseWriter, r *http.Request) {
+	s, err := a.porepPipelineSummary(r.Context())
+	if err != nil {
+		log.Errorf("porep pipeline summary: %v", err)
+		http.Error(w, "internal server error", http.StatusInternalServerError)
+		return
+	}
+
+	a.executeTemplate(w, "pipeline_porep", s)
+}
+
 var templateDev = os.Getenv("LOTUS_WEB_DEV") == "1"
 
 func (a *app) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
 	if templateDev {
-		fs := os.DirFS("./cmd/lotus-provider/web/hapi/web")
+		fs := os.DirFS("./provider/lpweb/hapi/web")
 		a.t = template.Must(template.ParseFS(fs, "*"))
 	}
 	if err := a.t.ExecuteTemplate(w, name, data); err != nil {
@@ -107,7 +124,7 @@ type taskHistorySummary struct {
 	Name   string
 	TaskID int64
 
-	Posted, Start, End string
+	Posted, Start, Queued, Took string
 
 	Result bool
 	Err    string
@@ -139,7 +156,7 @@ func (a *app) clusterMachineSummary(ctx context.Context) ([]machineSummary, erro
 }
 
 func (a *app) clusterTaskSummary(ctx context.Context) ([]taskSummary, error) {
-	rows, err := a.db.Query(ctx, "SELECT id, name, update_time, owner_id FROM harmony_task")
+	rows, err := a.db.Query(ctx, "SELECT id, name, update_time, owner_id FROM harmony_task order by update_time asc, owner_id")
 	if err != nil {
 		return nil, err // Handle error
 	}
@@ -177,11 +194,67 @@ func (a *app) clusterTaskHistorySummary(ctx context.Context) ([]taskHistorySumma
 			return nil, err // Handle error
 		}
 
-		t.Posted = posted.Round(time.Second).Format("02 Jan 06 15:04")
-		t.Start = start.Round(time.Second).Format("02 Jan 06 15:04")
-		t.End = end.Round(time.Second).Format("02 Jan 06 15:04")
+		t.Posted = posted.Local().Round(time.Second).Format("02 Jan 06 15:04")
+		t.Start = start.Local().Round(time.Second).Format("02 Jan 06 15:04")
+		//t.End = end.Local().Round(time.Second).Format("02 Jan 06 15:04")
+
+		t.Queued = start.Sub(posted).Round(time.Second).String()
+		if t.Queued == "0s" {
+			t.Queued = start.Sub(posted).Round(time.Millisecond).String()
+		}
+
+		t.Took = end.Sub(start).Round(time.Second).String()
+		if t.Took == "0s" {
+			t.Took = end.Sub(start).Round(time.Millisecond).String()
+		}
+
 		summaries = append(summaries, t)
 	}
 	return summaries, nil
 }
+
+type porepPipelineSummary struct {
+	Actor string
+
+	CountSDR          int
+	CountTrees        int
+	CountPrecommitMsg int
+	CountWaitSeed     int
+	CountPoRep        int
+	CountCommitMsg    int
+	CountDone         int
+	CountFailed       int
+}
+
+func (a *app) porepPipelineSummary(ctx context.Context) ([]porepPipelineSummary, error) {
+	rows, err := a.db.Query(ctx, `
+	SELECT
+		sp_id,
+		COUNT(*) FILTER (WHERE after_sdr = false) as CountSDR,
+		COUNT(*) FILTER (WHERE (after_tree_d = false OR after_tree_c = false OR after_tree_r = false) AND after_sdr = true) as CountTrees,
+		COUNT(*) FILTER (WHERE after_tree_r = true and after_precommit_msg = false) as CountPrecommitMsg,
+		COUNT(*) FILTER (WHERE after_precommit_msg_success = false AND after_precommit_msg = true) as CountWaitSeed,
+		COUNT(*) FILTER (WHERE after_porep = false AND after_precommit_msg_success = true) as CountPoRep,
+		COUNT(*) FILTER (WHERE after_commit_msg_success = false AND after_porep = true) as CountCommitMsg,
+		COUNT(*) FILTER (WHERE after_commit_msg_success = true) as CountDone,
+		COUNT(*) FILTER (WHERE failed = true) as CountFailed
+	FROM
+		sectors_sdr_pipeline
+	GROUP BY sp_id`)
+	if err != nil {
+		return nil, xerrors.Errorf("query: %w", err)
+	}
+	defer rows.Close()
+
+	var summaries []porepPipelineSummary
+	for rows.Next() {
+		var summary porepPipelineSummary
+		if err := rows.Scan(&summary.Actor, &summary.CountSDR, &summary.CountTrees, &summary.CountPrecommitMsg, &summary.CountWaitSeed, &summary.CountPoRep, &summary.CountCommitMsg, &summary.CountDone, &summary.CountFailed); err != nil {
+			return nil, xerrors.Errorf("scan: %w", err)
+		}
+		summary.Actor = "f0" + summary.Actor
+
+		summaries = append(summaries, summary)
+	}
+	return summaries, nil
+}
provider/lpweb/hapi/simpleinfo_pipeline_porep.go (new file, 199 lines)
@@ -0,0 +1,199 @@
package hapi

import (
	"net/http"
	"time"

	lru "github.com/hashicorp/golang-lru/v2"
	blocks "github.com/ipfs/go-block-format"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"

	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/must"
)

var ChainBlockCache = must.One(lru.New[blockstore.MhString, blocks.Block](4096))

func (a *app) pipelinePorepSectors(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	type PipelineTask struct {
		SpID         int64 `db:"sp_id"`
		SectorNumber int64 `db:"sector_number"`

		CreateTime time.Time `db:"create_time"`

		TaskSDR  *int64 `db:"task_id_sdr"`
		AfterSDR bool   `db:"after_sdr"`

		TaskTreeD  *int64 `db:"task_id_tree_d"`
		AfterTreeD bool   `db:"after_tree_d"`

		TaskTreeC  *int64 `db:"task_id_tree_c"`
		AfterTreeC bool   `db:"after_tree_c"`

		TaskTreeR  *int64 `db:"task_id_tree_r"`
		AfterTreeR bool   `db:"after_tree_r"`

		TaskPrecommitMsg  *int64 `db:"task_id_precommit_msg"`
		AfterPrecommitMsg bool   `db:"after_precommit_msg"`

		AfterPrecommitMsgSuccess bool   `db:"after_precommit_msg_success"`
		SeedEpoch                *int64 `db:"seed_epoch"`

		TaskPoRep  *int64 `db:"task_id_porep"`
		PoRepProof []byte `db:"porep_proof"`
		AfterPoRep bool   `db:"after_porep"`

		TaskFinalize  *int64 `db:"task_id_finalize"`
		AfterFinalize bool   `db:"after_finalize"`

		TaskMoveStorage  *int64 `db:"task_id_move_storage"`
		AfterMoveStorage bool   `db:"after_move_storage"`

		TaskCommitMsg  *int64 `db:"task_id_commit_msg"`
		AfterCommitMsg bool   `db:"after_commit_msg"`

		AfterCommitMsgSuccess bool `db:"after_commit_msg_success"`

		Failed       bool   `db:"failed"`
		FailedReason string `db:"failed_reason"`
	}

	var tasks []PipelineTask

	err := a.db.Select(ctx, &tasks, `SELECT
		sp_id, sector_number,
		create_time,
		task_id_sdr, after_sdr,
		task_id_tree_d, after_tree_d,
		task_id_tree_c, after_tree_c,
		task_id_tree_r, after_tree_r,
		task_id_precommit_msg, after_precommit_msg,
		after_precommit_msg_success, seed_epoch,
		task_id_porep, porep_proof, after_porep,
		task_id_finalize, after_finalize,
		task_id_move_storage, after_move_storage,
		task_id_commit_msg, after_commit_msg,
		after_commit_msg_success,
		failed, failed_reason
	FROM sectors_sdr_pipeline order by sp_id, sector_number`) // todo where constrain list
	if err != nil {
		http.Error(w, xerrors.Errorf("failed to fetch pipeline tasks: %w", err).Error(), http.StatusInternalServerError)
		return
	}

	type sectorListEntry struct {
		PipelineTask

		Address    address.Address
		CreateTime string
		AfterSeed  bool

		ChainAlloc, ChainSector, ChainActive, ChainUnproven, ChainFaulty bool
	}

	head, err := a.workingApi.ChainHead(ctx)
	if err != nil {
		http.Error(w, xerrors.Errorf("failed to fetch chain head: %w", err).Error(), http.StatusInternalServerError)
		return
	}
	epoch := head.Height()

	stor := store.ActorStore(ctx, blockstore.NewReadCachedBlockstore(blockstore.NewAPIBlockstore(a.workingApi), ChainBlockCache))

	type minerBitfields struct {
		alloc, sectorSet, active, unproven, faulty bitfield.BitField
	}
	minerBitfieldCache := map[address.Address]minerBitfields{}

	sectorList := make([]sectorListEntry, 0, len(tasks))
	for _, task := range tasks {
		task := task

		task.CreateTime = task.CreateTime.Local()

		addr, err := address.NewIDAddress(uint64(task.SpID))
		if err != nil {
			http.Error(w, xerrors.Errorf("failed to create actor address: %w", err).Error(), http.StatusInternalServerError)
			return
		}

		mbf, ok := minerBitfieldCache[addr]
		if !ok {
			act, err := a.workingApi.StateGetActor(ctx, addr, types.EmptyTSK)
			if err != nil {
				http.Error(w, xerrors.Errorf("failed to load actor: %w", err).Error(), http.StatusInternalServerError)
				return
			}

			mas, err := miner.Load(stor, act)
			if err != nil {
				http.Error(w, xerrors.Errorf("failed to load miner actor: %w", err).Error(), http.StatusInternalServerError)
				return
			}

			activeSectors, err := miner.AllPartSectors(mas, miner.Partition.ActiveSectors)
			if err != nil {
				http.Error(w, xerrors.Errorf("failed to load active sectors: %w", err).Error(), http.StatusInternalServerError)
				return
			}

			allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
			if err != nil {
				http.Error(w, xerrors.Errorf("failed to load all sectors: %w", err).Error(), http.StatusInternalServerError)
				return
			}

			unproved, err := miner.AllPartSectors(mas, miner.Partition.UnprovenSectors)
			if err != nil {
				http.Error(w, xerrors.Errorf("failed to load unproven sectors: %w", err).Error(), http.StatusInternalServerError)
				return
			}

			faulty, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
			if err != nil {
				http.Error(w, xerrors.Errorf("failed to load faulty sectors: %w", err).Error(), http.StatusInternalServerError)
				return
			}

			alloc, err := mas.GetAllocatedSectors()
			if err != nil {
				http.Error(w, xerrors.Errorf("failed to load allocated sectors: %w", err).Error(), http.StatusInternalServerError)
				return
			}

			mbf = minerBitfields{
				alloc:     *alloc,
				sectorSet: allSectors,
				active:    activeSectors,
				unproven:  unproved,
				faulty:    faulty,
			}
			minerBitfieldCache[addr] = mbf
		}

		afterSeed := task.SeedEpoch != nil && *task.SeedEpoch <= int64(epoch)

		sectorList = append(sectorList, sectorListEntry{
			PipelineTask: task,
			Address:      addr,
			CreateTime:   task.CreateTime.Format(time.DateTime),
			AfterSeed:    afterSeed,

			ChainAlloc:    must.One(mbf.alloc.IsSet(uint64(task.SectorNumber))),
			ChainSector:   must.One(mbf.sectorSet.IsSet(uint64(task.SectorNumber))),
			ChainActive:   must.One(mbf.active.IsSet(uint64(task.SectorNumber))),
			ChainUnproven: must.One(mbf.unproven.IsSet(uint64(task.SectorNumber))),
			ChainFaulty:   must.One(mbf.faulty.IsSet(uint64(task.SectorNumber))),
		})
	}

	a.executeTemplate(w, "pipeline_porep_sectors", sectorList)
}
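The handler above fronts all chain reads with an LRU-backed blockstore so repeated miner-state loads do not re-fetch blocks over RPC. The same composition works for any API-backed state read; a hedged sketch with a hypothetical cache size:

cache := must.One(lru.New[blockstore.MhString, blocks.Block](1024)) // hypothetical size
bs := blockstore.NewReadCachedBlockstore(blockstore.NewAPIBlockstore(a.workingApi), cache)
stor := store.ActorStore(ctx, bs) // actor state loads now hit the cache first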
provider/lpweb/hapi/watch_actor.go (new file, 186 lines)
@@ -0,0 +1,186 @@
package hapi

import (
	"context"
	"sort"
	"time"

	"github.com/BurntSushi/toml"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/types"
)

const watchInterval = time.Second * 10

func (a *app) watchActor() {
	ticker := time.NewTicker(watchInterval)
	for {
		err := a.updateActor(context.TODO())
		if err != nil {
			log.Errorw("updating actor info", "error", err)
		}
		select {
		case <-ticker.C:
		}
	}
}

type minimalActorInfo struct {
	Addresses []struct {
		MinerAddresses []string
	}
}

func (a *app) updateActor(ctx context.Context) error {
	a.rpcInfoLk.Lock()
	api := a.workingApi
	a.rpcInfoLk.Unlock()

	if api == nil {
		log.Warnw("no working api yet")
		return nil
	}

	var actorInfos []actorInfo

	confNameToAddr := map[address.Address][]string{} // address -> config names

	err := forEachConfig[minimalActorInfo](a, func(name string, info minimalActorInfo) error {
		for _, aset := range info.Addresses {
			for _, addr := range aset.MinerAddresses {
				a, err := address.NewFromString(addr)
				if err != nil {
					return xerrors.Errorf("parsing address: %w", err)
				}
				confNameToAddr[a] = append(confNameToAddr[a], name)
			}
		}

		return nil
	})
	if err != nil {
		return err
	}

	for addr, cnames := range confNameToAddr {
		p, err := api.StateMinerPower(ctx, addr, types.EmptyTSK)
		if err != nil {
			return xerrors.Errorf("getting miner power: %w", err)
		}

		dls, err := api.StateMinerDeadlines(ctx, addr, types.EmptyTSK)
		if err != nil {
			return xerrors.Errorf("getting deadlines: %w", err)
		}

		outDls := []actorDeadline{}

		for dlidx := range dls {
			p, err := api.StateMinerPartitions(ctx, addr, uint64(dlidx), types.EmptyTSK)
			if err != nil {
				return xerrors.Errorf("getting partition: %w", err)
			}

			dl := actorDeadline{
				Empty:      false,
				Current:    false, // todo
				Proven:     false,
				PartFaulty: false,
				Faulty:     false,
			}

			var live, faulty uint64

			for _, part := range p {
				l, err := part.LiveSectors.Count()
				if err != nil {
					return xerrors.Errorf("getting live sectors: %w", err)
				}
				live += l

				f, err := part.FaultySectors.Count()
				if err != nil {
					return xerrors.Errorf("getting faulty sectors: %w", err)
				}
				faulty += f
			}

			dl.Empty = live == 0
			dl.Proven = live > 0 && faulty == 0
			dl.PartFaulty = faulty > 0
			dl.Faulty = faulty > 0 && faulty == live

			outDls = append(outDls, dl)
		}

		pd, err := api.StateMinerProvingDeadline(ctx, addr, types.EmptyTSK)
		if err != nil {
			return xerrors.Errorf("getting proving deadline: %w", err)
		}

		if len(outDls) != 48 {
			return xerrors.Errorf("expected 48 deadlines, got %d", len(outDls))
		}

		outDls[pd.Index].Current = true

		actorInfos = append(actorInfos, actorInfo{
			Address:              addr.String(),
			CLayers:              cnames,
			QualityAdjustedPower: types.DeciStr(p.MinerPower.QualityAdjPower),
			RawBytePower:         types.DeciStr(p.MinerPower.RawBytePower),
			Deadlines:            outDls,
		})
	}

	sort.Slice(actorInfos, func(i, j int) bool {
		return actorInfos[i].Address < actorInfos[j].Address
	})

	a.actorInfoLk.Lock()
	a.actorInfos = actorInfos
	a.actorInfoLk.Unlock()

	return nil
}

func (a *app) loadConfigs(ctx context.Context) (map[string]string, error) {
	rows, err := a.db.Query(ctx, `SELECT title, config FROM harmony_config`)
	if err != nil {
		return nil, xerrors.Errorf("getting db configs: %w", err)
	}

	configs := make(map[string]string)
	for rows.Next() {
		var title, config string
		if err := rows.Scan(&title, &config); err != nil {
			return nil, xerrors.Errorf("scanning db configs: %w", err)
		}
		configs[title] = config
	}

	return configs, nil
}

func forEachConfig[T any](a *app, cb func(name string, v T) error) error {
	confs, err := a.loadConfigs(context.Background())
	if err != nil {
		return err
	}

	for name, tomlStr := range confs {
		var info T
		if err := toml.Unmarshal([]byte(tomlStr), &info); err != nil {
			return xerrors.Errorf("unmarshaling %s config: %w", name, err)
		}

		if err := cb(name, info); err != nil {
			return xerrors.Errorf("cb: %w", err)
		}
	}

	return nil
}
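forEachConfig decodes every harmony_config layer into whatever TOML shape the caller declares, so each consumer reads only the fields it needs (as minimalApiInfo and minimalActorInfo do above). A hedged sketch with a hypothetical shape:

type minimalSubsysInfo struct { // hypothetical struct for illustration
	Subsystems struct {
		EnableWindowPost bool
	}
}

err := forEachConfig[minimalSubsysInfo](a, func(name string, info minimalSubsysInfo) error {
	log.Infow("config layer", "layer", name, "wdpost", info.Subsystems.EnableWindowPost)
	return nil
})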
@@ -6,9 +6,14 @@
     <td>{{.CompletedBy}}</td>
     <td>{{.Posted}}</td>
     <td>{{.Start}}</td>
-    <td>{{.End}}</td>
+    <td>{{.Queued}}</td>
+    <td>{{.Took}}</td>
     <td>{{if .Result}}<span class="success">success</span>{{else}}<span class="error">error</span>{{end}}</td>
-    <td>{{.Err}}</td>
+    <td style="max-width: 25vh">
+        <div style="overflow: hidden; white-space: nowrap; text-overflow: ellipsis" title="{{.Err}}">
+            {{.Err}}
+        </div>
+    </td>
 </tr>
{{end}}
{{end}}
provider/lpweb/hapi/web/pipeline_porep_sectors.gohtml (new file, 138 lines)
@@ -0,0 +1,138 @@
{{define "pipeline_porep_sectors"}}
    {{range .}}
        <tr>
            <td>{{.Address}}</td>
            <td rowspan="2">{{.CreateTime}}</td>
            <td rowspan="2">
                <table class="porep-state">
                    <tbody>
                    <tr>
                        <td class="{{if ne .TaskSDR nil}}pipeline-active{{end}} {{if .AfterSDR}}pipeline-success{{end}}">
                            <div>SDR</div>
                            <div>
                                {{if .AfterSDR}}done{{else}}
                                    {{if ne .TaskSDR nil}}T:{{.TaskSDR}}{{else}}--{{end}}
                                {{end}}
                            </div>
                        </td>
                        <td class="{{if ne .TaskTreeC nil}}pipeline-active{{end}} {{if .AfterTreeC}}pipeline-success{{end}}">
                            <div>TreeC</div>
                            <div>
                                {{if .AfterTreeC}}done{{else}}
                                    {{if ne .TaskTreeC nil}}T:{{.TaskTreeC}}{{else}}--{{end}}
                                {{end}}
                            </div>
                        </td>
                        <td rowspan="2" class="{{if ne .TaskPrecommitMsg nil}}pipeline-active{{end}} {{if .AfterPrecommitMsg}}pipeline-success{{end}}">
                            <div>PComm Msg</div>
                            <div>
                                {{if .AfterPrecommitMsg}}done{{else}}
                                    {{if ne .TaskPrecommitMsg nil}}T:{{.TaskPrecommitMsg}}{{else}}--{{end}}
                                {{end}}
                            </div>
                        </td>
                        <td rowspan="2" class="{{if .AfterPrecommitMsg}}pipeline-active{{end}} {{if .AfterPrecommitMsgSuccess}}pipeline-success{{end}}">
                            <div>PComm Wait</div>
                            <div>
                                {{if .AfterPrecommitMsgSuccess}}done{{else}}
                                    --
                                {{end}}
                            </div>
                        </td>
                        <td rowspan="2" class="{{if .AfterPrecommitMsgSuccess}}pipeline-active{{end}} {{if .AfterSeed}}pipeline-success{{end}}">
                            <div>Wait Seed</div>
                            <div>
                                {{if .AfterSeed}}done{{else}}
                                    {{if ne .SeedEpoch nil}}@{{.SeedEpoch}}{{else}}--{{end}}
                                {{end}}
                            </div>
                        </td>
                        <td rowspan="2" class="{{if ne .TaskPoRep nil}}pipeline-active{{end}} {{if .AfterPoRep}}pipeline-success{{end}}">
                            <div>PoRep</div>
                            <div>
                                {{if .AfterPoRep}}done{{else}}
                                    {{if ne .TaskPoRep nil}}T:{{.TaskPoRep}}{{else}}--{{end}}
                                {{end}}
                            </div>
                        </td>
                        <td class="{{if ne .TaskFinalize nil}}pipeline-active{{end}} {{if .AfterFinalize}}pipeline-success{{end}}">
                            <div>Clear Cache</div>
                            <div>
                                {{if .AfterFinalize}}done{{else}}
                                    {{if ne .TaskFinalize nil}}T:{{.TaskFinalize}}{{else}}--{{end}}
                                {{end}}
                            </div>
                        </td>
                        <td class="{{if ne .TaskMoveStorage nil}}pipeline-active{{end}} {{if .AfterMoveStorage}}pipeline-success{{end}}">
                            <div>Move Storage</div>
                            <div>
                                {{if .AfterMoveStorage}}done{{else}}
                                    {{if ne .TaskMoveStorage nil}}T:{{.TaskMoveStorage}}{{else}}--{{end}}
                                {{end}}
                            </div>
                        </td>
                        <td class="{{if .ChainSector}}pipeline-success{{else}}{{if .ChainAlloc}}pipeline-active{{else}}pipeline-failed{{end}}{{end}}">
                            <div>On Chain</div>
                            <div>{{if .ChainSector}}yes{{else}}{{if .ChainAlloc}}allocated{{else}}no{{end}}{{end}}</div>
                        </td>
                    </tr>
                    <tr>
                        <td class="{{if ne .TaskTreeD nil}}pipeline-active{{end}} {{if .AfterTreeD}}pipeline-success{{end}}">
                            <div>TreeD</div>
                            <div>
                                {{if .AfterTreeD}}done{{else}}
                                    {{if ne .TaskTreeD nil}}T:{{.TaskTreeD}}{{else}}--{{end}}
                                {{end}}
                            </div>
                        </td>
                        <td class="{{if ne .TaskTreeR nil}}pipeline-active{{end}} {{if .AfterTreeR}}pipeline-success{{end}}">
                            <div>TreeR</div>
                            <div>
                                {{if .AfterTreeR}}done{{else}}
                                    {{if ne .TaskTreeR nil}}T:{{.TaskTreeR}}{{else}}--{{end}}
                                {{end}}
                            </div>
                        </td>
                        <!-- PC-S -->
                        <!-- PC-W -->
                        <!-- WS -->
                        <!-- PoRep -->
                        <td class="{{if ne .TaskCommitMsg nil}}pipeline-active{{end}} {{if .AfterCommitMsg}}pipeline-success{{end}}">
                            <div>Commit Msg</div>
                            <div>
                                {{if .AfterCommitMsg}}done{{else}}
                                    {{if ne .TaskCommitMsg nil}}T:{{.TaskCommitMsg}}{{else}}--{{end}}
                                {{end}}
                            </div>
                        </td>
                        <td class="{{if .AfterCommitMsg}}pipeline-active{{end}} {{if .AfterCommitMsgSuccess}}pipeline-success{{end}}">
                            <div>Commit Wait</div>
                            <div>
                                {{if .AfterCommitMsgSuccess}}done{{else}}
                                    --
                                {{end}}
                            </div>
                        </td>
                        <td class="{{if .ChainActive}}pipeline-success{{else}}pipeline-failed{{end}}">
                            <div>Active</div>
                            <div>{{if .ChainActive}}yes{{else}}
                                {{if .ChainUnproven}}unproven{{else}}
                                    {{if .ChainFaulty}}faulty{{else}}no{{end}}
                                {{end}}
                            {{end}}
                            </div>
                        </td>
                    </tbody>
                </table>
            </td>
            <td rowspan="2">
                <a href="#">DETAILS</a>
            </td>
        </tr>
        <tr>
            <td>
                {{.SectorNumber}}
            </td>
        </tr>
    {{end}}
{{end}}
provider/lpweb/hapi/web/pipline_porep.gohtml (new file, 15 lines)
@@ -0,0 +1,15 @@
{{define "pipeline_porep"}}
    {{range .}}
        <tr>
            <td><b>{{.Actor}}</b></td>
            <td {{if ne .CountSDR 0}}class="success"{{end}}>{{.CountSDR}}</td>
            <td {{if ne .CountTrees 0}}class="success"{{end}}>{{.CountTrees}}</td>
            <td {{if ne .CountPrecommitMsg 0}}class="success"{{end}}>{{.CountPrecommitMsg}}</td>
            <td {{if ne .CountWaitSeed 0}}class="success"{{end}}>{{.CountWaitSeed}}</td>
            <td {{if ne .CountPoRep 0}}class="success"{{end}}>{{.CountPoRep}}</td>
            <td {{if ne .CountCommitMsg 0}}class="success"{{end}}>{{.CountCommitMsg}}</td>
            <td>{{.CountDone}}</td>
            <td>{{.CountFailed}}</td>
        </tr>
    {{end}}
{{end}}
@@ -39,12 +39,9 @@ func GetSrv(ctx context.Context, deps *deps.Deps) (*http.Server, error) {
 	}
 	api.Routes(mx.PathPrefix("/api").Subrouter(), deps)
 
-	basePath := basePath
-
 	var static fs.FS = static
 	if webDev {
-		basePath = "cmd/lotus-provider/web/static"
-		static = os.DirFS(basePath)
+		static = os.DirFS("./provider/lpweb")
 	}
 
 	mx.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -65,9 +65,6 @@ window.customElements.define('chain-connectivity', class MyElement extends LitEl
     <td>${item.Version}</td>
 </tr>
 `)}
-<tr>
-    <td colspan="4">Data incoming...</td>
-</tr>
 </tbody>
 </table>`
 });
@@ -3,78 +3,9 @@
 <title>Lotus Provider Cluster Overview</title>
 <script src="https://unpkg.com/htmx.org@1.9.5" integrity="sha384-xcuj3WpfgjlKF+FXhSQFQ0ZNr39ln+hwjN3npfM9VBnUskLolQAcN80McRIVOPuO" crossorigin="anonymous"></script>
 <script type="module" src="chain-connectivity.js"></script>
+<link rel="stylesheet" href="main.css">
+<link rel='stylesheet' href='https://cdn.jsdelivr.net/npm/hack-font@3.3.0/build/web/hack-subset.css'>
 <style>
-    html, body {
-        background: #0f0f0f;
-        color: #ffffff;
-        padding: 0;
-        margin: 0;
-
-        font-family: monospace;
-    }
-
-    table td, table th {
-        font-size: 13px;
-    }
-
-    .app-head {
-        width: 100%;
-    }
-    .head-left {
-        display: inline-block;
-    }
-    .head-right {
-        display: inline-block;
-        float: right;
-    }
-
-    table {
-        border-collapse: collapse;
-    }
-
-    table td, table th {
-        border-left: 1px solid #f0f0f0;
-        padding: 1px 5px;
-    }
-
-    table tr td:first-child, table tr th:first-child {
-        border-left: none;
-    }
-
-    a:link {
-        color: #cfc;
-    }
-
-    a:visited {
-        color: #dfa;
-    }
-
-    a:hover {
-        color: #af7;
-    }
-
-    .success {
-        color: green;
-    }
-    .warning {
-        color: yellow;
-    }
-    .error {
-        color: red;
-    }
-
-    .dash-tile {
-        display: flex;
-        flex-direction: column;
-        padding: 0.75rem;
-        background: #3f3f3f;
-
-        & b {
-            padding-bottom: 0.5rem;
-            color: deeppink;
-        }
-    }
-
     .deadline-box {
         display: grid;
         grid-template-columns: repeat(16, auto);
@@ -121,22 +52,6 @@
 <chain-connectivity></chain-connectivity>
 </div>
 <hr>
-<div class="info-block">
-    <h2>Actor Summary</h2>
-    <table>
-        <thead>
-        <tr>
-            <th>Address</th>
-            <th>Config Layers</th>
-            <th>QaP</th>
-            <th>Deadlines</th>
-        </tr>
-        </thead>
-        <tbody hx-get="/hapi/simpleinfo/actorsummary" hx-trigger="load,every 5s">
-        </tbody>
-    </table>
-</div>
-<hr>
 <div class="info-block">
     <h2>Cluster Machines</h2>
     <table>
@@ -153,6 +68,43 @@
 </table>
 </div>
 <hr>
+<div class="info-block">
+    <h2><a href="/pipeline_porep.html">PoRep Pipeline</a></h2>
+    <table>
+        <thead>
+        <tr>
+            <th>Address</th>
+            <th>SDR</th>
+            <th>Trees</th>
+            <th>Precommit Msg</th>
+            <th>Wait Seed</th>
+            <th>PoRep</th>
+            <th>Commit Msg</th>
+            <th>Done</th>
+            <th>Failed</th>
+        </tr>
+        </thead>
+        <tbody hx-get="/hapi/simpleinfo/pipeline-porep" hx-trigger="load,every 5s">
+        </tbody>
+    </table>
+</div>
+<hr>
+<div class="info-block">
+    <h2>Actor Summary</h2>
+    <table>
+        <thead>
+        <tr>
+            <th>Address</th>
+            <th>Config Layers</th>
+            <th>QaP</th>
+            <th>Deadlines</th>
+        </tr>
+        </thead>
+        <tbody hx-get="/hapi/simpleinfo/actorsummary" hx-trigger="load,every 5s">
+        </tbody>
+    </table>
+</div>
+<hr>
 <div class="info-block">
     <h2>Recently Finished Tasks</h2>
     <table>
@@ -163,12 +115,13 @@
 <th>Executor</th>
 <th>Posted</th>
 <th>Start</th>
-<th>End</th>
+<th>Queued</th>
+<th>Took</th>
 <th>Outcome</th>
 <th>Message</th>
 </tr>
 </thead>
-<tbody hx-get="/hapi/simpleinfo/taskhistory" hx-trigger="load, every 5s">
+<tbody hx-get="/hapi/simpleinfo/taskhistory" hx-trigger="load, every 2s">
 </tbody>
 </table>
 </div>
@@ -178,13 +131,13 @@
 <table>
 <thead>
 <tr>
-<th>Task</th>
+<th style="min-width: 128px">Task</th>
 <th>ID</th>
 <th>Posted</th>
 <th>Owner</th>
 </tr>
 </thead>
-<tbody hx-get="/hapi/simpleinfo/tasks" hx-trigger="load,every 5s">
+<tbody hx-get="/hapi/simpleinfo/tasks" hx-trigger="load,every 1s">
 </tbody>
 </table>
 </div>
provider/lpweb/static/main.css (new file, 70 lines)
@@ -0,0 +1,70 @@
html, body {
    background: #0f0f0f;
    color: #ffffff;
    padding: 0;
    margin: 0;

    font-family: 'Hack', monospace;
}

table td, table th {
    font-size: 13px;
}

.app-head {
    width: 100%;
}
.head-left {
    display: inline-block;
}
.head-right {
    display: inline-block;
    float: right;
}

table {
    border-collapse: collapse;
}

table td, table th {
    border-left: 1px solid #f0f0f0;
    padding: 1px 5px;
}

table tr td:first-child, table tr th:first-child {
    border-left: none;
}

a:link {
    color: #cfc;
}

a:visited {
    color: #dfa;
}

a:hover {
    color: #af7;
}

.success {
    color: greenyellow;
}
.warning {
    color: yellow;
}
.error {
    color: red;
}

.dash-tile {
    display: flex;
    flex-direction: column;
    padding: 0.75rem;
    background: #3f3f3f;

    & b {
        padding-bottom: 0.5rem;
        color: deeppink;
    }
}
provider/lpweb/static/pipeline_porep.html (new file, 84 lines)
@@ -0,0 +1,84 @@
<html>
<head>
    <title>Lotus Provider PoRep Pipeline</title>
    <script src="https://unpkg.com/htmx.org@1.9.5" integrity="sha384-xcuj3WpfgjlKF+FXhSQFQ0ZNr39ln+hwjN3npfM9VBnUskLolQAcN80McRIVOPuO" crossorigin="anonymous"></script>
    <script type="module" src="chain-connectivity.js"></script>
    <link rel="stylesheet" href="main.css">
    <link rel='stylesheet' href='https://cdn.jsdelivr.net/npm/hack-font@3.3.0/build/web/hack-subset.css'>
    <style>
        .porep-pipeline-table, .porep-state {
            color: #d0d0d0;
        }

        .porep-pipeline-table td, .porep-pipeline-table th {
            border-left: none;
            border-collapse: collapse;
        }

        .porep-pipeline-table tr:nth-child(odd) {
            border-top: 6px solid #999999;
        }

        .porep-pipeline-table tr:first-child, .porep-pipeline-table tr:first-child {
            border-top: none;
        }

        .porep-state {
            border-collapse: collapse;
        }

        .porep-state td, .porep-state th {
            border-left: 1px solid #f0f0f0;
            border-right: 1px solid #f0f0f0;

            padding: 1px 5px;

            text-align: center;
            font-size: 0.7em;
        }

        .porep-state tr {
            border-top: 1px solid #f0f0f0;
        }
        .porep-state tr:first-child {
            border-top: none;
        }

        .pipeline-active {
            background-color: #303060;
        }
        .pipeline-success {
            background-color: #306030;
        }
        .pipeline-failed {
            background-color: #603030;
        }
    </style>
</head>
<body>
<div class="app-head">
    <div class="head-left">
        <h1>Lotus Provider PoRep Pipeline</h1>
    </div>
</div>
<hr/>
<div class="page">
    <div class="info-block">
        <h2>Sectors</h2>
        <table class="porep-pipeline-table">
            <thead>
            <tr>
                <th>Sector ID</th>
                <th>Create Time</th>
                <th>State</th>
            </tr>
            </thead>
            <tbody hx-get="/hapi/simpleinfo/pipeline-porep/sectors" hx-trigger="load,every 3s">
            </tbody>
        </table>
    </div>
</div>
</body>
</html>
@@ -102,8 +102,10 @@ func NewWdPostTask(db *harmonydb.DB,
 		max:  max,
 	}
 
-	if err := pcs.AddHandler(t.processHeadChange); err != nil {
-		return nil, err
+	if pcs != nil {
+		if err := pcs.AddHandler(t.processHeadChange); err != nil {
+			return nil, err
+		}
 	}
 
 	return t, nil
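
Note: the same nil guard is applied to all three WdPost task constructors in this change, so callers (tests in particular) can build a task without wiring up a chain scheduler; without it, registering the handler on a nil scheduler would fail at the first dereference. A minimal standalone sketch of the pattern, with Sched standing in for chainsched.ProviderChainSched, whose definition this diff does not show:

    package main

    import "fmt"

    // Sched is a stand-in for chainsched.ProviderChainSched; the real type is
    // not part of this diff, so this is an illustrative assumption.
    type Sched struct{}

    func (s *Sched) AddHandler(h func() error) error { return nil }

    type Task struct{}

    func (t *Task) processHeadChange() error { return nil }

    // NewTask mirrors the guard added above: a nil scheduler simply skips
    // handler registration instead of crashing the constructor.
    func NewTask(pcs *Sched) (*Task, error) {
        t := &Task{}
        if pcs != nil {
            if err := pcs.AddHandler(t.processHeadChange); err != nil {
                return nil, err
            }
        }
        return t, nil
    }

    func main() {
        t, err := NewTask(nil) // e.g. a unit test with no chain connection
        fmt.Println(t != nil, err)
    }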
@@ -133,11 +135,34 @@ func (t *WdPostTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done
 
 	deadline := wdpost.NewDeadlineInfo(abi.ChainEpoch(pps), dlIdx, head.Height())
 
-	if deadline.PeriodElapsed() {
+	var testTask *int
+	isTestTask := func() bool {
+		if testTask != nil {
+			return *testTask > 0
+		}
+
+		testTask = new(int)
+		err := t.db.QueryRow(context.Background(), `SELECT COUNT(*) FROM harmony_test WHERE task_id = $1`, taskID).Scan(testTask)
+		if err != nil {
+			log.Errorf("WdPostTask.Do() failed to queryRow: %v", err)
+			return false
+		}
+
+		return *testTask > 0
+	}
+
+	if deadline.PeriodElapsed() && !isTestTask() {
 		log.Errorf("WdPost removed stale task: %v %v", taskID, deadline)
 		return true, nil
 	}
 
+	if deadline.Challenge > head.Height() {
+		if isTestTask() {
+			deadline = wdpost.NewDeadlineInfo(abi.ChainEpoch(pps)-deadline.WPoStProvingPeriod, dlIdx, head.Height()-deadline.WPoStProvingPeriod)
+			log.Warnw("Test task is in the future, adjusting to past", "taskID", taskID, "deadline", deadline)
+		}
+	}
+
 	maddr, err := address.NewIDAddress(spID)
 	if err != nil {
 		log.Errorf("WdPostTask.Do() failed to NewIDAddress: %v", err)
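
Note: isTestTask is a memoized closure, so the harmony_test lookup runs at most once per Do invocation even though it is consulted at several decision points below. A self-contained sketch of that shape, with the SQL query stubbed out:

    package main

    import "fmt"

    // memoBool returns a predicate that computes its answer once and replays
    // the cached value on later calls, mirroring the isTestTask closure above.
    func memoBool(compute func() (bool, error)) func() bool {
        var cached *bool
        return func() bool {
            if cached != nil {
                return *cached
            }
            v, err := compute()
            if err != nil {
                // On error the closure reports false but still caches, like
                // isTestTask caching the zero count after a failed query.
                v = false
            }
            cached = &v
            return v
        }
    }

    func main() {
        calls := 0
        isTest := memoBool(func() (bool, error) {
            calls++ // stands in for SELECT COUNT(*) FROM harmony_test
            return true, nil
        })
        fmt.Println(isTest(), isTest(), calls) // true true 1
    }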
@@ -161,11 +186,7 @@ func (t *WdPostTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done
 		return false, xerrors.Errorf("marshaling PoSt: %w", err)
 	}
 
-	testTaskIDCt := 0
-	if err = t.db.QueryRow(context.Background(), `SELECT COUNT(*) FROM harmony_test WHERE task_id = $1`, taskID).Scan(&testTaskIDCt); err != nil {
-		return false, xerrors.Errorf("querying for test task: %w", err)
-	}
-	if testTaskIDCt == 1 {
+	if isTestTask() {
 		// Do not send test tasks to the chain but to harmony_test & stdout.
 
 		data, err := json.MarshalIndent(map[string]any{
@@ -241,7 +262,6 @@ func (t *WdPostTask) CanAccept(ids []harmonytask.TaskID, te *harmonytask.TaskEng
 		PartitionIndex uint64
 
 		dlInfo *dline.Info `pgx:"-"`
-		openTs *types.TipSet
 	}
 	var tasks []wdTaskDef
 
@@ -263,13 +283,9 @@ func (t *WdPostTask) CanAccept(ids []harmonytask.TaskID, te *harmonytask.TaskEng
 		tasks[i].dlInfo = wdpost.NewDeadlineInfo(tasks[i].ProvingPeriodStart, tasks[i].DeadlineIndex, ts.Height())
 
 		if tasks[i].dlInfo.PeriodElapsed() {
+			// note: Those may be test tasks
 			return &tasks[i].TaskID, nil
 		}
-
-		tasks[i].openTs, err = t.api.ChainGetTipSetAfterHeight(context.Background(), tasks[i].dlInfo.Open, ts.Key())
-		if err != nil {
-			return nil, xerrors.Errorf("getting task open tipset: %w", err)
-		}
 	}
 
 	// todo fix the block below
@@ -77,8 +77,10 @@ func NewWdPostRecoverDeclareTask(sender *lpmessage.Sender,
 		actors: actors,
 	}
 
-	if err := pcs.AddHandler(t.processHeadChange); err != nil {
-		return nil, err
+	if pcs != nil {
+		if err := pcs.AddHandler(t.processHeadChange); err != nil {
+			return nil, err
+		}
 	}
 
 	return t, nil
@@ -62,8 +62,10 @@ func NewWdPostSubmitTask(pcs *chainsched.ProviderChainSched, send *lpmessage.Sen
 		as: as,
 	}
 
-	if err := pcs.AddHandler(res.processHeadChange); err != nil {
-		return nil, err
+	if pcs != nil {
+		if err := pcs.AddHandler(res.processHeadChange); err != nil {
+			return nil, err
+		}
 	}
 
 	return res, nil
@@ -107,13 +107,13 @@ func (t *WinPostTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (don
 	// First query to fetch from mining_tasks
 	err = t.db.QueryRow(ctx, `SELECT sp_id, epoch, base_compute_time FROM mining_tasks WHERE task_id = $1`, taskID).Scan(&details.SpID, &details.Epoch, &details.CompTime)
 	if err != nil {
-		return false, err
+		return false, xerrors.Errorf("query mining base info fail: %w", err)
 	}
 
 	// Second query to fetch from mining_base_block
 	rows, err := t.db.Query(ctx, `SELECT block_cid FROM mining_base_block WHERE task_id = $1`, taskID)
 	if err != nil {
-		return false, err
+		return false, xerrors.Errorf("query mining base blocks fail: %w", err)
 	}
 	defer rows.Close()
 
@@ -126,7 +126,7 @@ func (t *WinPostTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (don
 	}
 
 	if err := rows.Err(); err != nil {
-		return false, err
+		return false, xerrors.Errorf("query mining base blocks fail (rows.Err): %w", err)
 	}
 
 	// construct base
@@ -180,14 +180,12 @@ func (dbi *DBIndex) StorageAttach(ctx context.Context, si storiface.StorageInfo,
 		}
 	}
 
-	retryWait := time.Millisecond * 100
-retryAttachStorage:
 	// Single transaction to attach storage which is not present in the DB
 	_, err := dbi.harmonyDB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
 		var urls sql.NullString
 		var storageId sql.NullString
 		err = tx.QueryRow(
-			"Select storage_id, urls FROM storage_path WHERE storage_id = $1", string(si.ID)).Scan(&storageId, &urls)
+			"SELECT storage_id, urls FROM storage_path WHERE storage_id = $1", string(si.ID)).Scan(&storageId, &urls)
 		if err != nil && !strings.Contains(err.Error(), "no rows in result set") {
 			return false, xerrors.Errorf("storage attach select fails: %v", err)
 		}
@@ -202,7 +200,7 @@ retryAttachStorage:
 		currUrls = union(currUrls, si.URLs)
 
 		_, err = tx.Exec(
-			"UPDATE storage_path set urls=$1, weight=$2, max_storage=$3, can_seal=$4, can_store=$5, groups=$6, allow_to=$7, allow_types=$8, deny_types=$9 WHERE storage_id=$10",
+			"UPDATE storage_path set urls=$1, weight=$2, max_storage=$3, can_seal=$4, can_store=$5, groups=$6, allow_to=$7, allow_types=$8, deny_types=$9, last_heartbeat=NOW() WHERE storage_id=$10",
 			strings.Join(currUrls, ","),
 			si.Weight,
 			si.MaxStorage,
@@ -223,7 +221,7 @@ retryAttachStorage:
 		// Insert storage id
 		_, err = tx.Exec(
 			"INSERT INTO storage_path "+
-				"Values($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)",
+				"Values($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, NOW())",
 			si.ID,
 			strings.Join(si.URLs, ","),
 			si.Weight,
@@ -238,23 +236,14 @@ retryAttachStorage:
 			st.Available,
 			st.FSAvailable,
 			st.Reserved,
-			st.Used,
-			time.Now())
+			st.Used)
 		if err != nil {
 			return false, xerrors.Errorf("StorageAttach insert fails: %v", err)
 		}
 		return true, nil
-	})
-	if err != nil {
-		if harmonydb.IsErrSerialization(err) {
-			time.Sleep(retryWait)
-			retryWait *= 2
-			goto retryAttachStorage
-		}
-		return err
-	}
-
-	return nil
+	}, harmonydb.OptionRetry())
+
+	return err
 }
 
 func (dbi *DBIndex) StorageDetach(ctx context.Context, id storiface.ID, url string) error {
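
Note: throughout this file the hand-rolled `goto` retry loops around BeginTransaction are replaced by passing harmonydb.OptionRetry(), which moves serialization-failure backoff into the transaction helper itself. The harmonydb internals are not part of this diff, so the sketch below shows one plausible shape for such an option, not the actual implementation:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // errSerialization stands in for a Postgres serialization failure (40001).
    var errSerialization = errors.New("serialization failure")

    type txOptions struct{ retry bool }

    type TxOption func(*txOptions)

    // OptionRetry is modeled on harmonydb.OptionRetry(): it asks the runner to
    // retry serialization failures with exponential backoff. A production
    // implementation would also cap the number of attempts.
    func OptionRetry() TxOption { return func(o *txOptions) { o.retry = true } }

    func beginTransaction(fn func() (bool, error), opts ...TxOption) error {
        var o txOptions
        for _, opt := range opts {
            opt(&o)
        }
        wait := 10 * time.Millisecond
        for {
            _, err := fn()
            if err != nil && o.retry && errors.Is(err, errSerialization) {
                time.Sleep(wait)
                wait *= 2
                continue
            }
            return err
        }
    }

    func main() {
        attempts := 0
        err := beginTransaction(func() (bool, error) {
            attempts++
            if attempts < 3 {
                return false, errSerialization
            }
            return true, nil
        }, OptionRetry())
        fmt.Println(attempts, err) // 3 <nil>
    }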
@@ -290,8 +279,6 @@ func (dbi *DBIndex) StorageDetach(ctx context.Context, id storiface.ID, url stri
 
 		log.Warnw("Dropping sector path endpoint", "path", id, "url", url)
 	} else {
-		retryWait := time.Millisecond * 100
-	retryDropPath:
 		// Single transaction to drop storage path and sector decls which have this as a storage path
 		_, err := dbi.harmonyDB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
 			// Drop storage path completely
@@ -306,40 +293,42 @@ func (dbi *DBIndex) StorageDetach(ctx context.Context, id storiface.ID, url stri
 				return false, err
 			}
 			return true, nil
-		})
+		}, harmonydb.OptionRetry())
 		if err != nil {
-			if harmonydb.IsErrSerialization(err) {
-				time.Sleep(retryWait)
-				retryWait *= 2
-				goto retryDropPath
-			}
 			return err
 		}
 		log.Warnw("Dropping sector storage", "path", id)
 	}
 
-	return nil
+	return err
 }
 
 func (dbi *DBIndex) StorageReportHealth(ctx context.Context, id storiface.ID, report storiface.HealthReport) error {
-	var canSeal, canStore bool
-	err := dbi.harmonyDB.QueryRow(ctx,
-		"SELECT can_seal, can_store FROM storage_path WHERE storage_id=$1", id).Scan(&canSeal, &canStore)
-	if err != nil {
-		return xerrors.Errorf("Querying for storage id %s fails with err %v", id, err)
-	}
-
-	_, err = dbi.harmonyDB.Exec(ctx,
-		"UPDATE storage_path set capacity=$1, available=$2, fs_available=$3, reserved=$4, used=$5, last_heartbeat=$6",
+	retryWait := time.Millisecond * 20
+retryReportHealth:
+	_, err := dbi.harmonyDB.Exec(ctx,
+		"UPDATE storage_path set capacity=$1, available=$2, fs_available=$3, reserved=$4, used=$5, last_heartbeat=NOW() where storage_id=$6",
 		report.Stat.Capacity,
 		report.Stat.Available,
 		report.Stat.FSAvailable,
 		report.Stat.Reserved,
 		report.Stat.Used,
-		time.Now())
+		id)
 	if err != nil {
-		return xerrors.Errorf("updating storage health in DB fails with err: %v", err)
+		//return xerrors.Errorf("updating storage health in DB fails with err: %v", err)
+		if harmonydb.IsErrSerialization(err) {
+			time.Sleep(retryWait)
+			retryWait *= 2
+			goto retryReportHealth
+		}
+		return err
+	}
+
+	var canSeal, canStore bool
+	err = dbi.harmonyDB.QueryRow(ctx,
+		"SELECT can_seal, can_store FROM storage_path WHERE storage_id=$1", id).Scan(&canSeal, &canStore)
+	if err != nil {
+		return xerrors.Errorf("Querying for storage id %s fails with err %v", id, err)
 	}
 
 	if report.Stat.Capacity > 0 {
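
Note: the StorageReportHealth rewrite fixes two genuine bugs. The old UPDATE had no WHERE clause, so a single heartbeat overwrote the stats of every storage path, and last_heartbeat came from the reporting process's clock via time.Now() rather than from the database. A sketch of the corrected statement using database/sql, assuming harmonydb's Exec binds parameters the same way:

    package dbindex

    import (
        "context"
        "database/sql"
    )

    // reportHealth issues the scoped, server-timestamped update from the diff.
    // Using NOW() keeps last_heartbeat on the database clock, so heartbeat-age
    // comparisons done in SQL are not skewed by the reporting machine's clock.
    func reportHealth(ctx context.Context, db *sql.DB, id string, capacity, available, fsAvail, reserved, used int64) error {
        _, err := db.ExecContext(ctx,
            `UPDATE storage_path
                SET capacity=$1, available=$2, fs_available=$3, reserved=$4, used=$5,
                    last_heartbeat=NOW()
              WHERE storage_id=$6`, // the WHERE clause is the important fix
            capacity, available, fsAvail, reserved, used, id)
        return err
    }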
@@ -386,8 +375,6 @@ func (dbi *DBIndex) StorageDeclareSector(ctx context.Context, storageID storifac
 		return xerrors.Errorf("invalid filetype")
 	}
 
-	retryWait := time.Millisecond * 100
-retryStorageDeclareSector:
 	_, err := dbi.harmonyDB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
 		var currPrimary sql.NullBool
 		err = tx.QueryRow(
@@ -420,17 +407,9 @@ retryStorageDeclareSector:
 		}
 
 		return true, nil
-	})
-	if err != nil {
-		if harmonydb.IsErrSerialization(err) {
-			time.Sleep(retryWait)
-			retryWait *= 2
-			goto retryStorageDeclareSector
-		}
-		return err
-	}
-
-	return nil
+	}, harmonydb.OptionRetry())
+
+	return err
 }
 
 func (dbi *DBIndex) StorageDropSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType) error {
@@ -574,9 +553,9 @@ func (dbi *DBIndex) StorageFindSector(ctx context.Context, s abi.SectorID, ft st
 			FROM storage_path
 			WHERE can_seal=true
 			and available >= $1
-			and NOW()-last_heartbeat < $2
+			and NOW()-($2 * INTERVAL '1 second') < last_heartbeat
 			and heartbeat_err is null`,
-			spaceReq, SkippedHeartbeatThresh)
+			spaceReq, SkippedHeartbeatThresh.Seconds())
 		if err != nil {
 			return nil, xerrors.Errorf("Selecting allowfetch storage paths from DB fails err: %v", err)
 		}
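
Note: the heartbeat predicate changes because a Go time.Duration bound as $2 reaches Postgres as a plain number, not an interval, so `NOW()-last_heartbeat < $2` does not compare what it appears to. Passing seconds and rebuilding the interval server-side with `$2 * INTERVAL '1 second'` is the portable form. A sketch under those assumptions (table and column names from the diff):

    package dbindex

    import (
        "context"
        "database/sql"
        "time"
    )

    const skippedHeartbeatThresh = 5 * time.Minute // illustrative threshold

    // freshSealPaths selects only paths whose heartbeat is newer than the
    // threshold. The duration crosses the driver boundary as a float of
    // seconds and is turned back into an interval inside SQL.
    func freshSealPaths(ctx context.Context, db *sql.DB, spaceReq int64) (*sql.Rows, error) {
        return db.QueryContext(ctx, `
            SELECT storage_id
              FROM storage_path
             WHERE can_seal = true
               AND available >= $1
               AND NOW() - ($2 * INTERVAL '1 second') < last_heartbeat
               AND heartbeat_err IS NULL`,
            spaceReq, skippedHeartbeatThresh.Seconds())
    }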
@@ -713,12 +692,12 @@ func (dbi *DBIndex) StorageBestAlloc(ctx context.Context, allocate storiface.Sec
 			deny_types
 		FROM storage_path
 		WHERE available >= $1
-		and NOW()-last_heartbeat < $2
+		and NOW()-($2 * INTERVAL '1 second') < last_heartbeat
 		and heartbeat_err is null
-		and ($3 and can_seal = TRUE or $4 and can_store = TRUE)
+		and (($3 and can_seal = TRUE) or ($4 and can_store = TRUE))
 		order by (available::numeric * weight) desc`,
 		spaceReq,
-		SkippedHeartbeatThresh,
+		SkippedHeartbeatThresh.Seconds(),
 		pathType == storiface.PathSealing,
 		pathType == storiface.PathStorage,
 	)
|
|||||||
}
|
}
|
||||||
|
|
||||||
return true, nil
|
return true, nil
|
||||||
})
|
}, harmonydb.OptionRetry())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/gorilla/mux"
+	"github.com/ipfs/go-cid"
 	logging "github.com/ipfs/go-log/v2"
 	"golang.org/x/xerrors"
 
@@ -55,6 +56,7 @@ func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET")
 	mux.HandleFunc("/remote/vanilla/single", handler.generateSingleVanillaProof).Methods("POST")
+	mux.HandleFunc("/remote/vanilla/porep", handler.generatePoRepVanillaProof).Methods("POST")
 	mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET")
 	mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET")
 	mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE")
@@ -312,6 +314,31 @@ func (handler *FetchHandler) generateSingleVanillaProof(w http.ResponseWriter, r
 	http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(vanilla))
 }
 
+type PoRepVanillaParams struct {
+	Sector   storiface.SectorRef
+	Sealed   cid.Cid
+	Unsealed cid.Cid
+	Ticket   abi.SealRandomness
+	Seed     abi.InteractiveSealRandomness
+}
+
+func (handler *FetchHandler) generatePoRepVanillaProof(w http.ResponseWriter, r *http.Request) {
+	var params PoRepVanillaParams
+	if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
+		http.Error(w, err.Error(), 500)
+		return
+	}
+
+	vanilla, err := handler.Local.GeneratePoRepVanillaProof(r.Context(), params.Sector, params.Sealed, params.Unsealed, params.Ticket, params.Seed)
+	if err != nil {
+		http.Error(w, err.Error(), 500)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/octet-stream")
+	http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(vanilla))
+}
+
 func FileTypeFromString(t string) (storiface.SectorFileType, error) {
 	switch t {
 	case storiface.FTUnsealed.String():
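
A hypothetical client for the new endpoint, for illustration only: the JSON body mirrors PoRepVanillaParams above, the path assumes the handler's mux is mounted so the full route is /remote/vanilla/porep as the registrations suggest, and a real caller (see Remote.GeneratePoRepVanillaProof later in this diff) builds the fields from storiface/abi types and attaches auth headers:

    package client

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "io"
        "net/http"
    )

    // fetchPoRepVanilla posts the proof parameters and returns the raw
    // vanilla-proof bytes served with Content-Type application/octet-stream.
    func fetchPoRepVanilla(baseURL string, params any) ([]byte, error) {
        jreq, err := json.Marshal(params)
        if err != nil {
            return nil, err
        }
        resp, err := http.Post(baseURL+"/remote/vanilla/porep", "application/json", bytes.NewReader(jreq))
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            b, _ := io.ReadAll(resp.Body)
            return nil, fmt.Errorf("non-200 from %s: %s", baseURL, string(b))
        }
        return io.ReadAll(resp.Body) // raw vanilla proof bytes
    }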
@@ -4,6 +4,8 @@ import (
 	"context"
 	"io"
 
+	"github.com/ipfs/go-cid"
+
 	"github.com/filecoin-project/go-state-types/abi"
 
 	"github.com/filecoin-project/lotus/storage/sealer/fsutil"
@@ -48,4 +50,5 @@ type Store interface {
 	Reserve(ctx context.Context, sid storiface.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error)
 
 	GenerateSingleVanillaProof(ctx context.Context, minerID abi.ActorID, si storiface.PostSectorChallenge, ppt abi.RegisteredPoStProof) ([]byte, error)
+	GeneratePoRepVanillaProof(ctx context.Context, sr storiface.SectorRef, sealed, unsealed cid.Cid, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness) ([]byte, error)
 }
@@ -10,6 +10,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/ipfs/go-cid"
 	"golang.org/x/xerrors"
 
 	ffi "github.com/filecoin-project/filecoin-ffi"
@@ -809,4 +810,27 @@ func (st *Local) GenerateSingleVanillaProof(ctx context.Context, minerID abi.Act
 	}
 }
 
+func (st *Local) GeneratePoRepVanillaProof(ctx context.Context, sr storiface.SectorRef, sealed, unsealed cid.Cid, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness) ([]byte, error) {
+	src, _, err := st.AcquireSector(ctx, sr, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
+	if err != nil {
+		return nil, xerrors.Errorf("acquire sector: %w", err)
+	}
+
+	if src.Sealed == "" || src.Cache == "" {
+		return nil, errPathNotFound
+	}
+
+	ssize, err := sr.ProofType.SectorSize()
+	if err != nil {
+		return nil, xerrors.Errorf("getting sector size: %w", err)
+	}
+
+	secPiece := []abi.PieceInfo{{
+		Size:     abi.PaddedPieceSize(ssize),
+		PieceCID: unsealed,
+	}}
+
+	return ffi.SealCommitPhase1(sr.ProofType, sealed, unsealed, src.Cache, src.Sealed, sr.ID.Number, sr.ID.Miner, ticket, seed, secPiece)
+}
+
 var _ Store = &Local{}
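
Note: Local.GeneratePoRepVanillaProof hands ffi.SealCommitPhase1 a single piece spanning the whole sector whose PieceCID is the unsealed CID, which appears to be enough to reproduce the data commitment without the original deal-piece list. The size passed is the padded sector size; the usual fr32 relation between padded and unpadded sizes is a 128:127 ratio, as in this tiny worked example:

    package main

    import "fmt"

    // A sector's single covering piece has padded size equal to the sector
    // size. For a 32GiB sector the unpadded payload is 32GiB * 127/128, which
    // is the arithmetic behind abi.PaddedPieceSize(ssize) above.
    func main() {
        sectorSize := uint64(32) << 30 // 32 GiB, padded
        unpadded := sectorSize - sectorSize/128
        fmt.Println(sectorSize, unpadded) // 34359738368 34091302912
    }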
@@ -9,6 +9,7 @@ import (
 	reflect "reflect"
 
 	gomock "github.com/golang/mock/gomock"
+	cid "github.com/ipfs/go-cid"
 
 	abi "github.com/filecoin-project/go-state-types/abi"
 
@@ -70,6 +71,21 @@ func (mr *MockStoreMockRecorder) FsStat(arg0, arg1 interface{}) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FsStat", reflect.TypeOf((*MockStore)(nil).FsStat), arg0, arg1)
 }
 
+// GeneratePoRepVanillaProof mocks base method.
+func (m *MockStore) GeneratePoRepVanillaProof(arg0 context.Context, arg1 storiface.SectorRef, arg2, arg3 cid.Cid, arg4 abi.SealRandomness, arg5 abi.InteractiveSealRandomness) ([]byte, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GeneratePoRepVanillaProof", arg0, arg1, arg2, arg3, arg4, arg5)
+	ret0, _ := ret[0].([]byte)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GeneratePoRepVanillaProof indicates an expected call of GeneratePoRepVanillaProof.
+func (mr *MockStoreMockRecorder) GeneratePoRepVanillaProof(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GeneratePoRepVanillaProof", reflect.TypeOf((*MockStore)(nil).GeneratePoRepVanillaProof), arg0, arg1, arg2, arg3, arg4, arg5)
+}
+
 // GenerateSingleVanillaProof mocks base method.
 func (m *MockStore) GenerateSingleVanillaProof(arg0 context.Context, arg1 abi.ActorID, arg2 storiface.PostSectorChallenge, arg3 abi.RegisteredPoStProof) ([]byte, error) {
 	m.ctrl.T.Helper()
@@ -17,6 +17,7 @@ import (
 	"time"
 
 	"github.com/hashicorp/go-multierror"
+	"github.com/ipfs/go-cid"
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-state-types/abi"
|
|||||||
return fsutil.FsStat{}, xerrors.Errorf("no known URLs for remote storage %s", id)
|
return fsutil.FsStat{}, xerrors.Errorf("no known URLs for remote storage %s", id)
|
||||||
}
|
}
|
||||||
|
|
||||||
rl, err := url.Parse(si.URLs[0])
|
for _, urlStr := range si.URLs {
|
||||||
if err != nil {
|
rl, err := url.Parse(urlStr)
|
||||||
return fsutil.FsStat{}, xerrors.Errorf("failed to parse url: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
rl.Path = gopath.Join(rl.Path, "stat", string(id))
|
|
||||||
|
|
||||||
req, err := http.NewRequest("GET", rl.String(), nil)
|
|
||||||
if err != nil {
|
|
||||||
return fsutil.FsStat{}, xerrors.Errorf("request: %w", err)
|
|
||||||
}
|
|
||||||
req.Header = r.auth
|
|
||||||
req = req.WithContext(ctx)
|
|
||||||
|
|
||||||
resp, err := http.DefaultClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return fsutil.FsStat{}, xerrors.Errorf("do request: %w", err)
|
|
||||||
}
|
|
||||||
switch resp.StatusCode {
|
|
||||||
case 200:
|
|
||||||
break
|
|
||||||
case 404:
|
|
||||||
return fsutil.FsStat{}, errPathNotFound
|
|
||||||
case 500:
|
|
||||||
b, err := io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fsutil.FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err)
|
log.Warnw("failed to parse URL", "url", urlStr, "error", err)
|
||||||
|
continue // Try the next URL
|
||||||
}
|
}
|
||||||
|
|
||||||
return fsutil.FsStat{}, xerrors.Errorf("fsstat: got http 500: %s", string(b))
|
rl.Path = gopath.Join(rl.Path, "stat", string(id))
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", rl.String(), nil)
|
||||||
|
if err != nil {
|
||||||
|
log.Warnw("creating request failed", "url", rl.String(), "error", err)
|
||||||
|
continue // Try the next URL
|
||||||
|
}
|
||||||
|
req.Header = r.auth
|
||||||
|
req = req.WithContext(ctx)
|
||||||
|
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
log.Warnw("request failed", "url", rl.String(), "error", err)
|
||||||
|
continue // Try the next URL
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode == 200 {
|
||||||
|
var out fsutil.FsStat
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
|
||||||
|
_ = resp.Body.Close()
|
||||||
|
log.Warnw("decoding response failed", "url", rl.String(), "error", err)
|
||||||
|
continue // Try the next URL
|
||||||
|
}
|
||||||
|
_ = resp.Body.Close()
|
||||||
|
return out, nil // Successfully decoded, return the result
|
||||||
|
}
|
||||||
|
|
||||||
|
// non-200 status code
|
||||||
|
b, _ := io.ReadAll(resp.Body) // Best-effort read the body for logging
|
||||||
|
log.Warnw("request to endpoint failed", "url", rl.String(), "statusCode", resp.StatusCode, "response", string(b))
|
||||||
|
_ = resp.Body.Close()
|
||||||
|
// Continue to try the next URL, don't return here as we want to try all URLs
|
||||||
}
|
}
|
||||||
|
|
||||||
var out fsutil.FsStat
|
return fsutil.FsStat{}, xerrors.Errorf("all endpoints failed for remote storage %s", id)
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
|
|
||||||
return fsutil.FsStat{}, xerrors.Errorf("decoding fsstat: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
defer resp.Body.Close() // nolint
|
|
||||||
|
|
||||||
return out, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Remote) readRemote(ctx context.Context, url string, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) {
|
func (r *Remote) readRemote(ctx context.Context, url string, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) {
|
||||||
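
Note: FsStat previously trusted si.URLs[0] and failed hard on the first error; it now walks every advertised URL and reports failure only after all are exhausted. The same log-and-continue shape is applied to the vanilla-proof fetches below; a compact generic sketch of it:

    package main

    import (
        "errors"
        "fmt"
    )

    // tryEach mirrors the control flow of the rewritten FsStat: attempt each
    // endpoint, log and continue on failure, and return a terminal error only
    // after every endpoint has been tried.
    func tryEach[T any](urls []string, attempt func(string) (T, error)) (T, error) {
        for _, u := range urls {
            v, err := attempt(u)
            if err != nil {
                fmt.Println("endpoint failed, trying next:", u, err)
                continue
            }
            return v, nil
        }
        var zero T
        return zero, errors.New("all endpoints failed")
    }

    func main() {
        stat, err := tryEach([]string{"http://a", "http://b"}, func(u string) (int, error) {
            if u == "http://a" {
                return 0, errors.New("unreachable")
            }
            return 42, nil
        })
        fmt.Println(stat, err) // 42 <nil>
    }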
@@ -782,13 +785,17 @@ func (r *Remote) GenerateSingleVanillaProof(ctx context.Context, minerID abi.Act
 		return nil, err
 	}
 
+	merr := xerrors.Errorf("sector not found")
+
 	for _, info := range si {
 		for _, u := range info.BaseURLs {
 			url := fmt.Sprintf("%s/vanilla/single", u)
 
 			req, err := http.NewRequest("POST", url, strings.NewReader(string(jreq)))
 			if err != nil {
-				return nil, xerrors.Errorf("request: %w", err)
+				merr = multierror.Append(merr, xerrors.Errorf("request: %w", err))
+				log.Warnw("GenerateSingleVanillaProof request failed", "url", url, "error", err)
+				continue
 			}
 
 			if r.auth != nil {
@@ -798,7 +805,9 @@ func (r *Remote) GenerateSingleVanillaProof(ctx context.Context, minerID abi.Act
 
 			resp, err := http.DefaultClient.Do(req)
 			if err != nil {
-				return nil, xerrors.Errorf("do request: %w", err)
+				merr = multierror.Append(merr, xerrors.Errorf("do request: %w", err))
+				log.Warnw("GenerateSingleVanillaProof do request failed", "url", url, "error", err)
+				continue
 			}
 
 			if resp.StatusCode != http.StatusOK {
@@ -808,14 +817,18 @@ func (r *Remote) GenerateSingleVanillaProof(ctx context.Context, minerID abi.Act
 			}
 			body, err := io.ReadAll(resp.Body)
 			if err != nil {
-				return nil, xerrors.Errorf("resp.Body ReadAll: %w", err)
+				merr = multierror.Append(merr, xerrors.Errorf("resp.Body ReadAll: %w", err))
+				log.Warnw("GenerateSingleVanillaProof read response body failed", "url", url, "error", err)
+				continue
 			}
 
 			if err := resp.Body.Close(); err != nil {
 				log.Error("response close: ", err)
 			}
 
-			return nil, xerrors.Errorf("non-200 code from %s: '%s'", url, strings.TrimSpace(string(body)))
+			merr = multierror.Append(merr, xerrors.Errorf("non-200 code from %s: '%s'", url, strings.TrimSpace(string(body))))
+			log.Warnw("GenerateSingleVanillaProof non-200 code from remote", "code", resp.StatusCode, "url", url, "body", string(body))
+			continue
 		}
 
 		body, err := io.ReadAll(resp.Body)
@@ -824,17 +837,109 @@ func (r *Remote) GenerateSingleVanillaProof(ctx context.Context, minerID abi.Act
 				log.Error("response close: ", err)
 			}
 
-			return nil, xerrors.Errorf("resp.Body ReadAll: %w", err)
+			merr = multierror.Append(merr, xerrors.Errorf("resp.Body ReadAll: %w", err))
+			log.Warnw("GenerateSingleVanillaProof read response body failed", "url", url, "error", err)
+			continue
 			}
 
+			_ = resp.Body.Close()
+
 			return body, nil
 		}
 	}
 
-	return nil, xerrors.Errorf("sector not found")
+	return nil, merr
 }
 
-var _ Store = &Remote{}
+func (r *Remote) GeneratePoRepVanillaProof(ctx context.Context, sr storiface.SectorRef, sealed, unsealed cid.Cid, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness) ([]byte, error) {
+	// Attempt to generate the proof locally first
+	p, err := r.local.GeneratePoRepVanillaProof(ctx, sr, sealed, unsealed, ticket, seed)
+	if err != errPathNotFound {
+		return p, err
+	}
+
+	// Define the file types to look for based on the sector's state
+	ft := storiface.FTSealed | storiface.FTCache
+
+	// Find sector information
+	si, err := r.index.StorageFindSector(ctx, sr.ID, ft, 0, false)
+	if err != nil {
+		return nil, xerrors.Errorf("finding sector %d failed: %w", sr.ID, err)
+	}
+
+	// Prepare request parameters
+	requestParams := PoRepVanillaParams{
+		Sector:   sr,
+		Sealed:   sealed,
+		Unsealed: unsealed,
+		Ticket:   ticket,
+		Seed:     seed,
+	}
+	jreq, err := json.Marshal(requestParams)
+	if err != nil {
+		return nil, err
+	}
+
+	merr := xerrors.Errorf("sector not found")
+
+	// Iterate over all found sector locations
+	for _, info := range si {
+		for _, u := range info.BaseURLs {
+			url := fmt.Sprintf("%s/vanilla/porep", u)
+
+			// Create and send the request
+			req, err := http.NewRequest("POST", url, strings.NewReader(string(jreq)))
+			if err != nil {
+				merr = multierror.Append(merr, xerrors.Errorf("request: %w", err))
+				log.Warnw("GeneratePoRepVanillaProof request failed", "url", url, "error", err)
+				continue
+			}
+
+			// Set auth headers if available
+			if r.auth != nil {
+				req.Header = r.auth.Clone()
+			}
+			req = req.WithContext(ctx)
+
+			// Execute the request
+			resp, err := http.DefaultClient.Do(req)
+			if err != nil {
+				merr = multierror.Append(merr, xerrors.Errorf("do request: %w", err))
+				log.Warnw("GeneratePoRepVanillaProof do request failed", "url", url, "error", err)
+				continue
+			}
+
+			// Handle non-OK status codes
+			if resp.StatusCode != http.StatusOK {
+				body, _ := io.ReadAll(resp.Body)
+				_ = resp.Body.Close()
+
+				if resp.StatusCode == http.StatusNotFound {
+					log.Debugw("reading vanilla proof from remote not-found response", "url", url, "store", info.ID)
+					continue
+				}
+
+				merr = multierror.Append(merr, xerrors.Errorf("non-200 code from %s: '%s'", url, strings.TrimSpace(string(body))))
+				log.Warnw("GeneratePoRepVanillaProof non-200 code from remote", "code", resp.StatusCode, "url", url, "body", string(body))
+				continue
+			}
+
+			// Read the response body
+			body, err := io.ReadAll(resp.Body)
+			if err != nil {
+				merr = multierror.Append(merr, xerrors.Errorf("resp.Body ReadAll: %w", err))
+				log.Warnw("GeneratePoRepVanillaProof read response body failed", "url", url, "error", err)
+			}
+			_ = resp.Body.Close()
+
+			// Return the proof if successful
+			return body, nil
+		}
+	}
+
+	// Return the accumulated error if the proof was not generated
+	return nil, merr
+}
 
 type funcCloser func() error
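
Note: Remote.GeneratePoRepVanillaProof dispatches local-first. Only the errPathNotFound sentinel falls through to the remote lookup; any other local outcome, success or error, returns immediately. A minimal sketch of the dispatch, using errors.Is rather than the direct `err != errPathNotFound` comparison in the diff:

    package main

    import (
        "errors"
        "fmt"
    )

    var errPathNotFound = errors.New("fsstat: path not found")

    // localThenRemote mirrors the dispatch above: a sentinel "not here" error
    // falls through to the remote path, while every other local error (and
    // any local success) returns immediately.
    func localThenRemote(local, remote func() ([]byte, error)) ([]byte, error) {
        p, err := local()
        if !errors.Is(err, errPathNotFound) {
            return p, err
        }
        return remote()
    }

    func main() {
        out, err := localThenRemote(
            func() ([]byte, error) { return nil, errPathNotFound },
            func() ([]byte, error) { return []byte{1, 2, 3}, nil },
        )
        fmt.Println(out, err) // [1 2 3] <nil>
    }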
@@ -520,7 +520,7 @@ func (sb *Sealer) regenerateSectorKey(ctx context.Context, sector storiface.Sect
 	// prepare SDR params
 	commp, err := commcid.CIDToDataCommitmentV1(keyDataCid)
 	if err != nil {
-		return xerrors.Errorf("computing commP: %w", err)
+		return xerrors.Errorf("computing commK: %w", err)
 	}
 
 	replicaID, err := sector.ProofType.ReplicaId(sector.ID.Miner, sector.ID.Number, ticket, commp)
@@ -69,6 +69,10 @@ func Pad(in, out []byte) {
 	pad(in, out)
 }
 
+func PadSingle(in, out []byte) {
+	pad(in, out)
+}
+
 func pad(in, out []byte) {
 	chunks := len(out) / 128
 	for chunk := 0; chunk < chunks; chunk++ {
@@ -102,15 +102,37 @@ func OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*PartialFil
 		return nil, xerrors.Errorf("openning partial file '%s': %w", path, err)
 	}
 
+	st, err := f.Stat()
+	if err != nil {
+		return nil, xerrors.Errorf("stat '%s': %w", path, err)
+	}
+	if st.Size() < int64(maxPieceSize) {
+		return nil, xerrors.Errorf("sector file '%s' was smaller than the sector size %d < %d", path, st.Size(), maxPieceSize)
+	}
+	if st.Size() == int64(maxPieceSize) {
+		log.Debugw("no partial file trailer, assuming fully allocated", "path", path)
+
+		allAlloc := &rlepluslazy.RunSliceIterator{Runs: []rlepluslazy.Run{{Val: true, Len: uint64(maxPieceSize)}}}
+		enc, err := rlepluslazy.EncodeRuns(allAlloc, []byte{})
+		if err != nil {
+			return nil, xerrors.Errorf("encoding full allocation: %w", err)
+		}
+
+		rle, err := rlepluslazy.FromBuf(enc)
+		if err != nil {
+			return nil, xerrors.Errorf("decoding full allocation: %w", err)
+		}
+
+		return &PartialFile{
+			maxPiece:  maxPieceSize,
+			path:      path,
+			allocated: rle,
+			file:      f,
+		}, nil
+	}
+
 	var rle rlepluslazy.RLE
 	err = func() error {
-		st, err := f.Stat()
-		if err != nil {
-			return xerrors.Errorf("stat '%s': %w", path, err)
-		}
-		if st.Size() < int64(maxPieceSize) {
-			return xerrors.Errorf("sector file '%s' was smaller than the sector size %d < %d", path, st.Size(), maxPieceSize)
-		}
 		// read trailer
 		var tlen [4]byte
 		_, err = f.ReadAt(tlen[:], st.Size()-int64(len(tlen)))
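
Note: OpenPartialFile now distinguishes three file sizes. Smaller than the sector is corrupt; exactly the sector size means a trailer-less file (treated as fully allocated via a synthesized RLE+ run covering the whole piece); larger means an allocation trailer follows the data, with its length in the final 4 bytes read by the surrounding code. A small sketch of that trichotomy, with an assumed 4-byte-footer example in the usage:

    package main

    import "fmt"

    // classify mirrors the new size checks in OpenPartialFile: smaller than
    // the sector is an error, exactly the sector size means "no trailer,
    // assume fully allocated", larger means a trailer follows the data.
    func classify(fileSize, maxPieceSize int64) string {
        switch {
        case fileSize < maxPieceSize:
            return "error: file smaller than sector"
        case fileSize == maxPieceSize:
            return "no trailer: assume fully allocated"
        default:
            return "read trailer at end of file"
        }
    }

    func main() {
        const sector = 2048 // e.g. a 2KiB test sector
        fmt.Println(classify(2000, sector))
        fmt.Println(classify(2048, sector))
        fmt.Println(classify(2052, sector)) // data plus a 4-byte trailer-length footer
    }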
@@ -6,7 +6,8 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 )
 
-var dataFilePrefix = "sc-02-data-"
+const dataFilePrefix = "sc-02-data-"
+const TreeDName = dataFilePrefix + "tree-d.dat"
 
 func LayerFileName(layer int) string {
 	return fmt.Sprintf("%slayer-%d.dat", dataFilePrefix, layer)
@@ -28,8 +28,6 @@ import (
 	"github.com/filecoin-project/lotus/storage/sealer/storiface"
 )
 
-var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache}
-
 type WorkerConfig struct {
 	TaskTypes []sealtasks.TaskType
 	NoSwap    bool
@@ -167,7 +165,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector stor
 	return paths, func() {
 		releaseStorage()
 
-		for _, fileType := range pathTypes {
+		for _, fileType := range storiface.PathTypes {
 			if fileType&allocate == 0 {
 				continue
 			}
@@ -180,16 +178,16 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector stor
 	}, nil
 }
 
+func (l *localWorkerPathProvider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+	return (&localWorkerPathProvider{w: l.w, op: storiface.AcquireCopy}).AcquireSector(ctx, id, existing, allocate, ptype)
+}
+
 func FFIExec(opts ...ffiwrapper.FFIWrapperOpt) func(l *LocalWorker) (storiface.Storage, error) {
 	return func(l *LocalWorker) (storiface.Storage, error) {
 		return ffiwrapper.New(&localWorkerPathProvider{w: l}, opts...)
 	}
 }
 
-func (l *localWorkerPathProvider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
-	return (&localWorkerPathProvider{w: l.w, op: storiface.AcquireCopy}).AcquireSector(ctx, id, existing, allocate, ptype)
-}
-
 type ReturnType string
 
 const (