// lotus/lib/sectorbuilder/sectorbuilder.go

package sectorbuilder

import (
	"fmt"
	"io"
	"os"
	"strconv"
	"sync"
	"sync/atomic"
	"unsafe"

	sectorbuilder "github.com/filecoin-project/go-sectorbuilder"
	"github.com/ipfs/go-datastore"
	logging "github.com/ipfs/go-log"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/chain/address"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)
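
// PoStReservedWorkers is the number of local worker threads that New keeps
// out of the sealing pool, presumably so PoSt work always has a free worker;
// the local sealing rate limiter below is sized to
// WorkerThreads - PoStReservedWorkers. PoRepProofPartitions is the proof
// partition count handed to the proofs library for PoRep generation.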
const PoStReservedWorkers = 1
const PoRepProofPartitions = 2

var lastSectorIdKey = datastore.NewKey("/sectorbuilder/last")

var log = logging.Logger("sectorbuilder")

type SectorSealingStatus = sectorbuilder.SectorSealingStatus

type StagedSectorMetadata = sectorbuilder.StagedSectorMetadata

type SortedSectorInfo = sectorbuilder.SortedSectorInfo

type SectorInfo = sectorbuilder.SectorInfo

type SealTicket = sectorbuilder.SealTicket

type SealSeed = sectorbuilder.SealSeed

type SealPreCommitOutput = sectorbuilder.SealPreCommitOutput

type SealCommitOutput = sectorbuilder.SealCommitOutput

type PublicPieceInfo = sectorbuilder.PublicPieceInfo

type RawSealPreCommitOutput = sectorbuilder.RawSealPreCommitOutput

const CommLen = sectorbuilder.CommitmentBytesLen
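
// SectorBuilder wraps a go-sectorbuilder handle and schedules sealing work
// across a bounded pool of local workers (rateLimit) and any number of
// attached remote workers (remotes), dispatching tasks to whichever side
// has free capacity.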
type SectorBuilder struct {
	handle unsafe.Pointer
	ds     dtypes.MetadataDS
	idLk   sync.Mutex

	ssize uint64

	Miner address.Address

	stagedDir string
	sealedDir string
	cacheDir  string

	sealLocal bool
	rateLimit chan struct{}
	sealTasks chan workerCall
	taskCtr   uint64

	remoteLk      sync.Mutex
	remoteCtr     int
	remotes       map[int]*remote
	remoteResults map[uint64]chan<- SealRes

	stopping chan struct{}
}
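
// SealRes carries the outcome of a remotely executed seal task back to the
// dispatching SectorBuilder; either Err or the Proof/Rspco payload is set.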
type SealRes struct {
	// NOTE: a concrete error value does not survive a JSON round-trip;
	// most error types marshal to an empty object.
	Err error `json:",omitempty"`

	Proof []byte                 `json:",omitempty"`
	Rspco RawSealPreCommitOutput `json:",omitempty"`
}

type remote struct {
	lk sync.Mutex

	sealTasks chan<- WorkerTask
	busy      uint64 // only for metrics
}

type Config struct {
	SectorSize    uint64
	Miner         address.Address
	WorkerThreads uint8

	CacheDir    string
	SealedDir   string
	StagedDir   string
	MetadataDir string
}
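
// New validates the worker configuration, creates the sector directories,
// restores the last allocated sector ID from the metadata datastore, and
// initializes the underlying go-sectorbuilder instance.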
func New(cfg *Config, ds dtypes.MetadataDS) (*SectorBuilder, error) {
	if cfg.WorkerThreads < PoStReservedWorkers {
		return nil, xerrors.Errorf("minimum worker threads is %d, specified %d", PoStReservedWorkers, cfg.WorkerThreads)
	}

	proverId := addressToProverID(cfg.Miner)

	for _, dir := range []string{cfg.StagedDir, cfg.SealedDir, cfg.CacheDir, cfg.MetadataDir} {
		if err := os.Mkdir(dir, 0755); err != nil {
			if os.IsExist(err) {
				continue
			}
			return nil, err
		}
	}

	var lastUsedID uint64

	b, err := ds.Get(lastSectorIdKey)
	switch err {
	case nil:
		i, err := strconv.ParseInt(string(b), 10, 64)
		if err != nil {
			return nil, err
		}
		lastUsedID = uint64(i)
	case datastore.ErrNotFound:
		// first run: no sector ID persisted yet, start from zero
	default:
		return nil, err
	}

	sbp, err := sectorbuilder.InitSectorBuilder(cfg.SectorSize, PoRepProofPartitions, lastUsedID, cfg.MetadataDir, proverId, cfg.SealedDir, cfg.StagedDir, cfg.CacheDir, 16, cfg.WorkerThreads)
	if err != nil {
		return nil, err
	}

	rlimit := cfg.WorkerThreads - PoStReservedWorkers

	sealLocal := rlimit > 0
	if rlimit == 0 {
		rlimit = 1
	}

	sb := &SectorBuilder{
		handle: sbp,
		ds:     ds,

		ssize: cfg.SectorSize,

		stagedDir: cfg.StagedDir,
		sealedDir: cfg.SealedDir,
		cacheDir:  cfg.CacheDir,

		Miner:     cfg.Miner,
		sealLocal: sealLocal,
		rateLimit: make(chan struct{}, rlimit),
		taskCtr:   1,

		sealTasks:     make(chan workerCall),
		remoteResults: map[uint64]chan<- SealRes{},
		remotes:       map[int]*remote{},

		stopping: make(chan struct{}),
	}

	return sb, nil
}
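
// NewStandalone constructs a SectorBuilder with no go-sectorbuilder handle
// and no metadata datastore; it is meant for standalone workers that only
// use the Standalone* proofs calls and local sector paths.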
func NewStandalone(cfg *Config) (*SectorBuilder, error) {
	for _, dir := range []string{cfg.StagedDir, cfg.SealedDir, cfg.CacheDir, cfg.MetadataDir} {
		if err := os.MkdirAll(dir, 0755); err != nil {
			if os.IsExist(err) {
				continue
			}
			return nil, err
		}
	}

	return &SectorBuilder{
		handle: nil,
		ds:     nil,
		ssize:  cfg.SectorSize,

		Miner:     cfg.Miner,
		stagedDir: cfg.StagedDir,
		sealedDir: cfg.SealedDir,
		cacheDir:  cfg.CacheDir,

		sealLocal: true,
		taskCtr:   1,
		remotes:   map[int]*remote{},
		rateLimit: make(chan struct{}, cfg.WorkerThreads),
		stopping:  make(chan struct{}),
	}, nil
}
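
// checkRateLimit warns when all local worker slots are taken, meaning the
// next local sectorbuilder call will block until a slot frees up.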
func (sb *SectorBuilder) checkRateLimit() {
	if cap(sb.rateLimit) == len(sb.rateLimit) {
		log.Warn("rate-limiting local sectorbuilder call")
	}
}
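
// RateLimit acquires a local worker slot, blocking while none are free, and
// returns a function that releases the slot; callers pair it with defer:
//
//	ret := sb.RateLimit()
//	defer ret()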
func (sb *SectorBuilder) RateLimit() func() {
	sb.checkRateLimit()

	sb.rateLimit <- struct{}{}

	return func() {
		<-sb.rateLimit
	}
}
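
// WorkerStats is a point-in-time snapshot of local worker slot usage and of
// how many attached remote workers are currently idle.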
type WorkerStats struct {
	LocalFree     int
	LocalReserved int
	LocalTotal    int
	// todo: post in progress
	RemotesTotal int
	RemotesFree  int
}

func (sb *SectorBuilder) WorkerStats() WorkerStats {
	sb.remoteLk.Lock()
	defer sb.remoteLk.Unlock()

	remoteFree := len(sb.remotes)
	for _, r := range sb.remotes {
		if r.busy > 0 {
			remoteFree--
		}
	}

	return WorkerStats{
		LocalFree:     cap(sb.rateLimit) - len(sb.rateLimit),
		LocalReserved: PoStReservedWorkers,
		LocalTotal:    cap(sb.rateLimit) + PoStReservedWorkers,
		RemotesTotal:  len(sb.remotes),
		RemotesFree:   remoteFree,
	}
}

func addressToProverID(a address.Address) [32]byte {
	var proverId [32]byte
	copy(proverId[:], a.Payload())
	return proverId
}

func (sb *SectorBuilder) Destroy() {
	sectorbuilder.DestroySectorBuilder(sb.handle)
}
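
// AcquireSectorId allocates the next sector ID from go-sectorbuilder and
// persists it under lastSectorIdKey so that a restarted node resumes
// numbering from the same point.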
func (sb *SectorBuilder) AcquireSectorId() (uint64, error) {
	sb.idLk.Lock()
	defer sb.idLk.Unlock()

	id, err := sectorbuilder.AcquireSectorId(sb.handle)
	if err != nil {
		return 0, err
	}

	err = sb.ds.Put(lastSectorIdKey, []byte(fmt.Sprint(id)))
	if err != nil {
		return 0, err
	}

	return id, nil
}
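
// AddPiece stages pieceSize bytes from file into the staged sector file for
// sectorId, aligned after existingPieceSizes, and returns the piece size and
// commitment (CommP).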
func (sb *SectorBuilder) AddPiece(pieceSize uint64, sectorId uint64, file io.Reader, existingPieceSizes []uint64) (PublicPieceInfo, error) {
	ret := sb.RateLimit()
	defer ret()

	f, werr, err := toReadableFile(file, int64(pieceSize))
	if err != nil {
		return PublicPieceInfo{}, err
	}

	stagedFile, err := sb.stagedSectorFile(sectorId)
	if err != nil {
		return PublicPieceInfo{}, err
	}

	_, _, commP, err := sectorbuilder.StandaloneWriteWithAlignment(f, pieceSize, stagedFile, existingPieceSizes)
	if err != nil {
		return PublicPieceInfo{}, err
	}

	if err := stagedFile.Close(); err != nil {
		return PublicPieceInfo{}, err
	}

	if err := f.Close(); err != nil {
		return PublicPieceInfo{}, err
	}

	return PublicPieceInfo{
		Size:  pieceSize,
		CommP: commP,
	}, werr()
}

// TODO: should *really really* return an io.ReadCloser
func (sb *SectorBuilder) ReadPieceFromSealedSector(pieceKey string) ([]byte, error) {
	ret := sb.RateLimit()
	defer ret()

	return sectorbuilder.ReadPieceFromSealedSector(sb.handle, pieceKey)
}
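
// sealPreCommitRemote waits for a remote worker to deliver the result of a
// dispatched precommit task, or bails out when the SectorBuilder is stopped.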
func (sb *SectorBuilder) sealPreCommitRemote(call workerCall) (RawSealPreCommitOutput, error) {
	select {
	case ret := <-call.ret:
		return ret.Rspco, ret.Err
	case <-sb.stopping:
		return RawSealPreCommitOutput{}, xerrors.New("sectorbuilder stopped")
	}
}
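
// SealPreCommit runs the sealing precommit step for a sector. The task is
// first offered to remote workers; if none accepts immediately, it races a
// remote slot against the local rate limiter and runs wherever capacity
// frees up first, verifying that the pieces exactly fill the sector before
// sealing locally.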
func (sb *SectorBuilder) SealPreCommit(sectorID uint64, ticket SealTicket, pieces []PublicPieceInfo) (RawSealPreCommitOutput, error) {
	call := workerCall{
		task: WorkerTask{
			Type:       WorkerPreCommit,
			TaskID:     atomic.AddUint64(&sb.taskCtr, 1),
			SectorID:   sectorID,
			SealTicket: ticket,
			Pieces:     pieces,
		},
		ret: make(chan SealRes),
	}

	select { // prefer remote
	case sb.sealTasks <- call:
		return sb.sealPreCommitRemote(call)
	default:
	}

	sb.checkRateLimit()

	select { // use whichever is available
	case sb.sealTasks <- call:
		return sb.sealPreCommitRemote(call)
	case sb.rateLimit <- struct{}{}:
	}

	// local
	defer func() {
		<-sb.rateLimit
	}()

	cacheDir, err := sb.sectorCacheDir(sectorID)
	if err != nil {
		return RawSealPreCommitOutput{}, err
	}

	sealedPath, err := sb.SealedSectorPath(sectorID)
	if err != nil {
		return RawSealPreCommitOutput{}, err
	}

	var sum uint64
	for _, piece := range pieces {
		sum += piece.Size
	}
	ussize := UserBytesForSectorSize(sb.ssize)
	if sum != ussize {
		return RawSealPreCommitOutput{}, xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum))
	}

	stagedPath := sb.StagedSectorPath(sectorID)

	rspco, err := sectorbuilder.StandaloneSealPreCommit(
		sb.ssize,
		PoRepProofPartitions,
		cacheDir,
		stagedPath,
		sealedPath,
		sectorID,
		addressToProverID(sb.Miner),
		ticket.TicketBytes,
		pieces,
	)
	if err != nil {
		return RawSealPreCommitOutput{}, xerrors.Errorf("presealing sector %d (%s): %w", sectorID, stagedPath, err)
	}

	return rspco, nil
}
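
// SealCommit generates the PoRep proof for a precommitted sector and then
// imports the sealed sector, together with its piece metadata and
// commitments, into the local go-sectorbuilder state.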
func (sb *SectorBuilder) SealCommit(sectorID uint64, ticket SealTicket, seed SealSeed, pieces []PublicPieceInfo, pieceKeys []string, rspco RawSealPreCommitOutput) (proof []byte, err error) {
	ret := sb.RateLimit()
	defer ret()

	cacheDir, err := sb.sectorCacheDir(sectorID)
	if err != nil {
		return nil, err
	}

	proof, err = sectorbuilder.StandaloneSealCommit(
		sb.ssize,
		PoRepProofPartitions,
		cacheDir,
		sectorID,
		addressToProverID(sb.Miner),
		ticket.TicketBytes,
		seed.TicketBytes,
		pieces,
		rspco,
	)
	if err != nil {
		return nil, xerrors.Errorf("StandaloneSealCommit: %w", err)
	}

	pmeta := make([]sectorbuilder.PieceMetadata, len(pieces))
	for i, piece := range pieces {
		pmeta[i] = sectorbuilder.PieceMetadata{
			Key:   pieceKeys[i],
			Size:  piece.Size,
			CommP: piece.CommP,
		}
	}

	sealedPath, err := sb.SealedSectorPath(sectorID)
	if err != nil {
		return nil, err
	}

	err = sectorbuilder.ImportSealedSector(
		sb.handle,
		sectorID,
		cacheDir,
		sealedPath,
		ticket,
		seed,
		rspco.CommR,
		rspco.CommD,
		rspco.CommC,
		rspco.CommRLast,
		proof,
		pmeta,
	)
	if err != nil {
		return nil, xerrors.Errorf("ImportSealedSector: %w", err)
	}
	return proof, nil
}
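
// Stop closes the stopping channel, unblocking any goroutines waiting on
// remote seal results.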
func (sb *SectorBuilder) Stop() {
	close(sb.stopping)
}