seal-worker: Handle cache
parent ed9279cf0c
commit 89556819ae
@@ -2,8 +2,11 @@ package main
 
 import (
     "context"
+    files "github.com/ipfs/go-ipfs-files"
     "gopkg.in/cheggaaa/pb.v1"
     "io"
+    "mime"
+    "mime/multipart"
     "net/http"
     "os"
     "path/filepath"
@@ -64,7 +67,7 @@ func acceptJobs(ctx context.Context, api api.StorageMiner, endpoint string, auth
 
         res := w.processTask(ctx, task)
 
-        log.Infof("Task %d done, err: %s", task.TaskID, res.Err)
+        log.Infof("Task %d done, err: %+v", task.TaskID, res.GoErr)
 
         if err := api.WorkerDone(ctx, task.TaskID, res); err != nil {
             log.Error(err)
@@ -97,11 +100,13 @@ func (w *worker) processTask(ctx context.Context, task sectorbuilder.WorkerTask)
         }
         res.Rspco = rspco.ToJson()
 
-        // TODO: push cache
-
         if err := w.push("sealed", task.SectorID); err != nil {
             return errRes(xerrors.Errorf("pushing precommited data: %w", err))
         }
+
+        if err := w.push("cache", task.SectorID); err != nil {
+            return errRes(xerrors.Errorf("pushing precommited data: %w", err))
+        }
     case sectorbuilder.WorkerCommit:
         proof, err := w.sb.SealCommit(task.SectorID, task.SealTicket, task.SealSeed, task.Pieces, task.Rspco)
         if err != nil {
@@ -110,7 +115,9 @@ func (w *worker) processTask(ctx context.Context, task sectorbuilder.WorkerTask)
 
         res.Proof = proof
 
-        // TODO: Push cache
+        if err := w.push("cache", task.SectorID); err != nil {
+            return errRes(xerrors.Errorf("pushing precommited data: %w", err))
+        }
     }
 
     return res
@@ -120,56 +127,82 @@ func (w *worker) fetch(typ string, sectorID uint64) error {
     outname := filepath.Join(w.repo, typ, w.sb.SectorName(sectorID))
 
     url := w.minerEndpoint + "/remote/" + typ + "/" + w.sb.SectorName(sectorID)
-    log.Infof("Fetch %s", url)
+    log.Infof("Fetch %s %s", typ, url)
 
     req, err := http.NewRequest("GET", url, nil)
     if err != nil {
-        return err
+        return xerrors.Errorf("request: %w", err)
     }
     req.Header = w.auth
 
     resp, err := http.DefaultClient.Do(req)
     if err != nil {
-        return err
+        return xerrors.Errorf("do request: %w", err)
     }
 
     defer resp.Body.Close()
 
-    out, err := os.Create(outname)
-    if err != nil {
-        return err
-    }
-    defer out.Close()
-
     bar := pb.New64(resp.ContentLength)
     bar.ShowPercent = true
     bar.ShowSpeed = true
     bar.Units = pb.U_BYTES
 
+    barreader := bar.NewProxyReader(resp.Body)
+
     bar.Start()
     defer bar.Finish()
 
-    _, err = io.Copy(out, bar.NewProxyReader(resp.Body))
-    return err
+    mediatype, p, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
+    if err != nil {
+        return xerrors.Errorf("parse media type: %w", err)
+    }
+
+    var file files.Node
+    switch mediatype {
+    case "multipart/form-data":
+        mpr := multipart.NewReader(barreader, p["boundary"])
+
+        file, err = files.NewFileFromPartReader(mpr, mediatype)
+        if err != nil {
+            return xerrors.Errorf("call to NewFileFromPartReader failed: %w", err)
+        }
+
+    case "application/octet-stream":
+        file = files.NewReaderFile(barreader)
+    default:
+        return xerrors.Errorf("unknown content type: '%s'", mediatype)
+    }
+
+    // WriteTo is unhappy when things exist
+    if err := os.RemoveAll(outname); err != nil {
+        return xerrors.Errorf("removing dest: %w", err)
+    }
+
+    return files.WriteTo(file, outname)
 }
 
 func (w *worker) push(typ string, sectorID uint64) error {
     outname := filepath.Join(w.repo, typ, w.sb.SectorName(sectorID))
 
-    f, err := os.OpenFile(outname, os.O_RDONLY, 0644)
+    stat, err := os.Stat(outname)
+    if err != nil {
+        return err
+    }
+
+    f, err := files.NewSerialFile(outname, false, stat)
     if err != nil {
         return err
     }
 
     url := w.minerEndpoint + "/remote/" + typ + "/" + w.sb.SectorName(sectorID)
-    log.Infof("Push %s", url)
+    log.Infof("Push %s %s", typ, url)
 
-    fi, err := f.Stat()
+    sz, err := f.Size()
     if err != nil {
-        return err
+        return xerrors.Errorf("getting size: %w", err)
     }
 
-    bar := pb.New64(fi.Size())
+    bar := pb.New64(sz)
     bar.ShowPercent = true
     bar.ShowSpeed = true
     bar.Units = pb.U_BYTES
@@ -177,11 +210,25 @@ func (w *worker) push(typ string, sectorID uint64) error {
     bar.Start()
     defer bar.Finish()
     //todo set content size
-    req, err := http.NewRequest("PUT", url, bar.NewProxyReader(f))
+    header := w.auth
+
+    var r io.Reader
+    r, file := f.(files.File)
+    if !file {
+        mfr := files.NewMultiFileReader(f.(files.Directory), true)
+
+        header.Set("Content-Type", "multipart/form-data; boundary="+mfr.Boundary())
+        r = mfr
+    } else {
+        header.Set("Content-Type", "application/octet-stream")
+    }
+
+    req, err := http.NewRequest("PUT", url, bar.NewProxyReader(r))
     if err != nil {
         return err
     }
-    req.Header = w.auth
+    req.Header = header
 
     resp, err := http.DefaultClient.Do(req)
     if err != nil {
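For context on the hunk above: the push side now keys the request body and Content-Type off the kind of files.Node it is sending. The sketch below is illustrative only and not part of this commit; the helper name bodyFor, the local path, and the endpoint URL are made up, and error handling is elided. It shows the same decision in isolation: a plain file (staged/sealed data) streams as application/octet-stream, while a directory such as the sector cache is wrapped in files.NewMultiFileReader and sent as multipart/form-data with the reader's boundary.

package main

import (
    "io"
    "net/http"
    "os"

    files "github.com/ipfs/go-ipfs-files"
)

// bodyFor picks the request body and Content-Type for a files.Node:
// single files stream as application/octet-stream, directories (such as
// a sector cache dir) are serialized as multipart/form-data.
func bodyFor(nd files.Node) (io.Reader, string) {
    if f, ok := nd.(files.File); ok {
        return f, "application/octet-stream"
    }
    mfr := files.NewMultiFileReader(nd.(files.Directory), true)
    return mfr, "multipart/form-data; boundary=" + mfr.Boundary()
}

func main() {
    path := "./some-sector-cache" // hypothetical local path
    stat, _ := os.Stat(path)
    nd, _ := files.NewSerialFile(path, false, stat)

    body, ctype := bodyFor(nd)
    // Made-up endpoint; the real URL is minerEndpoint + "/remote/" + typ + "/" + sectorName.
    req, _ := http.NewRequest("PUT", "http://127.0.0.1:2345/remote/cache/s-42", body)
    req.Header.Set("Content-Type", ctype)
    resp, _ := http.DefaultClient.Do(req)
    if resp != nil {
        resp.Body.Close()
    }
}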
@@ -201,7 +248,10 @@ func (w *worker) fetchSector(sectorID uint64, typ sectorbuilder.WorkerTaskType)
         err = w.fetch("staged", sectorID)
     case sectorbuilder.WorkerCommit:
         err = w.fetch("sealed", sectorID)
-        // todo: cache
+        if err != nil {
+            return xerrors.Errorf("fetch sealed: %w", err)
+        }
+        err = w.fetch("cache", sectorID)
     }
     if err != nil {
         return xerrors.Errorf("fetch failed: %w", err)
@@ -210,5 +260,5 @@ func (w *worker) fetchSector(sectorID uint64, typ sectorbuilder.WorkerTaskType)
 }
 
 func errRes(err error) sectorbuilder.SealRes {
-    return sectorbuilder.SealRes{Err: err.Error()}
+    return sectorbuilder.SealRes{Err: err.Error(), GoErr: err}
 }
@@ -44,23 +44,16 @@ func (sb *SectorBuilder) sectorCacheDir(sectorID uint64) (string, error) {
     return dir, err
 }
 
-func (sb *SectorBuilder) OpenRemoteRead(typ string, sectorName string) (*os.File, error) {
+func (sb *SectorBuilder) GetPath(typ string, sectorName string) (string, error) {
     switch typ {
     case "staged":
-        return os.OpenFile(filepath.Join(sb.stagedDir, sectorName), os.O_RDONLY, 0644)
+        return filepath.Join(sb.stagedDir, sectorName), nil
     case "sealed":
-        return os.OpenFile(filepath.Join(sb.sealedDir, sectorName), os.O_RDONLY, 0644)
+        return filepath.Join(sb.sealedDir, sectorName), nil
+    case "cache":
+        return filepath.Join(sb.cacheDir, sectorName), nil
     default:
-        return nil, xerrors.Errorf("unknown sector type for read: %s", typ)
-    }
-}
-
-func (sb *SectorBuilder) OpenRemoteWrite(typ string, sectorName string) (*os.File, error) {
-    switch typ {
-    case "sealed":
-        return os.OpenFile(filepath.Join(sb.sealedDir, sectorName), os.O_WRONLY|os.O_CREATE, 0644)
-    default:
-        return nil, xerrors.Errorf("unknown sector type for write: %s", typ)
+        return "", xerrors.Errorf("unknown sector type for write: %s", typ)
     }
 }
 
@@ -98,6 +98,7 @@ func (rspco *JsonRSPCO) rspco() RawSealPreCommitOutput {
 
 type SealRes struct {
     Err string
+    GoErr error `json:"-"`
 
     Proof []byte
     Rspco JsonRSPCO
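A quick standalone illustration (a trimmed-down copy of the struct, not the real SealRes) of what the json:"-" tag on GoErr buys: when the result is marshalled for the worker/miner exchange, only the Err string is encoded, while the original Go error stays available to code running in the same process, which is what the %+v log line above prints.

package main

import (
    "encoding/json"
    "errors"
    "fmt"
)

// Trimmed-down copy of the struct, for illustration only.
type SealRes struct {
    Err   string
    GoErr error `json:"-"`
}

func main() {
    err := errors.New("pushing precommited data: boom")
    res := SealRes{Err: err.Error(), GoErr: err}

    b, _ := json.Marshal(res)
    // Prints {"Err":"pushing precommited data: boom"}; GoErr never crosses the wire.
    fmt.Println(string(b))
}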
@@ -3,17 +3,18 @@ package impl
 import (
     "context"
     "encoding/json"
-    "fmt"
-    "github.com/gorilla/mux"
-    "io"
-    "net/http"
 
     "github.com/filecoin-project/lotus/api"
     "github.com/filecoin-project/lotus/chain/address"
     "github.com/filecoin-project/lotus/lib/sectorbuilder"
     "github.com/filecoin-project/lotus/miner"
     "github.com/filecoin-project/lotus/storage"
     "github.com/filecoin-project/lotus/storage/sectorblocks"
+    "github.com/gorilla/mux"
+    files "github.com/ipfs/go-ipfs-files"
+    "io"
+    "mime"
+    "net/http"
+    "os"
 )
 
 type StorageMinerAPI struct {
@@ -48,22 +49,40 @@ func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) {
 func (sm *StorageMinerAPI) remoteGetSector(w http.ResponseWriter, r *http.Request) {
     vars := mux.Vars(r)
 
-    fr, err := sm.SectorBuilder.OpenRemoteRead(vars["type"], vars["sname"])
+    path, err := sm.SectorBuilder.GetPath(vars["type"], vars["sname"])
     if err != nil {
         log.Error(err)
         w.WriteHeader(500)
         return
     }
-    defer fr.Close()
 
-    fi, err := fr.Stat()
+    stat, err := os.Stat(path)
     if err != nil {
+        log.Error(err)
+        w.WriteHeader(500)
         return
     }
 
-    w.Header().Set("Content-Length", fmt.Sprint(fi.Size()))
+    f, err := files.NewSerialFile(path, false, stat)
+    if err != nil {
+        log.Error(err)
+        w.WriteHeader(500)
+        return
+    }
+
+    var rd io.Reader
+    rd, file := f.(files.File)
+    if !file {
+        mfr := files.NewMultiFileReader(f.(files.Directory), true)
+
+        w.Header().Set("Content-Type", "multipart/form-data; boundary="+mfr.Boundary())
+        rd = mfr
+    } else {
+        w.Header().Set("Content-Type", "application/octet-stream")
+    }
+
     w.WriteHeader(200)
-    if _, err := io.Copy(w, fr); err != nil {
+    if _, err := io.Copy(w, rd); err != nil {
         log.Error(err)
         return
     }
@@ -72,21 +91,58 @@ func (sm *StorageMinerAPI) remoteGetSector(w http.ResponseWriter, r *http.Reques
 func (sm *StorageMinerAPI) remotePutSector(w http.ResponseWriter, r *http.Request) {
     vars := mux.Vars(r)
 
-    fr, err := sm.SectorBuilder.OpenRemoteWrite(vars["type"], vars["sname"])
+    path, err := sm.SectorBuilder.GetPath(vars["type"], vars["sname"])
     if err != nil {
         log.Error(err)
         w.WriteHeader(500)
         return
     }
-    defer fr.Close()
 
-    w.WriteHeader(200)
-    n, err := io.Copy(fr, r.Body)
+    var file files.Node
+    mediatype, _, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
     if err != nil {
         log.Error(err)
+        w.WriteHeader(500)
         return
     }
-    log.Infof("received %s sector (%s): %d bytes", vars["type"], vars["sname"], n)
+    switch mediatype {
+    case "multipart/form-data":
+        mpr, err := r.MultipartReader()
+        if err != nil {
+            log.Error(err)
+            w.WriteHeader(500)
+            return
+        }
+
+        file, err = files.NewFileFromPartReader(mpr, mediatype)
+        if err != nil {
+            log.Error(err)
+            w.WriteHeader(500)
+            return
+        }
+
+    default:
+        file = files.NewReaderFile(r.Body)
+    }
+
+    // WriteTo is unhappy when things exist (also cleans up cache after Commit)
+    if err := os.RemoveAll(path); err != nil {
+        log.Error(err)
+        w.WriteHeader(500)
+        return
+    }
+
+    if err := files.WriteTo(file, path); err != nil {
+        log.Error(err)
+        w.WriteHeader(500)
+        return
+    }
+
+    w.WriteHeader(200)
+
+    log.Infof("received %s sector (%s): %d bytes", vars["type"], vars["sname"], r.ContentLength)
 }
 
 func (sm *StorageMinerAPI) WorkerStats(context.Context) (sectorbuilder.WorkerStats, error) {
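Taken together, the worker's push/fetch and the miner's /remote handlers form a round trip over go-ipfs-files. The following is a minimal self-contained sketch of that round trip under the assumption that the go-ipfs-files API matches what is vendored here; the directory name and its contents are made up. It wraps a directory with NewSerialFile, serializes it with NewMultiFileReader, parses it back with multipart.NewReader plus NewFileFromPartReader, and materializes it with WriteTo, clearing the destination first just as the new code does.

package main

import (
    "fmt"
    "io/ioutil"
    "mime"
    "mime/multipart"
    "os"
    "path/filepath"

    files "github.com/ipfs/go-ipfs-files"
)

func main() {
    // Hypothetical stand-in for a sector cache directory.
    src, err := ioutil.TempDir("", "cache-src")
    if err != nil {
        panic(err)
    }
    if err := ioutil.WriteFile(filepath.Join(src, "p_aux"), []byte("aux"), 0644); err != nil {
        panic(err)
    }

    // Sender side: wrap the directory and serialize it as multipart/form-data.
    stat, _ := os.Stat(src)
    node, _ := files.NewSerialFile(src, false, stat)
    mfr := files.NewMultiFileReader(node.(files.Directory), true)
    contentType := "multipart/form-data; boundary=" + mfr.Boundary()

    // Receiver side: parse the media type and rebuild a files.Node from the parts.
    mediatype, params, _ := mime.ParseMediaType(contentType)
    mpr := multipart.NewReader(mfr, params["boundary"])
    rebuilt, _ := files.NewFileFromPartReader(mpr, mediatype)

    // WriteTo refuses to overwrite, so clear the destination first.
    dst := filepath.Join(os.TempDir(), "cache-dst")
    _ = os.RemoveAll(dst)
    if err := files.WriteTo(rebuilt, dst); err != nil {
        panic(err)
    }
    fmt.Println("cache directory rebuilt under", dst)
}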
@@ -2,4 +2,4 @@
 
 set -o xtrace
 
-rm -rf ~/.lotus ~/.lotusstorage/ ~/.genesis-sectors
+rm -rf ~/.lotus ~/.lotusstorage/ ~/.genesis-sectors ~/.lotusworker