Merge pull request #134 from filecoin-project/feat/more-deals

More deals progress
Łukasz Magiera 2019-08-13 19:32:58 +02:00 committed by GitHub
commit 3a7a21ee83
9 changed files with 416 additions and 203 deletions


@@ -75,7 +75,7 @@ Pond is a graphical testbed for lotus. It can be used to spin up nodes, connect
them in a given topology, start them mining, and observe how they function over
time.
To try it out, run `make pond`, then run the `pond` binary that gets created.
To try it out, run `make pond`, then run `./pond run`.
Once it is running, visit localhost:2222 in your browser.
## Tracing


@@ -4,8 +4,8 @@ import (
"context"
"math"
"github.com/filecoin-project/go-lotus/api"
"github.com/filecoin-project/go-lotus/chain/address"
"github.com/filecoin-project/go-lotus/chain/wallet"
"github.com/filecoin-project/go-lotus/lib/sectorbuilder"
"github.com/filecoin-project/go-lotus/node/modules/dtypes"
@@ -29,12 +29,14 @@ type MinerDeal struct {
Ref cid.Cid
SectorID uint64 // Set when State >= Staged
s inet.Stream
}
type Handler struct {
w *wallet.Wallet
sb *sectorbuilder.SectorBuilder
sb *sectorbuilder.SectorBuilder
full api.FullNode
// TODO: Use a custom protocol or graphsync in the future
// TODO: GC
@@ -55,9 +57,10 @@ type dealUpdate struct {
newState DealState
id cid.Cid
err error
mut func(*MinerDeal)
}
func NewHandler(w *wallet.Wallet, ds dtypes.MetadataDS, sb *sectorbuilder.SectorBuilder, dag dtypes.StagingDAG) (*Handler, error) {
func NewHandler(ds dtypes.MetadataDS, sb *sectorbuilder.SectorBuilder, dag dtypes.StagingDAG, fullNode api.FullNode) (*Handler, error) {
addr, err := ds.Get(datastore.NewKey("miner-address"))
if err != nil {
return nil, err
@@ -68,9 +71,9 @@ func NewHandler(w *wallet.Wallet, ds dtypes.MetadataDS, sb *sectorbuilder.Sector
}
return &Handler{
w: w,
sb: sb,
dag: dag,
full: fullNode,
conns: map[cid.Cid]inet.Stream{},
@@ -136,6 +139,9 @@ func (h *Handler) onUpdated(ctx context.Context, update dealUpdate) {
var deal MinerDeal
err := h.deals.MutateMiner(update.id, func(d *MinerDeal) error {
d.State = update.newState
if update.mut != nil {
update.mut(d)
}
deal = *d
return nil
})
@@ -150,7 +156,7 @@ func (h *Handler) onUpdated(ctx context.Context, update dealUpdate) {
case Staged:
h.handle(ctx, deal, h.staged, Sealing)
case Sealing:
log.Error("TODO")
h.handle(ctx, deal, h.sealing, Complete)
}
}
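
With the Sealing case filled in (the old `log.Error("TODO")` is gone), onUpdated now drives the whole miner-side pipeline: each case hands the deal to the matching handler and names the state to advance to on success, giving Accepted → Staged → Sealing → Complete. A tiny self-contained sketch of that chain; the state names and values here are illustrative stand-ins, the real constants live elsewhere in the deals package:

```go
package main

import "fmt"

// Illustrative stand-ins for the deal states referenced in the switch above.
type DealState string

const (
	Accepted DealState = "Accepted"
	Staged   DealState = "Staged"
	Sealing  DealState = "Sealing"
	Complete DealState = "Complete"
)

// next mirrors the transitions wired up in onUpdated.
var next = map[DealState]DealState{
	Accepted: Staged,   // h.accept fetched the deal data
	Staged:   Sealing,  // h.staged added the piece to a sector
	Sealing:  Complete, // h.sealing waited for the seal and sent the proof
}

func main() {
	for s := Accepted; s != Complete; s = next[s] {
		fmt.Printf("%s -> %s\n", s, next[s])
	}
}
```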


@@ -2,6 +2,9 @@ package deals
import (
"context"
"time"
"github.com/filecoin-project/go-lotus/lib/sectorbuilder"
files "github.com/ipfs/go-ipfs-files"
"github.com/ipfs/go-merkledag"
@@ -9,16 +12,17 @@ import (
"golang.org/x/xerrors"
)
type handlerFunc func(ctx context.Context, deal MinerDeal) error
type handlerFunc func(ctx context.Context, deal MinerDeal) (func(*MinerDeal), error)
func (h *Handler) handle(ctx context.Context, deal MinerDeal, cb handlerFunc, next DealState) {
go func() {
err := cb(ctx, deal)
mut, err := cb(ctx, deal)
select {
case h.updated <- dealUpdate{
newState: next,
id: deal.ProposalCid,
err: err,
mut: mut,
}:
case <-h.stop:
}
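
handlerFunc now returns an optional mutation alongside the error; handle forwards it on the updated channel and onUpdated applies it inside the MutateMiner callback before dispatching the next state, so per-stage results such as the sector ID survive the state change. A minimal, hypothetical handler in that style; stagedLike and the stand-in types are placeholders for illustration, not part of this change:

```go
package main

import (
	"context"
	"fmt"
)

// Stand-in for the deals.MinerDeal fields this sketch needs.
type MinerDeal struct {
	SectorID uint64
}

// handlerFunc mirrors the new signature: stage work plus an optional mutation
// that will be applied to the persisted deal.
type handlerFunc func(ctx context.Context, deal MinerDeal) (func(*MinerDeal), error)

// stagedLike imitates what staged() does below: pick a sector while the stage
// runs, then return a closure that records it on the deal.
func stagedLike(ctx context.Context, deal MinerDeal) (func(*MinerDeal), error) {
	var sectorID uint64 = 42 // placeholder for the ID returned by AddPiece
	return func(d *MinerDeal) { d.SectorID = sectorID }, nil
}

func main() {
	var cb handlerFunc = stagedLike
	deal := MinerDeal{}
	mut, err := cb(context.Background(), deal)
	if err == nil && mut != nil {
		mut(&deal) // onUpdated applies this inside the datastore mutation
	}
	fmt.Println("sector id recorded:", deal.SectorID)
}
```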
@@ -27,14 +31,13 @@ func (h *Handler) handle(ctx context.Context, deal MinerDeal, cb handlerFunc, ne
// ACCEPTED
func (h *Handler) accept(ctx context.Context, deal MinerDeal) error {
log.Info("acc")
func (h *Handler) accept(ctx context.Context, deal MinerDeal) (func(*MinerDeal), error) {
switch deal.Proposal.SerializationMode {
//case SerializationRaw:
//case SerializationIPLD:
case SerializationUnixFs:
default:
return xerrors.Errorf("deal proposal with unsupported serialization: %s", deal.Proposal.SerializationMode)
return nil, xerrors.Errorf("deal proposal with unsupported serialization: %s", deal.Proposal.SerializationMode)
}
// TODO: check payment
@@ -46,18 +49,17 @@ func (h *Handler) accept(ctx context.Context, deal MinerDeal) error {
Proposal: deal.ProposalCid,
})
if err != nil {
return err
return nil, err
}
return merkledag.FetchGraph(ctx, deal.Ref, h.dag)
return nil, merkledag.FetchGraph(ctx, deal.Ref, h.dag)
}
// STAGED
func (h *Handler) staged(ctx context.Context, deal MinerDeal) error {
func (h *Handler) staged(ctx context.Context, deal MinerDeal) (func(*MinerDeal), error) {
err := h.sendSignedResponse(StorageDealResponse{
State: Staged,
Message: "",
Proposal: deal.ProposalCid,
})
if err != nil {
@@ -66,24 +68,24 @@ func (h *Handler) staged(ctx context.Context, deal MinerDeal) error {
root, err := h.dag.Get(ctx, deal.Ref)
if err != nil {
return xerrors.Errorf("failed to get file root for deal: %s", err)
return nil, xerrors.Errorf("failed to get file root for deal: %s", err)
}
// TODO: abstract this away into ReadSizeCloser + implement different modes
n, err := unixfile.NewUnixfsFile(ctx, h.dag, root)
if err != nil {
return xerrors.Errorf("cannot open unixfs file: %s", err)
return nil, xerrors.Errorf("cannot open unixfs file: %s", err)
}
uf, ok := n.(files.File)
if !ok {
// we probably got directory, unsupported for now
return xerrors.Errorf("unsupported unixfs type")
return nil, xerrors.Errorf("unsupported unixfs type")
}
size, err := uf.Size()
if err != nil {
return xerrors.Errorf("failed to get file size: %s", err)
return nil, xerrors.Errorf("failed to get file size: %s", err)
}
var sectorID uint64
@@ -92,11 +94,77 @@ func (h *Handler) staged(ctx context.Context, deal MinerDeal) error {
return err
})
if err != nil {
return xerrors.Errorf("AddPiece failed: %s", err)
return nil, xerrors.Errorf("AddPiece failed: %s", err)
}
log.Warnf("New Sector: %d", sectorID)
return nil
return func(deal *MinerDeal) {
deal.SectorID = sectorID
}, nil
}
// SEALING
func getInclusionProof(ref string, status sectorbuilder.SectorSealingStatus) (PieceInclusionProof, error) {
for i, p := range status.Pieces {
if p.Key == ref {
return PieceInclusionProof{
Position: uint64(i),
ProofElements: p.InclusionProof,
}, nil
}
}
return PieceInclusionProof{}, xerrors.Errorf("pieceInclusionProof for %s in sector %d not found", ref, status.SectorID)
}
func (h *Handler) pollSectorSealed(deal MinerDeal) (status sectorbuilder.SectorSealingStatus, err error) {
loop:
for {
status, err = h.sb.SealStatus(deal.SectorID)
if err != nil {
return sectorbuilder.SectorSealingStatus{}, err
}
switch status.SealStatusCode {
case 0: // sealed
break loop
case 2: // failed
return sectorbuilder.SectorSealingStatus{}, xerrors.Errorf("sealing sector %d for deal %s (ref=%s) failed: %s", deal.SectorID, deal.ProposalCid, deal.Ref, status.SealErrorMsg)
case 1: // pending
if err := h.sb.SealAllStagedSectors(); err != nil {
return sectorbuilder.SectorSealingStatus{}, err
}
// start seal
fallthrough
case 3: // sealing
// wait
default:
return sectorbuilder.SectorSealingStatus{}, xerrors.Errorf("unknown SealStatusCode: %d", status.SectorID)
}
time.Sleep(3 * time.Second)
}
return status, nil
}
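
pollSectorSealed branches on raw status codes. Read off the comments in the loop, they appear to mean sealed, pending, failed and sealing; if that reading is right, named constants would make the loop self-documenting. A hedged sketch, assuming these values really belong to sectorbuilder's SealStatusCode (the names are mine, not part of this change):

```go
package deals

// Assumed meanings of sectorbuilder's SealStatusCode values, inferred from the
// comments in pollSectorSealed above; verify against the sectorbuilder package
// before relying on these names.
const (
	sealStatusSealed  = 0 // sealed: stop polling and return the status
	sealStatusPending = 1 // pending: kick off SealAllStagedSectors, then wait
	sealStatusFailed  = 2 // failed: SealErrorMsg carries the reason
	sealStatusSealing = 3 // sealing: keep waiting
)
```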
func (h *Handler) sealing(ctx context.Context, deal MinerDeal) (func(*MinerDeal), error) {
status, err := h.pollSectorSealed(deal)
if err != nil {
return nil, err
}
ip, err := getInclusionProof(deal.Ref.String(), status)
if err != nil {
return nil, err
}
err = h.sendSignedResponse(StorageDealResponse{
State: Sealing,
Proposal: deal.ProposalCid,
PieceInclusionProof: ip,
})
if err != nil {
log.Warnf("Sending deal response failed: %s", err)
}
return nil, nil
}


@@ -1,13 +1,17 @@
package deals
import (
"context"
"runtime"
"github.com/filecoin-project/go-lotus/chain/actors"
"github.com/filecoin-project/go-lotus/chain/address"
"github.com/filecoin-project/go-lotus/chain/types"
"github.com/filecoin-project/go-lotus/lib/cborrpc"
cbor "github.com/ipfs/go-ipld-cbor"
inet "github.com/libp2p/go-libp2p-core/network"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
inet "github.com/libp2p/go-libp2p-core/network"
"golang.org/x/xerrors"
)
@@ -68,18 +72,21 @@ func (h *Handler) sendSignedResponse(resp StorageDealResponse) error {
return xerrors.Errorf("serializing response: %w", err)
}
def, err := h.w.ListAddrs()
if err != nil {
log.Error(err)
return xerrors.Errorf("listing wallet addresses: %w", err)
getworker := &types.Message{
To: h.actor,
From: h.actor,
Method: actors.MAMethods.GetWorkerAddr,
}
if len(def) != 1 {
// NOTE: If this ever happens for a good reason, implement this with GetWorker on the miner actor
// TODO: implement with GetWorker on the miner actor
return xerrors.Errorf("expected only 1 address in wallet, got %d", len(def))
r, err := h.full.ChainCall(context.TODO(), getworker, nil)
if err != nil {
return xerrors.Errorf("getting worker address: %w", err)
}
worker, err := address.NewFromBytes(r.Return)
if err != nil {
return err
}
sig, err := h.w.Sign(def[0], msg)
sig, err := h.full.WalletSign(context.TODO(), worker, msg)
if err != nil {
return xerrors.Errorf("failed to sign response message: %w", err)
}
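
Signing now goes through the full node rather than a locally held wallet: the miner actor's worker address is resolved with a read-only ChainCall to GetWorkerAddr and the serialized response is signed with WalletSign. Since every signed response repeats that lookup, a small helper could factor it out; this is a sketch of a possible follow-up, assuming it sits next to sendSignedResponse so the imports shown above (actors, address, types, xerrors) are already available:

```go
// getWorkerAddr resolves the miner actor's worker key with a read-only call,
// mirroring the lookup done inline in sendSignedResponse.
func (h *Handler) getWorkerAddr(ctx context.Context) (address.Address, error) {
	r, err := h.full.ChainCall(ctx, &types.Message{
		To:     h.actor,
		From:   h.actor,
		Method: actors.MAMethods.GetWorkerAddr,
	}, nil)
	if err != nil {
		var undef address.Address
		return undef, xerrors.Errorf("getting worker address: %w", err)
	}
	return address.NewFromBytes(r.Return)
}
```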


@@ -65,7 +65,7 @@ type SignedStorageDealProposal struct {
type PieceInclusionProof struct {
Position uint64
ProofElements [32]byte
ProofElements []byte
}
type StorageDealResponse struct {

lotuspond/api.go (new file, +203 lines)

@@ -0,0 +1,203 @@
package main
import (
"fmt"
"github.com/filecoin-project/go-lotus/lib/jsonrpc"
"io"
"io/ioutil"
"os"
"os/exec"
"sync"
"sync/atomic"
"time"
"github.com/filecoin-project/go-lotus/node/repo"
"golang.org/x/xerrors"
)
type api struct {
cmds int32
running map[int32]runningNode
runningLk sync.Mutex
genesis string
}
type nodeInfo struct {
Repo string
ID int32
ApiPort int32
FullNode string // only for storage nodes
Storage bool
}
func (api *api) Spawn() (nodeInfo, error) {
dir, err := ioutil.TempDir(os.TempDir(), "lotus-")
if err != nil {
return nodeInfo{}, err
}
genParam := "--genesis=" + api.genesis
id := atomic.AddInt32(&api.cmds, 1)
if id == 1 {
// make genesis
genf, err := ioutil.TempFile(os.TempDir(), "lotus-genesis-")
if err != nil {
return nodeInfo{}, err
}
api.genesis = genf.Name()
genParam = "--lotus-make-random-genesis=" + api.genesis
if err := genf.Close(); err != nil {
return nodeInfo{}, err
}
}
errlogfile, err := os.OpenFile(dir+".err.log", os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nodeInfo{}, err
}
logfile, err := os.OpenFile(dir+".out.log", os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nodeInfo{}, err
}
cmd := exec.Command("./lotus", "daemon", genParam, "--api", fmt.Sprintf("%d", 2500+id))
cmd.Stderr = io.MultiWriter(os.Stderr, errlogfile)
cmd.Stdout = io.MultiWriter(os.Stdout, logfile)
cmd.Env = []string{"LOTUS_PATH=" + dir}
if err := cmd.Start(); err != nil {
return nodeInfo{}, err
}
info := nodeInfo{
Repo: dir,
ID: id,
ApiPort: 2500 + id,
}
api.runningLk.Lock()
api.running[id] = runningNode{
cmd: cmd,
meta: info,
stop: func() {
defer errlogfile.Close()
defer logfile.Close()
},
}
api.runningLk.Unlock()
time.Sleep(time.Millisecond * 750) // TODO: Something less terrible
return info, nil
}
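
Spawn sleeps for 750 ms and hopes the daemon's API is listening; the TODO admits this is fragile. One less terrible option, sketched here as a suggestion rather than part of this change, is to poll the node's API port until it accepts a TCP connection (this would need "net" added to the imports above):

```go
// waitForAPI polls the spawned node's API port until it accepts a TCP
// connection or the timeout expires; a possible replacement for the fixed
// sleep after cmd.Start().
func waitForAPI(port int32, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", fmt.Sprintf("127.0.0.1:%d", port), 100*time.Millisecond)
		if err == nil {
			return conn.Close()
		}
		time.Sleep(50 * time.Millisecond)
	}
	return xerrors.Errorf("node API on port %d did not come up within %s", port, timeout)
}
```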
func (api *api) Nodes() []nodeInfo {
api.runningLk.Lock()
out := make([]nodeInfo, 0, len(api.running))
for _, node := range api.running {
out = append(out, node.meta)
}
api.runningLk.Unlock()
return out
}
func (api *api) TokenFor(id int32) (string, error) {
api.runningLk.Lock()
defer api.runningLk.Unlock()
rnd, ok := api.running[id]
if !ok {
return "", xerrors.New("no running node with this ID")
}
r, err := repo.NewFS(rnd.meta.Repo)
if err != nil {
return "", err
}
t, err := r.APIToken()
if err != nil {
return "", err
}
return string(t), nil
}
func (api *api) SpawnStorage(fullNodeRepo string) (nodeInfo, error) {
dir, err := ioutil.TempDir(os.TempDir(), "lotus-storage-")
if err != nil {
return nodeInfo{}, err
}
errlogfile, err := os.OpenFile(dir+".err.log", os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nodeInfo{}, err
}
logfile, err := os.OpenFile(dir+".out.log", os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nodeInfo{}, err
}
id := atomic.AddInt32(&api.cmds, 1)
cmd := exec.Command("./lotus-storage-miner", "init")
cmd.Stderr = io.MultiWriter(os.Stderr, errlogfile)
cmd.Stdout = io.MultiWriter(os.Stdout, logfile)
cmd.Env = []string{"LOTUS_STORAGE_PATH=" + dir, "LOTUS_PATH=" + fullNodeRepo}
if err := cmd.Run(); err != nil {
return nodeInfo{}, err
}
time.Sleep(time.Millisecond * 300)
cmd = exec.Command("./lotus-storage-miner", "run", "--api", fmt.Sprintf("%d", 2500+id))
cmd.Stderr = io.MultiWriter(os.Stderr, errlogfile)
cmd.Stdout = io.MultiWriter(os.Stdout, logfile)
cmd.Env = []string{"LOTUS_STORAGE_PATH=" + dir, "LOTUS_PATH=" + fullNodeRepo}
if err := cmd.Start(); err != nil {
return nodeInfo{}, err
}
info := nodeInfo{
Repo: dir,
ID: id,
ApiPort: 2500 + id,
FullNode: fullNodeRepo,
Storage: true,
}
api.runningLk.Lock()
api.running[id] = runningNode{
cmd: cmd,
meta: info,
stop: func() {
defer errlogfile.Close()
defer logfile.Close()
},
}
api.runningLk.Unlock()
time.Sleep(time.Millisecond * 750) // TODO: Something less terrible
return info, nil
}
type client struct {
Nodes func() []nodeInfo
}
func apiClient() (*client, error) {
c := &client{}
if _, err := jsonrpc.NewClient("ws://"+listenAddr+"/rpc/v0", "Pond", c, nil); err != nil {
return nil, err
}
return c, nil
}
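
apiClient gives the CLI commands a thin JSON-RPC view of a running `pond run` daemon: jsonrpc.NewClient fills in the client struct's Nodes func over the websocket at /rpc/v0. A minimal usage sketch, assuming it lives in this package next to apiClient (the listNodes name is a placeholder):

```go
// listNodes connects to the pond daemon and prints the nodes it is managing,
// in the same way onCmd and shCmd look nodes up before running commands.
func listNodes() error {
	c, err := apiClient()
	if err != nil {
		return err
	}
	for _, n := range c.Nodes() {
		fmt.Printf("node %d: repo=%s api=%d storage=%v\n", n.ID, n.Repo, n.ApiPort, n.Storage)
	}
	return nil
}
```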


@@ -2,19 +2,14 @@ package main
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
"strconv"
"github.com/filecoin-project/go-lotus/lib/jsonrpc"
"github.com/filecoin-project/go-lotus/node/repo"
"gopkg.in/urfave/cli.v2"
)
const listenAddr = "127.0.0.1:2222"
@@ -26,185 +21,117 @@ type runningNode struct {
stop func()
}
type api struct {
cmds int32
running map[int32]runningNode
runningLk sync.Mutex
genesis string
}
type nodeInfo struct {
Repo string
ID int32
ApiPort int32
Storage bool
}
func (api *api) Spawn() (nodeInfo, error) {
dir, err := ioutil.TempDir(os.TempDir(), "lotus-")
if err != nil {
return nodeInfo{}, err
}
genParam := "--genesis=" + api.genesis
id := atomic.AddInt32(&api.cmds, 1)
if id == 1 {
// make genesis
genf, err := ioutil.TempFile(os.TempDir(), "lotus-genesis-")
var onCmd = &cli.Command{
Name: "on",
Usage: "run a command on a given node",
Action: func(cctx *cli.Context) error {
client, err := apiClient()
if err != nil {
return nodeInfo{}, err
return err
}
api.genesis = genf.Name()
genParam = "--lotus-make-random-genesis=" + api.genesis
if err := genf.Close(); err != nil {
return nodeInfo{}, err
nd, err := strconv.ParseInt(cctx.Args().Get(0), 10, 32)
if err != nil {
return err
}
}
node := nodeById(client.Nodes(), int(nd))
var cmd *exec.Cmd
if !node.Storage {
cmd = exec.Command("./lotus", cctx.Args().Slice()[1:]...)
cmd.Env = []string{
"LOTUS_PATH=" + node.Repo,
}
} else {
cmd = exec.Command("./lotus-storage-miner")
cmd.Env = []string{
"LOTUS_STORAGE_PATH=" + node.Repo,
"LOTUS_PATH=" + node.FullNode,
}
}
errlogfile, err := os.OpenFile(dir+".err.log", os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nodeInfo{}, err
}
logfile, err := os.OpenFile(dir+".out.log", os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nodeInfo{}, err
}
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd := exec.Command("./lotus", "daemon", genParam, "--api", fmt.Sprintf("%d", 2500+id))
cmd.Stderr = io.MultiWriter(os.Stderr, errlogfile)
cmd.Stdout = io.MultiWriter(os.Stdout, logfile)
cmd.Env = []string{"LOTUS_PATH=" + dir}
if err := cmd.Start(); err != nil {
return nodeInfo{}, err
}
info := nodeInfo{
Repo: dir,
ID: id,
ApiPort: 2500 + id,
}
api.runningLk.Lock()
api.running[id] = runningNode{
cmd: cmd,
meta: info,
stop: func() {
defer errlogfile.Close()
defer logfile.Close()
},
}
api.runningLk.Unlock()
time.Sleep(time.Millisecond * 750) // TODO: Something less terrible
return info, nil
err = cmd.Run()
return err
},
}
func (api *api) Nodes() []nodeInfo {
api.runningLk.Lock()
out := make([]nodeInfo, 0, len(api.running))
for _, node := range api.running {
out = append(out, node.meta)
}
var shCmd = &cli.Command{
Name: "sh",
Usage: "spawn shell with node shell variables set",
Action: func(cctx *cli.Context) error {
client, err := apiClient()
if err != nil {
return err
}
api.runningLk.Unlock()
nd, err := strconv.ParseInt(cctx.Args().Get(0), 10, 32)
if err != nil {
return err
}
return out
node := nodeById(client.Nodes(), int(nd))
shcmd := exec.Command("/bin/bash")
if !node.Storage {
shcmd.Env = []string{
"LOTUS_PATH=" + node.Repo,
}
} else {
shcmd.Env = []string{
"LOTUS_STORAGE_PATH=" + node.Repo,
"LOTUS_PATH=" + node.FullNode,
}
}
shcmd.Stdin = os.Stdin
shcmd.Stdout = os.Stdout
shcmd.Stderr = os.Stderr
fmt.Printf("Entering shell for Node %d\n", nd)
err = shcmd.Run()
fmt.Printf("Closed pond shell\n")
return err
},
}
func (api *api) TokenFor(id int32) (string, error) {
api.runningLk.Lock()
defer api.runningLk.Unlock()
rnd, ok := api.running[id]
if !ok {
return "", errors.New("no running node with this ID")
func nodeById(nodes []nodeInfo, i int) nodeInfo {
for _, n := range nodes {
if n.ID == int32(i) {
return n
}
}
r, err := repo.NewFS(rnd.meta.Repo)
if err != nil {
return "", err
}
t, err := r.APIToken()
if err != nil {
return "", err
}
return string(t), nil
panic("no node with this id")
}
func (api *api) SpawnStorage(fullNodeRepo string) (nodeInfo, error) {
dir, err := ioutil.TempDir(os.TempDir(), "lotus-storage-")
if err != nil {
return nodeInfo{}, err
}
var runCmd = &cli.Command{
Name: "run",
Usage: "run lotuspond daemon",
Action: func(cctx *cli.Context) error {
rpcServer := jsonrpc.NewServer()
rpcServer.Register("Pond", &api{running: map[int32]runningNode{}})
errlogfile, err := os.OpenFile(dir+".err.log", os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nodeInfo{}, err
}
logfile, err := os.OpenFile(dir+".out.log", os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nodeInfo{}, err
}
http.Handle("/", http.FileServer(http.Dir("lotuspond/front/build")))
http.Handle("/rpc/v0", rpcServer)
id := atomic.AddInt32(&api.cmds, 1)
cmd := exec.Command("./lotus-storage-miner", "init")
cmd.Stderr = io.MultiWriter(os.Stderr, errlogfile)
cmd.Stdout = io.MultiWriter(os.Stdout, logfile)
cmd.Env = []string{"LOTUS_STORAGE_PATH=" + dir, "LOTUS_PATH=" + fullNodeRepo}
if err := cmd.Run(); err != nil {
return nodeInfo{}, err
}
time.Sleep(time.Millisecond * 300)
cmd = exec.Command("./lotus-storage-miner", "run", "--api", fmt.Sprintf("%d", 2500+id))
cmd.Stderr = io.MultiWriter(os.Stderr, errlogfile)
cmd.Stdout = io.MultiWriter(os.Stdout, logfile)
cmd.Env = []string{"LOTUS_STORAGE_PATH=" + dir, "LOTUS_PATH=" + fullNodeRepo}
if err := cmd.Start(); err != nil {
return nodeInfo{}, err
}
info := nodeInfo{
Repo: dir,
ID: id,
ApiPort: 2500 + id,
Storage: true,
}
api.runningLk.Lock()
api.running[id] = runningNode{
cmd: cmd,
meta: info,
stop: func() {
defer errlogfile.Close()
defer logfile.Close()
},
}
api.runningLk.Unlock()
time.Sleep(time.Millisecond * 750) // TODO: Something less terrible
return info, nil
fmt.Printf("Listening on http://%s\n", listenAddr)
return http.ListenAndServe(listenAddr, nil)
},
}
func main() {
rpcServer := jsonrpc.NewServer()
rpcServer.Register("Pond", &api{running: map[int32]runningNode{}})
http.Handle("/", http.FileServer(http.Dir("lotuspond/front/build")))
http.Handle("/rpc/v0", rpcServer)
fmt.Printf("Listening on http://%s\n", listenAddr)
http.ListenAndServe(listenAddr, nil)
app := &cli.App{
Name: "pond",
Commands: []*cli.Command{
runCmd,
shCmd,
onCmd,
},
}
if err := app.Run(os.Args); err != nil {
panic(err)
}
}
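
onCmd and shCmd build the same environment by hand: LOTUS_PATH for full nodes, and LOTUS_STORAGE_PATH plus the backing full node's LOTUS_PATH for storage miners. A small helper would keep the two in sync; this is a sketch of a possible refactor, not something this commit adds:

```go
// nodeEnv returns the environment the lotus binaries expect for a given pond
// node, matching what onCmd and shCmd currently set up inline.
func nodeEnv(node nodeInfo) []string {
	if node.Storage {
		return []string{
			"LOTUS_STORAGE_PATH=" + node.Repo, // storage miner repo
			"LOTUS_PATH=" + node.FullNode,     // the full node it talks to
		}
	}
	return []string{"LOTUS_PATH=" + node.Repo}
}
```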


@@ -181,7 +181,6 @@ func Online() Option {
libp2p(),
// common
Override(new(*wallet.Wallet), wallet.NewWallet),
// Full node
@@ -191,6 +190,7 @@ func Online() Option {
Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages),
Override(new(*store.ChainStore), modules.ChainStore),
Override(new(*wallet.Wallet), wallet.NewWallet),
Override(new(dtypes.ChainGCLocker), blockstore.NewGCLocker),
Override(new(dtypes.ChainGCBlockstore), modules.ChainGCBlockstore),


@@ -96,6 +96,8 @@ func (m *Miner) handlePostingSealedSectors(ctx context.Context) {
}
func (m *Miner) commitSector(ctx context.Context, sinfo sectorbuilder.SectorSealingStatus) error {
log.Info("committing sector")
ok, err := sectorbuilder.VerifySeal(1024, sinfo.CommR[:], sinfo.CommD[:], sinfo.CommRStar[:], m.maddr, sinfo.SectorID, sinfo.Proof)
if err != nil {
log.Error("failed to verify seal we just created: ", err)