Merge commit 'origin/release/v1.15.0~2' into feat/paych-avail-reuse

Łukasz Magiera 2022-02-14 19:27:12 +01:00
commit 10af768c60
130 changed files with 3437 additions and 595 deletions

View File

@@ -44,13 +44,13 @@ commands:
     - restore_cache:
         name: Restore parameters cache
         keys:
-          - 'v25-2k-lotus-params'
+          - 'v26-2k-lotus-params'
         paths:
           - /var/tmp/filecoin-proof-parameters/
     - run: ./lotus fetch-params 2048
     - save_cache:
         name: Save parameters cache
-        key: 'v25-2k-lotus-params'
+        key: 'v26-2k-lotus-params'
         paths:
           - /var/tmp/filecoin-proof-parameters/
   install_ipfs:

View File

@@ -44,13 +44,13 @@ commands:
     - restore_cache:
         name: Restore parameters cache
         keys:
-          - 'v25-2k-lotus-params'
+          - 'v26-2k-lotus-params'
        paths:
           - /var/tmp/filecoin-proof-parameters/
     - run: ./lotus fetch-params 2048
     - save_cache:
         name: Save parameters cache
-        key: 'v25-2k-lotus-params'
+        key: 'v26-2k-lotus-params'
         paths:
           - /var/tmp/filecoin-proof-parameters/
   install_ipfs:

View File

@@ -113,6 +113,8 @@ type StorageMiner interface {
 	// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
 	SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
 	SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin
+	// SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort it
+	SectorAbortUpgrade(context.Context, abi.SectorNumber) error //perm:admin
 
 	// WorkerConnect tells the node to connect to workers RPC
 	WorkerConnect(context.Context, string) error //perm:admin retry:true
@@ -130,6 +132,7 @@ type StorageMiner interface {
 	ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storage.ReplicaVanillaProofs, err *storiface.CallError) error //perm:admin retry:true
 	ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true
 	ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+	ReturnFinalizeReplicaUpdate(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
 	ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
 	ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
 	ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
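The two methods added above extend the StorageMiner admin API. As a minimal, illustrative sketch (the helper name and its error handling are not part of this commit), a client holding an api.StorageMiner handle could abort an in-progress sector upgrade like this:

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
)

// abortUpgrade is a hypothetical helper: it asks the miner node to abort an
// in-progress upgrade of the given sector, using the endpoint added above.
func abortUpgrade(ctx context.Context, miner api.StorageMiner, num abi.SectorNumber) error {
	// SectorAbortUpgrade is admin-only (//perm:admin), so the API token needs admin rights.
	return miner.SectorAbortUpgrade(ctx, num)
}
```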

View File

@@ -39,6 +39,7 @@ type Worker interface {
 	SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin
 	SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin
 	FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
+	FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
 	ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
 	ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin
 	ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin

View File

@@ -723,6 +723,8 @@ type StorageMinerStruct struct {
 		ReturnFetch func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
 
+		ReturnFinalizeReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
+
 		ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
 
 		ReturnGenerateSectorKeyFromData func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
@@ -755,6 +757,8 @@ type StorageMinerStruct struct {
 		SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"`
 
+		SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
+
 		SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"`
 
 		SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"`
@@ -872,6 +876,8 @@ type WorkerStruct struct {
 		Fetch func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
 
+		FinalizeReplicaUpdate func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
+
 		FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
 
 		GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"`
@@ -4280,6 +4286,17 @@ func (s *StorageMinerStub) ReturnFetch(p0 context.Context, p1 storiface.CallID,
 	return ErrNotSupported
 }
 
+func (s *StorageMinerStruct) ReturnFinalizeReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+	if s.Internal.ReturnFinalizeReplicaUpdate == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.ReturnFinalizeReplicaUpdate(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) ReturnFinalizeReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) ReturnFinalizeSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
 	if s.Internal.ReturnFinalizeSector == nil {
 		return ErrNotSupported
@@ -4456,6 +4473,17 @@ func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interf
 	return nil, ErrNotSupported
 }
 
+func (s *StorageMinerStruct) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+	if s.Internal.SectorAbortUpgrade == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.SectorAbortUpgrade(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) {
 	if s.Internal.SectorAddPieceToAny == nil {
 		return *new(SectorOffset), ErrNotSupported
@@ -5006,6 +5034,17 @@ func (s *WorkerStub) Fetch(p0 context.Context, p1 storage.SectorRef, p2 storifac
 	return *new(storiface.CallID), ErrNotSupported
 }
 
+func (s *WorkerStruct) FinalizeReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
+	if s.Internal.FinalizeReplicaUpdate == nil {
+		return *new(storiface.CallID), ErrNotSupported
+	}
+	return s.Internal.FinalizeReplicaUpdate(p0, p1, p2)
+}
+
+func (s *WorkerStub) FinalizeReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
+	return *new(storiface.CallID), ErrNotSupported
+}
+
 func (s *WorkerStruct) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
 	if s.Internal.FinalizeSector == nil {
 		return *new(storiface.CallID), ErrNotSupported

View File

@@ -49,10 +49,11 @@ These are options in the `[Chainstore.Splitstore]` section of the configuration:
   blockstore and discards writes; this is necessary to support syncing from a snapshot.
 - `MarkSetType` -- specifies the type of markset to use during compaction.
   The markset is the data structure used by compaction/gc to track live objects.
-  The default value is `"map"`, which will use an in-memory map; if you are limited
-  in memory (or indeed see compaction run out of memory), you can also specify
-  `"badger"` which will use an disk backed markset, using badger. This will use
-  much less memory, but will also make compaction slower.
+  The default value is "badger", which will use a disk backed markset using badger.
+  If you have a lot of memory (48G or more) you can also use "map", which will use
+  an in memory markset, speeding up compaction at the cost of higher memory usage.
+  Note: If you are using a VPS with a network volume, you need to provision at least
+  3000 IOPs with the badger markset.
 - `HotStoreMessageRetention` -- specifies how many finalities, beyond the 4
   finalities maintained by default, to maintain messages and message receipts in the
   hotstore. This is useful for assistive nodes that want to support syncing for other
@@ -105,6 +106,12 @@ Compaction works transactionally with the following algorithm:
 - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live
 - We then end the transaction and compact/gc the hotstore.
 
+As of [#8008](https://github.com/filecoin-project/lotus/pull/8008) the compaction algorithm has been
+modified to eliminate sorting and maintain the cold object set on disk. This drastically reduces
+memory usage; in fact, when using badger as the markset compaction uses very little memory, and
+it should be now possible to run splitstore with 32GB of RAM or less without danger of running out of
+memory during compaction.
+
 ## Garbage Collection
 
 TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577)
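The on-disk cold set mentioned in the new paragraph above is implemented by the ColdSetWriter/ColdSetReader added in this commit (coldset.go, shown later). A rough sketch of the flow, with the surrounding compaction logic reduced to a placeholder callback (the real logic lives in splitstore_compact.go):

```go
package splitstore

import cid "github.com/ipfs/go-cid"

// coldSetSketch is illustrative only: during the mark phase cold CIDs are streamed
// to disk instead of being accumulated (and sorted) in memory; later the file is
// replayed to move/purge the cold objects.
func coldSetSketch(coldPath string, coldObjects []cid.Cid, purge func(cid.Cid) error) error {
	cw, err := NewColdSetWriter(coldPath)
	if err != nil {
		return err
	}
	for _, c := range coldObjects {
		if err := cw.Write(c); err != nil {
			return err
		}
	}
	if err := cw.Close(); err != nil {
		return err
	}

	cr, err := NewColdSetReader(coldPath)
	if err != nil {
		return err
	}
	defer cr.Close() //nolint:errcheck
	return cr.ForEach(purge)
}
```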

View File

@ -0,0 +1,118 @@
package splitstore
import (
"bufio"
"io"
"os"
"golang.org/x/xerrors"
cid "github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
)
type Checkpoint struct {
file *os.File
buf *bufio.Writer
}
func NewCheckpoint(path string) (*Checkpoint, error) {
file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_SYNC, 0644)
if err != nil {
return nil, xerrors.Errorf("error creating checkpoint: %w", err)
}
buf := bufio.NewWriter(file)
return &Checkpoint{
file: file,
buf: buf,
}, nil
}
func OpenCheckpoint(path string) (*Checkpoint, cid.Cid, error) {
filein, err := os.Open(path)
if err != nil {
return nil, cid.Undef, xerrors.Errorf("error opening checkpoint for reading: %w", err)
}
defer filein.Close() //nolint:errcheck
bufin := bufio.NewReader(filein)
start, err := readRawCid(bufin, nil)
if err != nil && err != io.EOF {
return nil, cid.Undef, xerrors.Errorf("error reading cid from checkpoint: %w", err)
}
fileout, err := os.OpenFile(path, os.O_WRONLY|os.O_SYNC, 0644)
if err != nil {
return nil, cid.Undef, xerrors.Errorf("error opening checkpoint for writing: %w", err)
}
bufout := bufio.NewWriter(fileout)
return &Checkpoint{
file: fileout,
buf: bufout,
}, start, nil
}
func (cp *Checkpoint) Set(c cid.Cid) error {
if _, err := cp.file.Seek(0, io.SeekStart); err != nil {
return xerrors.Errorf("error seeking beginning of checkpoint: %w", err)
}
if err := writeRawCid(cp.buf, c, true); err != nil {
return xerrors.Errorf("error writing cid to checkpoint: %w", err)
}
return nil
}
func (cp *Checkpoint) Close() error {
if cp.file == nil {
return nil
}
err := cp.file.Close()
cp.file = nil
cp.buf = nil
return err
}
func readRawCid(buf *bufio.Reader, hbuf []byte) (cid.Cid, error) {
sz, err := buf.ReadByte()
if err != nil {
return cid.Undef, err // don't wrap EOF as it is not an error here
}
if hbuf == nil {
hbuf = make([]byte, int(sz))
} else {
hbuf = hbuf[:int(sz)]
}
if _, err := io.ReadFull(buf, hbuf); err != nil {
return cid.Undef, xerrors.Errorf("error reading hash: %w", err) // wrap EOF, it's corrupt
}
hash, err := mh.Cast(hbuf)
if err != nil {
return cid.Undef, xerrors.Errorf("error casting multihash: %w", err)
}
return cid.NewCidV1(cid.Raw, hash), nil
}
func writeRawCid(buf *bufio.Writer, c cid.Cid, flush bool) error {
hash := c.Hash()
if err := buf.WriteByte(byte(len(hash))); err != nil {
return err
}
if _, err := buf.Write(hash); err != nil {
return err
}
if flush {
return buf.Flush()
}
return nil
}
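To show how the Checkpoint above is meant to be used, here is a hedged sketch of a purge loop that records its progress so a crashed compaction can resume via OpenCheckpoint; the batch layout and the purge callback are illustrative, the real usage is in splitstore_compact.go:

```go
package splitstore

import cid "github.com/ipfs/go-cid"

// checkpointSketch purges cold objects batch by batch and records the last CID of
// each completed batch, so that a restart can call OpenCheckpoint(path) and skip
// everything up to that CID. Illustrative only.
func checkpointSketch(path string, batches [][]cid.Cid, purge func([]cid.Cid) error) error {
	cp, err := NewCheckpoint(path)
	if err != nil {
		return err
	}
	defer cp.Close() //nolint:errcheck

	for _, batch := range batches {
		if len(batch) == 0 {
			continue
		}
		if err := purge(batch); err != nil {
			return err
		}
		// the checkpoint is rewritten in place and flushed on every Set
		if err := cp.Set(batch[len(batch)-1]); err != nil {
			return err
		}
	}
	return nil
}
```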

View File

@ -0,0 +1,147 @@
package splitstore
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
)
func TestCheckpoint(t *testing.T) {
dir, err := ioutil.TempDir("", "checkpoint.*")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
_ = os.RemoveAll(dir)
})
path := filepath.Join(dir, "checkpoint")
makeCid := func(key string) cid.Cid {
h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
if err != nil {
t.Fatal(err)
}
return cid.NewCidV1(cid.Raw, h)
}
k1 := makeCid("a")
k2 := makeCid("b")
k3 := makeCid("c")
k4 := makeCid("d")
cp, err := NewCheckpoint(path)
if err != nil {
t.Fatal(err)
}
if err := cp.Set(k1); err != nil {
t.Fatal(err)
}
if err := cp.Set(k2); err != nil {
t.Fatal(err)
}
if err := cp.Close(); err != nil {
t.Fatal(err)
}
cp, start, err := OpenCheckpoint(path)
if err != nil {
t.Fatal(err)
}
if !start.Equals(k2) {
t.Fatalf("expected start to be %s; got %s", k2, start)
}
if err := cp.Set(k3); err != nil {
t.Fatal(err)
}
if err := cp.Set(k4); err != nil {
t.Fatal(err)
}
if err := cp.Close(); err != nil {
t.Fatal(err)
}
cp, start, err = OpenCheckpoint(path)
if err != nil {
t.Fatal(err)
}
if !start.Equals(k4) {
t.Fatalf("expected start to be %s; got %s", k4, start)
}
if err := cp.Close(); err != nil {
t.Fatal(err)
}
// also test correct operation with an empty checkpoint
cp, err = NewCheckpoint(path)
if err != nil {
t.Fatal(err)
}
if err := cp.Close(); err != nil {
t.Fatal(err)
}
cp, start, err = OpenCheckpoint(path)
if err != nil {
t.Fatal(err)
}
if start.Defined() {
t.Fatal("expected start to be undefined")
}
if err := cp.Set(k1); err != nil {
t.Fatal(err)
}
if err := cp.Set(k2); err != nil {
t.Fatal(err)
}
if err := cp.Close(); err != nil {
t.Fatal(err)
}
cp, start, err = OpenCheckpoint(path)
if err != nil {
t.Fatal(err)
}
if !start.Equals(k2) {
t.Fatalf("expected start to be %s; got %s", k2, start)
}
if err := cp.Set(k3); err != nil {
t.Fatal(err)
}
if err := cp.Set(k4); err != nil {
t.Fatal(err)
}
if err := cp.Close(); err != nil {
t.Fatal(err)
}
cp, start, err = OpenCheckpoint(path)
if err != nil {
t.Fatal(err)
}
if !start.Equals(k4) {
t.Fatalf("expected start to be %s; got %s", k4, start)
}
if err := cp.Close(); err != nil {
t.Fatal(err)
}
}

View File

@ -0,0 +1,102 @@
package splitstore
import (
"bufio"
"io"
"os"
"golang.org/x/xerrors"
cid "github.com/ipfs/go-cid"
)
type ColdSetWriter struct {
file *os.File
buf *bufio.Writer
}
type ColdSetReader struct {
file *os.File
buf *bufio.Reader
}
func NewColdSetWriter(path string) (*ColdSetWriter, error) {
file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
if err != nil {
return nil, xerrors.Errorf("error creating coldset: %w", err)
}
buf := bufio.NewWriter(file)
return &ColdSetWriter{
file: file,
buf: buf,
}, nil
}
func NewColdSetReader(path string) (*ColdSetReader, error) {
file, err := os.Open(path)
if err != nil {
return nil, xerrors.Errorf("error opening coldset: %w", err)
}
buf := bufio.NewReader(file)
return &ColdSetReader{
file: file,
buf: buf,
}, nil
}
func (s *ColdSetWriter) Write(c cid.Cid) error {
return writeRawCid(s.buf, c, false)
}
func (s *ColdSetWriter) Close() error {
if s.file == nil {
return nil
}
err1 := s.buf.Flush()
err2 := s.file.Close()
s.buf = nil
s.file = nil
if err1 != nil {
return err1
}
return err2
}
func (s *ColdSetReader) ForEach(f func(cid.Cid) error) error {
hbuf := make([]byte, 256)
for {
next, err := readRawCid(s.buf, hbuf)
if err != nil {
if err == io.EOF {
return nil
}
return xerrors.Errorf("error reading coldset: %w", err)
}
if err := f(next); err != nil {
return err
}
}
}
func (s *ColdSetReader) Reset() error {
_, err := s.file.Seek(0, io.SeekStart)
return err
}
func (s *ColdSetReader) Close() error {
if s.file == nil {
return nil
}
err := s.file.Close()
s.file = nil
s.buf = nil
return err
}

View File

@ -0,0 +1,99 @@
package splitstore
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
)
func TestColdSet(t *testing.T) {
dir, err := ioutil.TempDir("", "coldset.*")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
_ = os.RemoveAll(dir)
})
path := filepath.Join(dir, "coldset")
makeCid := func(i int) cid.Cid {
h, err := multihash.Sum([]byte(fmt.Sprintf("cid.%d", i)), multihash.SHA2_256, -1)
if err != nil {
t.Fatal(err)
}
return cid.NewCidV1(cid.Raw, h)
}
const count = 1000
cids := make([]cid.Cid, 0, count)
for i := 0; i < count; i++ {
cids = append(cids, makeCid(i))
}
cw, err := NewColdSetWriter(path)
if err != nil {
t.Fatal(err)
}
for _, c := range cids {
if err := cw.Write(c); err != nil {
t.Fatal(err)
}
}
if err := cw.Close(); err != nil {
t.Fatal(err)
}
cr, err := NewColdSetReader(path)
if err != nil {
t.Fatal(err)
}
index := 0
err = cr.ForEach(func(c cid.Cid) error {
if index >= count {
t.Fatal("too many cids")
}
if !c.Equals(cids[index]) {
t.Fatalf("wrong cid %d; expected %s but got %s", index, cids[index], c)
}
index++
return nil
})
if err != nil {
t.Fatal(err)
}
if err := cr.Reset(); err != nil {
t.Fatal(err)
}
index = 0
err = cr.ForEach(func(c cid.Cid) error {
if index >= count {
t.Fatal("too many cids")
}
if !c.Equals(cids[index]) {
t.Fatalf("wrong cid; expected %s but got %s", cids[index], c)
}
index++
return nil
})
if err != nil {
t.Fatal(err)
}
}

View File

@@ -10,39 +10,36 @@ import (
 var errMarkSetClosed = errors.New("markset closed")
 
-// MarkSet is a utility to keep track of seen CID, and later query for them.
-//
-// * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt).
-// * If a probabilistic result is acceptable, it can be backed by a bloom filter
+// MarkSet is an interface for tracking CIDs during chain and object walks
 type MarkSet interface {
+	ObjectVisitor
 	Mark(cid.Cid) error
+	MarkMany([]cid.Cid) error
 	Has(cid.Cid) (bool, error)
 	Close() error
-	SetConcurrent()
-}
 
-type MarkSetVisitor interface {
-	MarkSet
-	ObjectVisitor
+	// BeginCriticalSection ensures that the markset is persisted to disk for recovery in case
+	// of abnormal termination during the critical section span.
+	BeginCriticalSection() error
+	// EndCriticalSection ends the critical section span.
+	EndCriticalSection()
 }
 
 type MarkSetEnv interface {
-	// Create creates a new markset within the environment.
-	// name is a unique name for this markset, mapped to the filesystem in disk-backed environments
+	// New creates a new markset within the environment.
+	// name is a unique name for this markset, mapped to the filesystem for on-disk persistence.
 	// sizeHint is a hint about the expected size of the markset
-	Create(name string, sizeHint int64) (MarkSet, error)
-	// CreateVisitor is like Create, but returns a wider interface that supports atomic visits.
-	// It may not be supported by some markset types (e.g. bloom).
-	CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error)
-	// SupportsVisitor returns true if the marksets created by this environment support the visitor interface.
-	SupportsVisitor() bool
+	New(name string, sizeHint int64) (MarkSet, error)
+	// Recover recovers an existing markset persisted on-disk.
+	Recover(name string) (MarkSet, error)
+	// Close closes the markset
 	Close() error
 }
 
 func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) {
 	switch mtype {
 	case "map":
-		return NewMapMarkSetEnv()
+		return NewMapMarkSetEnv(path)
 	case "badger":
 		return NewBadgerMarkSetEnv(path)
 	default:
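To make the reworked interface concrete, here is a hedged sketch of the intended lifecycle: create a markset, mark objects during the walk, and hold a critical section across the span that must survive a crash (after which env.Recover can reload it). The walk itself is elided and the local names are illustrative:

```go
package splitstore

import cid "github.com/ipfs/go-cid"

// markSketch is illustrative only; compaction in splitstore_compact.go is the real user.
func markSketch(env MarkSetEnv, roots []cid.Cid) error {
	ms, err := env.New("live", 0) // in-memory for "map", on-disk for "badger"
	if err != nil {
		return err
	}
	defer ms.Close() //nolint:errcheck

	if err := ms.MarkMany(roots); err != nil {
		return err
	}

	// from here until EndCriticalSection the markset is persisted/synced, so an
	// abnormally terminated compaction can be resumed with env.Recover("live")
	if err := ms.BeginCriticalSection(); err != nil {
		return err
	}
	defer ms.EndCriticalSection()

	for _, c := range roots {
		if _, err := ms.Has(c); err != nil {
			return err
		}
	}
	return nil
}
```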

View File

@@ -3,6 +3,7 @@ package splitstore
 import (
 	"os"
 	"path/filepath"
+	"runtime"
 	"sync"
 
 	"golang.org/x/xerrors"
@@ -28,13 +29,13 @@ type BadgerMarkSet struct {
 	writers int
 	seqno   int
 	version int
+	persist bool
 
 	db   *badger.DB
 	path string
 }
 
 var _ MarkSet = (*BadgerMarkSet)(nil)
-var _ MarkSetVisitor = (*BadgerMarkSet)(nil)
 
 var badgerMarkSetBatchSize = 16384
@@ -48,11 +49,10 @@ func NewBadgerMarkSetEnv(path string) (MarkSetEnv, error) {
 	return &BadgerMarkSetEnv{path: msPath}, nil
 }
 
-func (e *BadgerMarkSetEnv) create(name string, sizeHint int64) (*BadgerMarkSet, error) {
-	name += ".tmp"
+func (e *BadgerMarkSetEnv) New(name string, sizeHint int64) (MarkSet, error) {
 	path := filepath.Join(e.path, name)
-	db, err := openTransientBadgerDB(path)
+	db, err := openBadgerDB(path, false)
 	if err != nil {
 		return nil, xerrors.Errorf("error creating badger db: %w", err)
 	}
@@ -68,18 +68,72 @@ func (e *BadgerMarkSetEnv) create(name string, sizeHint int64) (*BadgerMarkSet,
 	return ms, nil
 }
 
-func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
-	return e.create(name, sizeHint)
-}
+func (e *BadgerMarkSetEnv) Recover(name string) (MarkSet, error) {
+	path := filepath.Join(e.path, name)
 
-func (e *BadgerMarkSetEnv) CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error) {
-	return e.create(name, sizeHint)
-}
+	if _, err := os.Stat(path); err != nil {
+		return nil, xerrors.Errorf("error stating badger db path: %w", err)
+	}
 
-func (e *BadgerMarkSetEnv) SupportsVisitor() bool { return true }
+	db, err := openBadgerDB(path, true)
+	if err != nil {
+		return nil, xerrors.Errorf("error creating badger db: %w", err)
+	}
+
+	ms := &BadgerMarkSet{
+		pend:    make(map[string]struct{}),
+		writing: make(map[int]map[string]struct{}),
+		db:      db,
+		path:    path,
+		persist: true,
+	}
+	ms.cond.L = &ms.mx
+
+	return ms, nil
+}
 
 func (e *BadgerMarkSetEnv) Close() error {
-	return os.RemoveAll(e.path)
+	return nil
+}
+
+func (s *BadgerMarkSet) BeginCriticalSection() error {
+	s.mx.Lock()
+
+	if s.persist {
+		s.mx.Unlock()
+		return nil
+	}
+
+	var write bool
+	var seqno int
+	if len(s.pend) > 0 {
+		write = true
+		seqno = s.nextBatch()
+	}
+
+	s.persist = true
+	s.mx.Unlock()
+
+	if write {
+		// all writes sync once perist is true
+		return s.write(seqno)
+	}
+
+	// wait for any pending writes and sync
+	s.mx.Lock()
+	for s.writers > 0 {
+		s.cond.Wait()
+	}
+	s.mx.Unlock()
+
+	return s.db.Sync()
+}
+
+func (s *BadgerMarkSet) EndCriticalSection() {
+	s.mx.Lock()
+	defer s.mx.Unlock()
+
+	s.persist = false
 }
 
 func (s *BadgerMarkSet) Mark(c cid.Cid) error {
@@ -99,6 +153,23 @@ func (s *BadgerMarkSet) Mark(c cid.Cid) error {
 	return nil
 }
 
+func (s *BadgerMarkSet) MarkMany(batch []cid.Cid) error {
+	s.mx.Lock()
+	if s.pend == nil {
+		s.mx.Unlock()
+		return errMarkSetClosed
+	}
+
+	write, seqno := s.putMany(batch)
+	s.mx.Unlock()
+
+	if write {
+		return s.write(seqno)
+	}
+
+	return nil
+}
+
 func (s *BadgerMarkSet) Has(c cid.Cid) (bool, error) {
 	s.mx.RLock()
 	defer s.mx.RUnlock()
@@ -204,16 +275,34 @@ func (s *BadgerMarkSet) tryDB(key []byte) (has bool, err error) {
 // writer holds the exclusive lock
 func (s *BadgerMarkSet) put(key string) (write bool, seqno int) {
 	s.pend[key] = struct{}{}
-	if len(s.pend) < badgerMarkSetBatchSize {
+	if !s.persist && len(s.pend) < badgerMarkSetBatchSize {
 		return false, 0
 	}
 
-	seqno = s.seqno
+	seqno = s.nextBatch()
+	return true, seqno
+}
+
+func (s *BadgerMarkSet) putMany(batch []cid.Cid) (write bool, seqno int) {
+	for _, c := range batch {
+		key := string(c.Hash())
+		s.pend[key] = struct{}{}
+	}
+
+	if !s.persist && len(s.pend) < badgerMarkSetBatchSize {
+		return false, 0
+	}
+
+	seqno = s.nextBatch()
+	return true, seqno
+}
+
+func (s *BadgerMarkSet) nextBatch() int {
+	seqno := s.seqno
 	s.seqno++
 	s.writing[seqno] = s.pend
 	s.pend = make(map[string]struct{})
-
-	return true, seqno
+	return seqno
 }
 
 func (s *BadgerMarkSet) write(seqno int) (err error) {
@@ -258,6 +347,14 @@ func (s *BadgerMarkSet) write(seqno int) (err error) {
 		return xerrors.Errorf("error flushing batch to badger markset: %w", err)
 	}
 
+	s.mx.RLock()
+	persist := s.persist
+	s.mx.RUnlock()
+
+	if persist {
+		return s.db.Sync()
+	}
+
 	return nil
 }
 
@@ -277,13 +374,12 @@ func (s *BadgerMarkSet) Close() error {
 	db := s.db
 	s.db = nil
 
-	return closeTransientBadgerDB(db, s.path)
+	return closeBadgerDB(db, s.path, s.persist)
 }
 
-func (s *BadgerMarkSet) SetConcurrent() {}
-
-func openTransientBadgerDB(path string) (*badger.DB, error) {
-	// clean up first
+func openBadgerDB(path string, recover bool) (*badger.DB, error) {
+	// if it is not a recovery, clean up first
+	if !recover {
 	err := os.RemoveAll(path)
 	if err != nil {
 		return nil, xerrors.Errorf("error clearing markset directory: %w", err)
@@ -293,10 +389,14 @@ func openTransientBadgerDB(path string) (*badger.DB, error) {
 	if err != nil {
 		return nil, xerrors.Errorf("error creating markset directory: %w", err)
 	}
+	}
 
 	opts := badger.DefaultOptions(path)
+	// we manually sync when we are in critical section
 	opts.SyncWrites = false
+	// no need to do that
 	opts.CompactL0OnClose = false
+	// we store hashes, not much to gain by compression
 	opts.Compression = options.None
 	// Note: We use FileIO for loading modes to avoid memory thrashing and interference
 	// between the system blockstore and the markset.
@@ -305,6 +405,15 @@ func openTransientBadgerDB(path string) (*badger.DB, error) {
 	// exceeded 1GB in size.
 	opts.TableLoadingMode = options.FileIO
 	opts.ValueLogLoadingMode = options.FileIO
+	// We increase the number of L0 tables before compaction to make it unlikely to
+	// be necessary.
+	opts.NumLevelZeroTables = 20      // default is 5
+	opts.NumLevelZeroTablesStall = 30 // default is 10
+	// increase the number of compactors from default 2 so that if we ever have to
+	// compact, it is fast
+	if runtime.NumCPU()/2 > opts.NumCompactors {
+		opts.NumCompactors = runtime.NumCPU() / 2
+	}
 	opts.Logger = &badgerLogger{
 		SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(),
 		skip2:         log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(),
@@ -313,12 +422,16 @@ func openTransientBadgerDB(path string) (*badger.DB, error) {
 	return badger.Open(opts)
 }
 
-func closeTransientBadgerDB(db *badger.DB, path string) error {
+func closeBadgerDB(db *badger.DB, path string, persist bool) error {
 	err := db.Close()
 	if err != nil {
 		return xerrors.Errorf("error closing badger markset: %w", err)
 	}
 
+	if persist {
+		return nil
+	}
+
 	err = os.RemoveAll(path)
 	if err != nil {
 		return xerrors.Errorf("error deleting badger markset: %w", err)

View File

@@ -1,12 +1,20 @@
 package splitstore
 
 import (
+	"bufio"
+	"io"
+	"os"
+	"path/filepath"
 	"sync"
 
+	"golang.org/x/xerrors"
+
 	cid "github.com/ipfs/go-cid"
 )
 
-type MapMarkSetEnv struct{}
+type MapMarkSetEnv struct {
+	path string
+}
 
 var _ MarkSetEnv = (*MapMarkSetEnv)(nil)
 
@@ -14,55 +22,194 @@ type MapMarkSet struct {
 	mx  sync.RWMutex
 	set map[string]struct{}
 
-	ts bool
+	persist bool
+	file    *os.File
+	buf     *bufio.Writer
+
+	path string
 }
 
 var _ MarkSet = (*MapMarkSet)(nil)
-var _ MarkSetVisitor = (*MapMarkSet)(nil)
 
-func NewMapMarkSetEnv() (*MapMarkSetEnv, error) {
-	return &MapMarkSetEnv{}, nil
+func NewMapMarkSetEnv(path string) (*MapMarkSetEnv, error) {
+	msPath := filepath.Join(path, "markset.map")
+	err := os.MkdirAll(msPath, 0755) //nolint:gosec
+	if err != nil {
+		return nil, xerrors.Errorf("error creating markset directory: %w", err)
+	}
+
+	return &MapMarkSetEnv{path: msPath}, nil
 }
 
-func (e *MapMarkSetEnv) create(name string, sizeHint int64) (*MapMarkSet, error) {
+func (e *MapMarkSetEnv) New(name string, sizeHint int64) (MarkSet, error) {
+	path := filepath.Join(e.path, name)
 	return &MapMarkSet{
-		set: make(map[string]struct{}, sizeHint),
+		set:  make(map[string]struct{}, sizeHint),
+		path: path,
 	}, nil
 }
 
-func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
-	return e.create(name, sizeHint)
-}
+func (e *MapMarkSetEnv) Recover(name string) (MarkSet, error) {
+	path := filepath.Join(e.path, name)
+	s := &MapMarkSet{
+		set:  make(map[string]struct{}),
+		path: path,
+	}
 
-func (e *MapMarkSetEnv) CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error) {
-	return e.create(name, sizeHint)
-}
+	in, err := os.Open(path)
+	if err != nil {
+		return nil, xerrors.Errorf("error opening markset file for read: %w", err)
+	}
+	defer in.Close() //nolint:errcheck
 
-func (e *MapMarkSetEnv) SupportsVisitor() bool { return true }
+	// wrap a buffered reader to make this faster
+	buf := bufio.NewReader(in)
+	for {
+		var sz byte
+		if sz, err = buf.ReadByte(); err != nil {
+			break
+		}
+
+		key := make([]byte, int(sz))
+		if _, err = io.ReadFull(buf, key); err != nil {
+			break
+		}
+
+		s.set[string(key)] = struct{}{}
+	}
+
+	if err != io.EOF {
+		return nil, xerrors.Errorf("error reading markset file: %w", err)
+	}
+
+	file, err := os.OpenFile(s.path, os.O_WRONLY|os.O_APPEND, 0)
+	if err != nil {
+		return nil, xerrors.Errorf("error opening markset file for write: %w", err)
+	}
+
+	s.persist = true
+	s.file = file
+	s.buf = bufio.NewWriter(file)
+
+	return s, nil
+}
 
 func (e *MapMarkSetEnv) Close() error {
 	return nil
 }
 
-func (s *MapMarkSet) Mark(cid cid.Cid) error {
-	if s.ts {
-		s.mx.Lock()
-		defer s.mx.Unlock()
-	}
+func (s *MapMarkSet) BeginCriticalSection() error {
+	s.mx.Lock()
+	defer s.mx.Unlock()
 
 	if s.set == nil {
 		return errMarkSetClosed
 	}
 
-	s.set[string(cid.Hash())] = struct{}{}
+	if s.persist {
+		return nil
+	}
+
+	file, err := os.OpenFile(s.path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
+	if err != nil {
+		return xerrors.Errorf("error opening markset file: %w", err)
+	}
+
+	// wrap a buffered writer to make this faster
+	s.buf = bufio.NewWriter(file)
+	for key := range s.set {
+		if err := s.writeKey([]byte(key), false); err != nil {
+			_ = file.Close()
+			s.buf = nil
+			return err
+		}
+	}
+	if err := s.buf.Flush(); err != nil {
+		_ = file.Close()
+		s.buf = nil
+		return xerrors.Errorf("error flushing markset file buffer: %w", err)
+	}
+
+	s.file = file
+	s.persist = true
+	return nil
+}
+
+func (s *MapMarkSet) EndCriticalSection() {
+	s.mx.Lock()
+	defer s.mx.Unlock()
+
+	if !s.persist {
+		return
+	}
+
+	_ = s.file.Close()
+	_ = os.Remove(s.path)
+	s.file = nil
+	s.buf = nil
+	s.persist = false
+}
+
+func (s *MapMarkSet) Mark(c cid.Cid) error {
+	s.mx.Lock()
+	defer s.mx.Unlock()
+
+	if s.set == nil {
+		return errMarkSetClosed
+	}
+
+	hash := c.Hash()
+	s.set[string(hash)] = struct{}{}
+
+	if s.persist {
+		if err := s.writeKey(hash, true); err != nil {
+			return err
+		}
+		if err := s.file.Sync(); err != nil {
+			return xerrors.Errorf("error syncing markset: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (s *MapMarkSet) MarkMany(batch []cid.Cid) error {
+	s.mx.Lock()
+	defer s.mx.Unlock()
+
+	if s.set == nil {
+		return errMarkSetClosed
+	}
+
+	for _, c := range batch {
+		hash := c.Hash()
+		s.set[string(hash)] = struct{}{}
+
+		if s.persist {
+			if err := s.writeKey(hash, false); err != nil {
+				return err
+			}
+		}
+	}
+
+	if s.persist {
+		if err := s.buf.Flush(); err != nil {
+			return xerrors.Errorf("error flushing markset buffer to disk: %w", err)
+		}
+		if err := s.file.Sync(); err != nil {
+			return xerrors.Errorf("error syncing markset: %w", err)
+		}
+	}
 
 	return nil
 }
 
 func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) {
-	if s.ts {
-		s.mx.RLock()
-		defer s.mx.RUnlock()
-	}
+	s.mx.RLock()
+	defer s.mx.RUnlock()
 
 	if s.set == nil {
 		return false, errMarkSetClosed
@@ -73,33 +220,70 @@ func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) {
 }
 
 func (s *MapMarkSet) Visit(c cid.Cid) (bool, error) {
-	if s.ts {
-		s.mx.Lock()
-		defer s.mx.Unlock()
-	}
+	s.mx.Lock()
+	defer s.mx.Unlock()
 
 	if s.set == nil {
 		return false, errMarkSetClosed
 	}
 
-	key := string(c.Hash())
+	hash := c.Hash()
+	key := string(hash)
 	if _, ok := s.set[key]; ok {
 		return false, nil
 	}
 
 	s.set[key] = struct{}{}
+
+	if s.persist {
+		if err := s.writeKey(hash, true); err != nil {
+			return false, err
+		}
+		if err := s.file.Sync(); err != nil {
+			return false, xerrors.Errorf("error syncing markset: %w", err)
+		}
+	}
+
 	return true, nil
 }
 
 func (s *MapMarkSet) Close() error {
-	if s.ts {
-		s.mx.Lock()
-		defer s.mx.Unlock()
+	s.mx.Lock()
+	defer s.mx.Unlock()
+
+	if s.set == nil {
+		return nil
 	}
 
 	s.set = nil
+
+	if s.file != nil {
+		if err := s.file.Close(); err != nil {
+			log.Warnf("error closing markset file: %s", err)
+		}
+
+		if !s.persist {
+			if err := os.Remove(s.path); err != nil {
+				log.Warnf("error removing markset file: %s", err)
+			}
+		}
+	}
+
 	return nil
 }
 
-func (s *MapMarkSet) SetConcurrent() {
-	s.ts = true
+func (s *MapMarkSet) writeKey(k []byte, flush bool) error {
+	if err := s.buf.WriteByte(byte(len(k))); err != nil {
+		return xerrors.Errorf("error writing markset key length to disk: %w", err)
+	}
+	if _, err := s.buf.Write(k); err != nil {
+		return xerrors.Errorf("error writing markset key to disk: %w", err)
+	}
+	if flush {
+		if err := s.buf.Flush(); err != nil {
+			return xerrors.Errorf("error flushing markset buffer to disk: %w", err)
+		}
+	}
+
+	return nil
 }

View File

@@ -11,7 +11,10 @@ import (
 
 func TestMapMarkSet(t *testing.T) {
 	testMarkSet(t, "map")
+	testMarkSetRecovery(t, "map")
+	testMarkSetMarkMany(t, "map")
 	testMarkSetVisitor(t, "map")
+	testMarkSetVisitorRecovery(t, "map")
 }
 
 func TestBadgerMarkSet(t *testing.T) {
@@ -21,12 +24,13 @@ func TestBadgerMarkSet(t *testing.T) {
 		badgerMarkSetBatchSize = bs
 	})
 	testMarkSet(t, "badger")
+	testMarkSetRecovery(t, "badger")
+	testMarkSetMarkMany(t, "badger")
 	testMarkSetVisitor(t, "badger")
+	testMarkSetVisitorRecovery(t, "badger")
 }
 
 func testMarkSet(t *testing.T, lsType string) {
-	t.Helper()
-
 	path, err := ioutil.TempDir("", "markset.*")
 	if err != nil {
 		t.Fatal(err)
@@ -42,12 +46,12 @@ func testMarkSet(t *testing.T, lsType string) {
 	}
 	defer env.Close() //nolint:errcheck
 
-	hotSet, err := env.Create("hot", 0)
+	hotSet, err := env.New("hot", 0)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	coldSet, err := env.Create("cold", 0)
+	coldSet, err := env.New("cold", 0)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -62,6 +66,7 @@ func testMarkSet(t *testing.T, lsType string) {
 	}
 
 	mustHave := func(s MarkSet, cid cid.Cid) {
+		t.Helper()
 		has, err := s.Has(cid)
 		if err != nil {
 			t.Fatal(err)
@@ -73,6 +78,7 @@ func testMarkSet(t *testing.T, lsType string) {
 	}
 
 	mustNotHave := func(s MarkSet, cid cid.Cid) {
+		t.Helper()
 		has, err := s.Has(cid)
 		if err != nil {
 			t.Fatal(err)
@@ -114,12 +120,12 @@ func testMarkSet(t *testing.T, lsType string) {
 		t.Fatal(err)
 	}
 
-	hotSet, err = env.Create("hot", 0)
+	hotSet, err = env.New("hot", 0)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	coldSet, err = env.Create("cold", 0)
+	coldSet, err = env.New("cold", 0)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -150,8 +156,6 @@ func testMarkSet(t *testing.T, lsType string) {
 }
 
 func testMarkSetVisitor(t *testing.T, lsType string) {
-	t.Helper()
-
 	path, err := ioutil.TempDir("", "markset.*")
 	if err != nil {
 		t.Fatal(err)
@@ -167,7 +171,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) {
 	}
 	defer env.Close() //nolint:errcheck
 
-	visitor, err := env.CreateVisitor("test", 0)
+	visitor, err := env.New("test", 0)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -219,3 +223,322 @@ func testMarkSetVisitor(t *testing.T, lsType string) {
 	mustNotVisit(visitor, k3)
 	mustNotVisit(visitor, k4)
 }
func testMarkSetVisitorRecovery(t *testing.T, lsType string) {
path, err := ioutil.TempDir("", "markset.*")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
_ = os.RemoveAll(path)
})
env, err := OpenMarkSetEnv(path, lsType)
if err != nil {
t.Fatal(err)
}
defer env.Close() //nolint:errcheck
visitor, err := env.New("test", 0)
if err != nil {
t.Fatal(err)
}
defer visitor.Close() //nolint:errcheck
makeCid := func(key string) cid.Cid {
h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
if err != nil {
t.Fatal(err)
}
return cid.NewCidV1(cid.Raw, h)
}
mustVisit := func(v ObjectVisitor, cid cid.Cid) {
visit, err := v.Visit(cid)
if err != nil {
t.Fatal(err)
}
if !visit {
t.Fatal("object should be visited")
}
}
mustNotVisit := func(v ObjectVisitor, cid cid.Cid) {
visit, err := v.Visit(cid)
if err != nil {
t.Fatal(err)
}
if visit {
t.Fatal("unexpected visit")
}
}
k1 := makeCid("a")
k2 := makeCid("b")
k3 := makeCid("c")
k4 := makeCid("d")
mustVisit(visitor, k1)
mustVisit(visitor, k2)
if err := visitor.BeginCriticalSection(); err != nil {
t.Fatal(err)
}
mustVisit(visitor, k3)
mustVisit(visitor, k4)
mustNotVisit(visitor, k1)
mustNotVisit(visitor, k2)
mustNotVisit(visitor, k3)
mustNotVisit(visitor, k4)
if err := visitor.Close(); err != nil {
t.Fatal(err)
}
visitor, err = env.Recover("test")
if err != nil {
t.Fatal(err)
}
mustNotVisit(visitor, k1)
mustNotVisit(visitor, k2)
mustNotVisit(visitor, k3)
mustNotVisit(visitor, k4)
visitor.EndCriticalSection()
if err := visitor.Close(); err != nil {
t.Fatal(err)
}
_, err = env.Recover("test")
if err == nil {
t.Fatal("expected recovery to fail")
}
}
func testMarkSetRecovery(t *testing.T, lsType string) {
path, err := ioutil.TempDir("", "markset.*")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
_ = os.RemoveAll(path)
})
env, err := OpenMarkSetEnv(path, lsType)
if err != nil {
t.Fatal(err)
}
defer env.Close() //nolint:errcheck
markSet, err := env.New("test", 0)
if err != nil {
t.Fatal(err)
}
makeCid := func(key string) cid.Cid {
h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
if err != nil {
t.Fatal(err)
}
return cid.NewCidV1(cid.Raw, h)
}
mustHave := func(s MarkSet, cid cid.Cid) {
t.Helper()
has, err := s.Has(cid)
if err != nil {
t.Fatal(err)
}
if !has {
t.Fatal("mark not found")
}
}
mustNotHave := func(s MarkSet, cid cid.Cid) {
t.Helper()
has, err := s.Has(cid)
if err != nil {
t.Fatal(err)
}
if has {
t.Fatal("unexpected mark")
}
}
k1 := makeCid("a")
k2 := makeCid("b")
k3 := makeCid("c")
k4 := makeCid("d")
if err := markSet.Mark(k1); err != nil {
t.Fatal(err)
}
if err := markSet.Mark(k2); err != nil {
t.Fatal(err)
}
mustHave(markSet, k1)
mustHave(markSet, k2)
mustNotHave(markSet, k3)
mustNotHave(markSet, k4)
if err := markSet.BeginCriticalSection(); err != nil {
t.Fatal(err)
}
if err := markSet.Mark(k3); err != nil {
t.Fatal(err)
}
if err := markSet.Mark(k4); err != nil {
t.Fatal(err)
}
mustHave(markSet, k1)
mustHave(markSet, k2)
mustHave(markSet, k3)
mustHave(markSet, k4)
if err := markSet.Close(); err != nil {
t.Fatal(err)
}
markSet, err = env.Recover("test")
if err != nil {
t.Fatal(err)
}
mustHave(markSet, k1)
mustHave(markSet, k2)
mustHave(markSet, k3)
mustHave(markSet, k4)
markSet.EndCriticalSection()
if err := markSet.Close(); err != nil {
t.Fatal(err)
}
_, err = env.Recover("test")
if err == nil {
t.Fatal("expected recovery to fail")
}
}
func testMarkSetMarkMany(t *testing.T, lsType string) {
path, err := ioutil.TempDir("", "markset.*")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
_ = os.RemoveAll(path)
})
env, err := OpenMarkSetEnv(path, lsType)
if err != nil {
t.Fatal(err)
}
defer env.Close() //nolint:errcheck
markSet, err := env.New("test", 0)
if err != nil {
t.Fatal(err)
}
makeCid := func(key string) cid.Cid {
h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
if err != nil {
t.Fatal(err)
}
return cid.NewCidV1(cid.Raw, h)
}
mustHave := func(s MarkSet, cid cid.Cid) {
t.Helper()
has, err := s.Has(cid)
if err != nil {
t.Fatal(err)
}
if !has {
t.Fatal("mark not found")
}
}
mustNotHave := func(s MarkSet, cid cid.Cid) {
t.Helper()
has, err := s.Has(cid)
if err != nil {
t.Fatal(err)
}
if has {
t.Fatal("unexpected mark")
}
}
k1 := makeCid("a")
k2 := makeCid("b")
k3 := makeCid("c")
k4 := makeCid("d")
if err := markSet.MarkMany([]cid.Cid{k1, k2}); err != nil {
t.Fatal(err)
}
mustHave(markSet, k1)
mustHave(markSet, k2)
mustNotHave(markSet, k3)
mustNotHave(markSet, k4)
if err := markSet.BeginCriticalSection(); err != nil {
t.Fatal(err)
}
if err := markSet.MarkMany([]cid.Cid{k3, k4}); err != nil {
t.Fatal(err)
}
mustHave(markSet, k1)
mustHave(markSet, k2)
mustHave(markSet, k3)
mustHave(markSet, k4)
if err := markSet.Close(); err != nil {
t.Fatal(err)
}
markSet, err = env.Recover("test")
if err != nil {
t.Fatal(err)
}
mustHave(markSet, k1)
mustHave(markSet, k2)
mustHave(markSet, k3)
mustHave(markSet, k4)
markSet.EndCriticalSection()
if err := markSet.Close(); err != nil {
t.Fatal(err)
}
_, err = env.Recover("test")
if err == nil {
t.Fatal("expected recovery to fail")
}
}

View File

@@ -129,8 +129,6 @@ type SplitStore struct {
 	headChangeMx sync.Mutex
 
-	coldPurgeSize int
-
 	chain ChainAccessor
 	ds    dstore.Datastore
 	cold  bstore.Blockstore
@@ -158,6 +156,10 @@ type SplitStore struct {
 	txnRefsMx  sync.Mutex
 	txnRefs    map[cid.Cid]struct{}
 	txnMissing map[cid.Cid]struct{}
+	txnMarkSet MarkSet
+	txnSyncMx   sync.Mutex
+	txnSyncCond sync.Cond
+	txnSync     bool
 
 	// registered protectors
 	protectors []func(func(cid.Cid) error) error
@@ -186,10 +188,6 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
 		return nil, err
 	}
 
-	if !markSetEnv.SupportsVisitor() {
-		return nil, xerrors.Errorf("markset type does not support atomic visitors")
-	}
-
 	// and now we can make a SplitStore
 	ss := &SplitStore{
 		cfg: cfg,
@@ -198,11 +196,10 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
 		cold:       cold,
 		hot:        hots,
 		markSetEnv: markSetEnv,
-
-		coldPurgeSize: defaultColdPurgeSize,
 	}
 
 	ss.txnViewsCond.L = &ss.txnViewsMx
+	ss.txnSyncCond.L = &ss.txnSyncMx
 	ss.ctx, ss.cancel = context.WithCancel(context.Background())
 
 	if enableDebugLog {
@@ -212,6 +209,14 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
 		}
 	}
 
+	if ss.checkpointExists() {
+		log.Info("found compaction checkpoint; resuming compaction")
+		if err := ss.completeCompaction(); err != nil {
+			markSetEnv.Close() //nolint:errcheck
+			return nil, xerrors.Errorf("error resuming compaction: %w", err)
+		}
+	}
+
 	return ss, nil
 }
@@ -234,6 +239,20 @@ func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
 	s.txnLk.RLock()
 	defer s.txnLk.RUnlock()
 
+	// critical section
+	if s.txnMarkSet != nil {
+		has, err := s.txnMarkSet.Has(cid)
+		if err != nil {
+			return false, err
+		}
+
+		if has {
+			return s.has(cid)
+		}
+
+		return s.cold.Has(ctx, cid)
+	}
+
 	has, err := s.hot.Has(ctx, cid)
 
 	if err != nil {
@@ -261,6 +280,20 @@ func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error)
 	s.txnLk.RLock()
 	defer s.txnLk.RUnlock()
 
+	// critical section
+	if s.txnMarkSet != nil {
+		has, err := s.txnMarkSet.Has(cid)
+		if err != nil {
+			return nil, err
+		}
+
+		if has {
+			return s.get(cid)
+		}
+
+		return s.cold.Get(ctx, cid)
+	}
+
 	blk, err := s.hot.Get(ctx, cid)
 
 	switch err {
@@ -298,6 +331,20 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
 	s.txnLk.RLock()
 	defer s.txnLk.RUnlock()
 
+	// critical section
+	if s.txnMarkSet != nil {
+		has, err := s.txnMarkSet.Has(cid)
+		if err != nil {
+			return 0, err
+		}
+
+		if has {
+			return s.getSize(cid)
+		}
+
+		return s.cold.GetSize(ctx, cid)
+	}
+
 	size, err := s.hot.GetSize(ctx, cid)
 
 	switch err {
@@ -336,6 +383,12 @@ func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error {
 
 	s.debug.LogWrite(blk)
 
+	// critical section
+	if s.txnMarkSet != nil {
+		s.markLiveRefs([]cid.Cid{blk.Cid()})
+		return nil
+	}
+
 	s.trackTxnRef(blk.Cid())
 	return nil
 }
@@ -381,6 +434,12 @@ func (s *SplitStore) PutMany(ctx context.Context, blks []blocks.Block) error {
 
 	s.debug.LogWriteMany(blks)
 
+	// critical section
+	if s.txnMarkSet != nil {
+		s.markLiveRefs(batch)
+		return nil
+	}
+
 	s.trackTxnRefMany(batch)
 	return nil
 }
@@ -440,6 +499,23 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro
 		return cb(data)
 	}
 
+	// critical section
+	s.txnLk.RLock() // the lock is released in protectView if we are not in critical section
+	if s.txnMarkSet != nil {
+		has, err := s.txnMarkSet.Has(cid)
+		s.txnLk.RUnlock()
+
+		if err != nil {
+			return err
+		}
+
+		if has {
+			return s.view(cid, cb)
+		}
+
+		return s.cold.View(ctx, cid, cb)
+	}
+
 	// views are (optimistically) protected two-fold:
 	// - if there is an active transaction, then the reference is protected.
 	// - if there is no active transaction, active views are tracked in a
@@ -589,6 +665,11 @@ func (s *SplitStore) Close() error {
 	}
 
 	if atomic.LoadInt32(&s.compacting) == 1 {
+		s.txnSyncMx.Lock()
+		s.txnSync = true
+		s.txnSyncCond.Broadcast()
+		s.txnSyncMx.Unlock()
+
 		log.Warn("close with ongoing compaction in progress; waiting for it to finish...")
 		for atomic.LoadInt32(&s.compacting) == 1 {
 			time.Sleep(time.Second)
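The Has/Get/GetSize/View changes above all repeat one routing pattern: while a compaction critical section is active (txnMarkSet != nil), objects present in the markset are known to still be hot and everything else is served from the cold store. A condensed, illustrative sketch of that pattern (this method does not exist in the commit; the real logic is inlined in each accessor):

```go
package splitstore

import (
	"context"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
)

// getDuringCompaction condenses the critical-section read path shown above.
// Illustrative only.
func (s *SplitStore) getDuringCompaction(ctx context.Context, c cid.Cid) (blocks.Block, error) {
	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	if s.txnMarkSet != nil {
		has, err := s.txnMarkSet.Has(c)
		if err != nil {
			return nil, err
		}
		if has {
			return s.get(c) // marked live: still in the hotstore
		}
		return s.cold.Get(ctx, c) // already moved to the coldstore
	}

	return s.hot.Get(ctx, c)
}
```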

View File

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"sync"
 	"sync/atomic"
 	"time"
 
@@ -67,7 +68,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 	}
 	defer output.Close() //nolint:errcheck
 
+	var mx sync.Mutex
 	write := func(format string, args ...interface{}) {
+		mx.Lock()
+		defer mx.Unlock()
 		_, err := fmt.Fprintf(output, format+"\n", args...)
 		if err != nil {
 			log.Warnf("error writing check output: %s", err)
@@ -82,9 +86,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 	write("compaction index: %d", s.compactionIndex)
 	write("--")
 
-	var coldCnt, missingCnt int64
+	coldCnt := new(int64)
+	missingCnt := new(int64)
 
-	visitor, err := s.markSetEnv.CreateVisitor("check", 0)
+	visitor, err := s.markSetEnv.New("check", 0)
 	if err != nil {
 		return xerrors.Errorf("error creating visitor: %w", err)
 	}
@@ -111,10 +116,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 			}
 
 			if has {
-				coldCnt++
+				atomic.AddInt64(coldCnt, 1)
 				write("cold object reference: %s", c)
 			} else {
-				missingCnt++
+				atomic.AddInt64(missingCnt, 1)
 				write("missing object reference: %s", c)
 				return errStopWalk
 			}
@@ -128,9 +133,9 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 		return err
 	}
 
-	log.Infow("check done", "cold", coldCnt, "missing", missingCnt)
+	log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt)
 	write("--")
-	write("cold: %d missing: %d", coldCnt, missingCnt)
+	write("cold: %d missing: %d", *coldCnt, *missingCnt)
 	write("DONE")
 
 	return nil

View File

@@ -3,8 +3,10 @@ package splitstore
 import (
 	"bytes"
 	"errors"
+	"os"
+	"path/filepath"
 	"runtime"
-	"sort"
+	"sync"
 	"sync/atomic"
 	"time"
 
@@ -47,6 +49,10 @@ var (
 	// SyncGapTime is the time delay from a tipset's min timestamp before we decide
 	// there is a sync gap
 	SyncGapTime = time.Minute
+
+	// SyncWaitTime is the time delay from a tipset's min timestamp before we decide
+	// we have synced.
+	SyncWaitTime = 30 * time.Second
 )
 
 var (
@@ -56,8 +62,6 @@ var (
 
 const (
 	batchSize = 16384
-
-	defaultColdPurgeSize = 7_000_000
 )
 
 func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
@@ -140,9 +144,9 @@ func (s *SplitStore) isNearUpgrade(epoch abi.ChainEpoch) bool {
 // transactionally protect incoming tipsets
 func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
 	s.txnLk.RLock()
-	defer s.txnLk.RUnlock()
 
 	if !s.txnActive {
+		s.txnLk.RUnlock()
 		return
 	}
 
@@ -151,12 +155,115 @@ func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
 		cids = append(cids, ts.Cids()...)
 	}
 
+	if len(cids) == 0 {
+		s.txnLk.RUnlock()
+		return
+	}
+
+	// critical section
+	if s.txnMarkSet != nil {
+		curTs := apply[len(apply)-1]
+		timestamp := time.Unix(int64(curTs.MinTimestamp()), 0)
+		doSync := time.Since(timestamp) < SyncWaitTime
+		go func() {
+			if doSync {
+				defer func() {
+					s.txnSyncMx.Lock()
+					defer s.txnSyncMx.Unlock()
+					s.txnSync = true
+					s.txnSyncCond.Broadcast()
+				}()
+			}
+
+			defer s.txnLk.RUnlock()
+			s.markLiveRefs(cids)
+		}()
+		return
+	}
+
 	s.trackTxnRefMany(cids)
+	s.txnLk.RUnlock()
+}
+
+func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
+	log.Debugf("marking %d live refs", len(cids))
+	startMark := time.Now()
+
+	count := new(int32)
+	visitor := newConcurrentVisitor()
+	walkObject := func(c cid.Cid) error {
+		return s.walkObjectIncomplete(c, visitor,
+			func(c cid.Cid) error {
+				if isUnitaryObject(c) {
+					return errStopWalk
+				}
+
+				visit, err := s.txnMarkSet.Visit(c)
+				if err != nil {
+					return xerrors.Errorf("error visiting object: %w", err)
+				}
+
+				if !visit {
+					return errStopWalk
+				}
+
+				atomic.AddInt32(count, 1)
+				return nil
+			},
+			func(missing cid.Cid) error {
+				log.Warnf("missing object reference %s in %s", missing, c)
+				return errStopWalk
+			})
+	}
+
+	// optimize the common case of single put
+	if len(cids) == 1 {
+		if err := walkObject(cids[0]); err != nil {
+			log.Errorf("error marking tipset refs: %s", err)
+		}
+		log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
+		return
+	}
+
+	workch := make(chan cid.Cid, len(cids))
+	for _, c := range cids {
+		workch <- c
+	}
+	close(workch)
+
+	worker := func() error {
+		for c := range workch {
+			if err := walkObject(c); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	workers := runtime.NumCPU() / 2
+	if workers < 2 {
+		workers = 2
+	}
+	if workers > len(cids) {
+		workers = len(cids)
+	}
+
+	g := new(errgroup.Group)
+	for i := 0; i < workers; i++ {
+		g.Go(worker)
+	}
+
+	if err := g.Wait(); err != nil {
+		log.Errorf("error marking tipset refs: %s", err)
+	}
+
+	log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
 }
 
 // transactionally protect a view
 func (s *SplitStore) protectView(c cid.Cid) {
-	s.txnLk.RLock()
+	// the txnLk is held for read
 	defer s.txnLk.RUnlock()
 
 	if s.txnActive {
@@ -227,7 +334,7 @@ func (s *SplitStore) trackTxnRefMany(cids []cid.Cid) {
 }
 
 // protect all pending transactional references
-func (s *SplitStore) protectTxnRefs(markSet MarkSetVisitor) error {
+func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
 	for {
 		var txnRefs map[cid.Cid]struct{}
 
@@ -299,14 +406,14 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSetVisitor) error {
 
 // transactionally protect a reference by walking the object and marking.
 // concurrent markings are short circuited by checking the markset.
-func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSetVisitor) error {
+func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
 	if err := s.checkClosing(); err != nil {
 		return err
 	}
 
 	// Note: cold objects are deleted heaviest first, so the consituents of an object
 	// cannot be deleted before the object itself.
-	return s.walkObjectIncomplete(root, tmpVisitor(),
+	return s.walkObjectIncomplete(root, newTmpVisitor(),
 		func(c cid.Cid) error {
if isUnitaryObject(c) { if isUnitaryObject(c) {
return errStopWalk return errStopWalk
@ -386,6 +493,12 @@ func (s *SplitStore) compact(curTs *types.TipSet) {
} }
func (s *SplitStore) doCompact(curTs *types.TipSet) error { func (s *SplitStore) doCompact(curTs *types.TipSet) error {
if s.checkpointExists() {
// this really shouldn't happen, but if it somehow does, it means that the hotstore
// might be inconsistent; abort compaction and notify the user to intervene.
return xerrors.Errorf("checkpoint exists; aborting compaction")
}
currentEpoch := curTs.Height() currentEpoch := curTs.Height()
boundaryEpoch := currentEpoch - CompactionBoundary boundaryEpoch := currentEpoch - CompactionBoundary
@ -397,7 +510,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex) log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex)
markSet, err := s.markSetEnv.CreateVisitor("live", s.markSetSize) markSet, err := s.markSetEnv.New("live", s.markSetSize)
if err != nil { if err != nil {
return xerrors.Errorf("error creating mark set: %w", err) return xerrors.Errorf("error creating mark set: %w", err)
} }
@ -408,9 +521,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
return err return err
} }
// we are ready for concurrent marking
s.beginTxnMarking(markSet)
// 0. track all protected references at beginning of compaction; anything added later should // 0. track all protected references at beginning of compaction; anything added later should
// be transactionally protected by the write // be transactionally protected by the write
log.Info("protecting references with registered protectors") log.Info("protecting references with registered protectors")
@ -424,7 +534,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
log.Info("marking reachable objects") log.Info("marking reachable objects")
startMark := time.Now() startMark := time.Now()
var count int64 count := new(int64)
err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{}, err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{},
func(c cid.Cid) error { func(c cid.Cid) error {
if isUnitaryObject(c) { if isUnitaryObject(c) {
@ -440,7 +550,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
return errStopWalk return errStopWalk
} }
count++ atomic.AddInt64(count, 1)
return nil return nil
}) })
@ -448,9 +558,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
return xerrors.Errorf("error marking: %w", err) return xerrors.Errorf("error marking: %w", err)
} }
s.markSetSize = count + count>>2 // overestimate a bit s.markSetSize = *count + *count>>2 // overestimate a bit
log.Infow("marking done", "took", time.Since(startMark), "marked", count) log.Infow("marking done", "took", time.Since(startMark), "marked", *count)
if err := s.checkClosing(); err != nil { if err := s.checkClosing(); err != nil {
return err return err
@ -470,10 +580,15 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
log.Info("collecting cold objects") log.Info("collecting cold objects")
startCollect := time.Now() startCollect := time.Now()
coldw, err := NewColdSetWriter(s.coldSetPath())
if err != nil {
return xerrors.Errorf("error creating coldset: %w", err)
}
defer coldw.Close() //nolint:errcheck
// some stats for logging // some stats for logging
var hotCnt, coldCnt int var hotCnt, coldCnt int
cold := make([]cid.Cid, 0, s.coldPurgeSize)
err = s.hot.ForEachKey(func(c cid.Cid) error { err = s.hot.ForEachKey(func(c cid.Cid) error {
// was it marked? // was it marked?
mark, err := markSet.Has(c) mark, err := markSet.Has(c)
@ -487,7 +602,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
} }
// it's cold, mark it as candidate for move // it's cold, mark it as candidate for move
cold = append(cold, c) if err := coldw.Write(c); err != nil {
return xerrors.Errorf("error writing cid to coldstore: %w", err)
}
coldCnt++ coldCnt++
return nil return nil
@ -497,12 +614,12 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
return xerrors.Errorf("error collecting cold objects: %w", err) return xerrors.Errorf("error collecting cold objects: %w", err)
} }
log.Infow("cold collection done", "took", time.Since(startCollect)) if err := coldw.Close(); err != nil {
return xerrors.Errorf("error closing coldset: %w", err)
if coldCnt > 0 {
s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit
} }
log.Infow("cold collection done", "took", time.Since(startCollect))
log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt) log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt)
stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt))) stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt)))
stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt))) stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt)))
@ -520,11 +637,17 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
return err return err
} }
coldr, err := NewColdSetReader(s.coldSetPath())
if err != nil {
return xerrors.Errorf("error opening coldset: %w", err)
}
defer coldr.Close() //nolint:errcheck
// 3. copy the cold objects to the coldstore -- if we have one // 3. copy the cold objects to the coldstore -- if we have one
if !s.cfg.DiscardColdBlocks { if !s.cfg.DiscardColdBlocks {
log.Info("moving cold objects to the coldstore") log.Info("moving cold objects to the coldstore")
startMove := time.Now() startMove := time.Now()
err = s.moveColdBlocks(cold) err = s.moveColdBlocks(coldr)
if err != nil { if err != nil {
return xerrors.Errorf("error moving cold objects: %w", err) return xerrors.Errorf("error moving cold objects: %w", err)
} }
@ -533,41 +656,64 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
if err := s.checkClosing(); err != nil { if err := s.checkClosing(); err != nil {
return err return err
} }
if err := coldr.Reset(); err != nil {
return xerrors.Errorf("error resetting coldset: %w", err)
}
} }
// 4. sort cold objects so that the dags with most references are deleted first // 4. Purge cold objects with checkpointing for recovery.
// this ensures that we can't refer to a dag with its consituents already deleted, ie // This is the critical section of compaction, whereby any cold object not in the markSet is
// we lave no dangling references. // considered already deleted.
log.Info("sorting cold objects") // We delete cold objects in batches, holding the transaction lock, where we check the markSet
startSort := time.Now() // again for new references created by the VM.
err = s.sortObjects(cold) // After each batch, we write a checkpoint to disk; if the process is interrupted before completion,
if err != nil { // the process will continue from the checkpoint in the next recovery.
return xerrors.Errorf("error sorting objects: %w", err) if err := s.beginCriticalSection(markSet); err != nil {
} return xerrors.Errorf("error beginning critical section: %w", err)
log.Infow("sorting done", "took", time.Since(startSort))
// 4.1 protect transactional refs once more
// strictly speaking, this is not necessary as purge will do it before deleting each
// batch. however, there is likely a largish number of references accumulated during
// ths sort and this protects before entering pruge context.
err = s.protectTxnRefs(markSet)
if err != nil {
return xerrors.Errorf("error protecting transactional refs: %w", err)
} }
if err := s.checkClosing(); err != nil { if err := s.checkClosing(); err != nil {
return err return err
} }
// wait for the head to catch up so that the current tipset is marked
s.waitForSync()
if err := s.checkClosing(); err != nil {
return err
}
checkpoint, err := NewCheckpoint(s.checkpointPath())
if err != nil {
return xerrors.Errorf("error creating checkpoint: %w", err)
}
defer checkpoint.Close() //nolint:errcheck
// 5. purge cold objects from the hotstore, taking protected references into account // 5. purge cold objects from the hotstore, taking protected references into account
log.Info("purging cold objects from the hotstore") log.Info("purging cold objects from the hotstore")
startPurge := time.Now() startPurge := time.Now()
err = s.purge(cold, markSet) err = s.purge(coldr, checkpoint, markSet)
if err != nil { if err != nil {
return xerrors.Errorf("error purging cold blocks: %w", err) return xerrors.Errorf("error purging cold objects: %w", err)
} }
log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge)) log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))
s.endCriticalSection()
if err := checkpoint.Close(); err != nil {
log.Warnf("error closing checkpoint: %s", err)
}
if err := os.Remove(s.checkpointPath()); err != nil {
log.Warnf("error removing checkpoint: %s", err)
}
if err := coldr.Close(); err != nil {
log.Warnf("error closing coldset: %s", err)
}
if err := os.Remove(s.coldSetPath()); err != nil {
log.Warnf("error removing coldset: %s", err)
}
// we are done; do some housekeeping // we are done; do some housekeeping
s.endTxnProtect() s.endTxnProtect()
s.gcHotstore() s.gcHotstore()
@ -598,12 +744,51 @@ func (s *SplitStore) beginTxnProtect() {
defer s.txnLk.Unlock() defer s.txnLk.Unlock()
s.txnActive = true s.txnActive = true
s.txnSync = false
s.txnRefs = make(map[cid.Cid]struct{}) s.txnRefs = make(map[cid.Cid]struct{})
s.txnMissing = make(map[cid.Cid]struct{}) s.txnMissing = make(map[cid.Cid]struct{})
} }
func (s *SplitStore) beginTxnMarking(markSet MarkSetVisitor) { func (s *SplitStore) beginCriticalSection(markSet MarkSet) error {
markSet.SetConcurrent() log.Info("beginning critical section")
// do that once first to get the bulk before the markset is in critical section
if err := s.protectTxnRefs(markSet); err != nil {
return xerrors.Errorf("error protecting transactional references: %w", err)
}
if err := markSet.BeginCriticalSection(); err != nil {
return xerrors.Errorf("error beginning critical section for markset: %w", err)
}
s.txnLk.Lock()
defer s.txnLk.Unlock()
s.txnMarkSet = markSet
// and do it again while holding the lock to mark references that might have been created
// in the meantime, and to avoid races of the form Has->txnRef->enterCS->Get failing because
// the object is not yet in the markset
if err := s.protectTxnRefs(markSet); err != nil {
return xerrors.Errorf("error protecting transactional references: %w", err)
}
return nil
}
func (s *SplitStore) waitForSync() {
log.Info("waiting for sync")
startWait := time.Now()
defer func() {
log.Infow("waiting for sync done", "took", time.Since(startWait))
}()
s.txnSyncMx.Lock()
defer s.txnSyncMx.Unlock()
for !s.txnSync {
s.txnSyncCond.Wait()
}
} }
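
As a hedged, standalone sketch of the wait/broadcast handshake used by waitForSync and the background marking goroutine in protectTipSets (the syncGate type and its field names are invented for this example; the splitstore itself uses txnSync, txnSyncMx and txnSyncCond): compaction blocks until the marker broadcasts that the protected head has been marked.

package main

import (
	"fmt"
	"sync"
	"time"
)

// syncGate mimics the txnSync/txnSyncMx/txnSyncCond trio: waiters block until
// a marker goroutine signals that the current head has been marked.
type syncGate struct {
	mx   sync.Mutex
	cond *sync.Cond
	done bool
}

func newSyncGate() *syncGate {
	g := &syncGate{}
	g.cond = sync.NewCond(&g.mx)
	return g
}

func (g *syncGate) wait() {
	g.mx.Lock()
	defer g.mx.Unlock()
	for !g.done { // loop guards against spurious wakeups, as waitForSync does
		g.cond.Wait()
	}
}

func (g *syncGate) signal() {
	g.mx.Lock()
	defer g.mx.Unlock()
	g.done = true
	g.cond.Broadcast()
}

func main() {
	g := newSyncGate()
	go func() {
		time.Sleep(10 * time.Millisecond) // stand-in for marking the head refs
		g.signal()
	}()
	g.wait()
	fmt.Println("head marked; safe to enter the critical section")
}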
func (s *SplitStore) endTxnProtect() { func (s *SplitStore) endTxnProtect() {
@ -615,32 +800,51 @@ func (s *SplitStore) endTxnProtect() {
} }
s.txnActive = false s.txnActive = false
s.txnSync = false
s.txnRefs = nil s.txnRefs = nil
s.txnMissing = nil s.txnMissing = nil
s.txnMarkSet = nil
}
func (s *SplitStore) endCriticalSection() {
log.Info("ending critical section")
s.txnLk.Lock()
defer s.txnLk.Unlock()
s.txnMarkSet.EndCriticalSection()
s.txnMarkSet = nil
} }
func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch, func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch,
visitor ObjectVisitor, f func(cid.Cid) error) error { visitor ObjectVisitor, f func(cid.Cid) error) error {
var walked *cid.Set var walked ObjectVisitor
toWalk := ts.Cids() var mx sync.Mutex
walkCnt := 0 // we copy the tipset first into a new slice, which allows us to reuse it in every epoch.
scanCnt := 0 toWalk := make([]cid.Cid, len(ts.Cids()))
copy(toWalk, ts.Cids())
walkCnt := new(int64)
scanCnt := new(int64)
stopWalk := func(_ cid.Cid) error { return errStopWalk } stopWalk := func(_ cid.Cid) error { return errStopWalk }
walkBlock := func(c cid.Cid) error { walkBlock := func(c cid.Cid) error {
if !walked.Visit(c) { visit, err := walked.Visit(c)
if err != nil {
return err
}
if !visit {
return nil return nil
} }
walkCnt++ atomic.AddInt64(walkCnt, 1)
if err := f(c); err != nil { if err := f(c); err != nil {
return err return err
} }
var hdr types.BlockHeader var hdr types.BlockHeader
err := s.view(c, func(data []byte) error { err = s.view(c, func(data []byte) error {
return hdr.UnmarshalCBOR(bytes.NewBuffer(data)) return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
}) })
@ -676,11 +880,13 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
if err := s.walkObject(hdr.ParentStateRoot, visitor, f); err != nil { if err := s.walkObject(hdr.ParentStateRoot, visitor, f); err != nil {
return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err) return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
} }
scanCnt++ atomic.AddInt64(scanCnt, 1)
} }
if hdr.Height > 0 { if hdr.Height > 0 {
mx.Lock()
toWalk = append(toWalk, hdr.Parents...) toWalk = append(toWalk, hdr.Parents...)
mx.Unlock()
} }
return nil return nil
@ -692,20 +898,43 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
return err return err
} }
workers := len(toWalk)
if workers > runtime.NumCPU()/2 {
workers = runtime.NumCPU() / 2
}
if workers < 2 {
workers = 2
}
// the walk is BFS, so we can reset the walked set in every iteration and avoid building up // the walk is BFS, so we can reset the walked set in every iteration and avoid building up
// a set that contains all blocks (1M epochs -> 5M blocks -> 200MB worth of memory and growing // a set that contains all blocks (1M epochs -> 5M blocks -> 200MB worth of memory and growing
// over time) // over time)
walked = cid.NewSet() walked = newConcurrentVisitor()
walking := toWalk workch := make(chan cid.Cid, len(toWalk))
toWalk = nil for _, c := range toWalk {
for _, c := range walking { workch <- c
}
close(workch)
toWalk = toWalk[:0]
g := new(errgroup.Group)
for i := 0; i < workers; i++ {
g.Go(func() error {
for c := range workch {
if err := walkBlock(c); err != nil { if err := walkBlock(c); err != nil {
return xerrors.Errorf("error walking block (cid: %s): %w", c, err) return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
} }
} }
return nil
})
} }
log.Infow("chain walk done", "walked", walkCnt, "scanned", scanCnt) if err := g.Wait(); err != nil {
return err
}
}
log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt)
return nil return nil
} }
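
The per-frontier parallelism above follows a common bounded-worker pattern: a buffered channel as the work queue and errgroup to propagate the first error. A hedged standalone sketch of the same shape (processFrontier and the int payload are invented for illustration, not part of the splitstore API):

package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sync/errgroup"
)

func processFrontier(frontier []int, process func(int) error) error {
	// clamp the worker count the same way walkChain does
	workers := runtime.NumCPU() / 2
	if workers < 2 {
		workers = 2
	}
	if workers > len(frontier) {
		workers = len(frontier)
	}

	// buffered channel lets us enqueue the whole frontier without blocking
	workch := make(chan int, len(frontier))
	for _, item := range frontier {
		workch <- item
	}
	close(workch)

	g := new(errgroup.Group)
	for i := 0; i < workers; i++ {
		g.Go(func() error {
			for item := range workch {
				if err := process(item); err != nil {
					return err
				}
			}
			return nil
		})
	}
	return g.Wait()
}

func main() {
	err := processFrontier([]int{1, 2, 3, 4}, func(n int) error {
		fmt.Println("visiting", n)
		return nil
	})
	if err != nil {
		fmt.Println("walk error:", err)
	}
}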
@ -824,7 +1053,7 @@ func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, m
return nil return nil
} }
// internal version used by walk // internal version used during compaction and related operations
func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error { func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error {
if isIdentiyCid(c) { if isIdentiyCid(c) {
data, err := decodeIdentityCid(c) data, err := decodeIdentityCid(c)
@ -859,10 +1088,34 @@ func (s *SplitStore) has(c cid.Cid) (bool, error) {
return s.cold.Has(s.ctx, c) return s.cold.Has(s.ctx, c)
} }
func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { func (s *SplitStore) get(c cid.Cid) (blocks.Block, error) {
blk, err := s.hot.Get(s.ctx, c)
switch err {
case nil:
return blk, nil
case bstore.ErrNotFound:
return s.cold.Get(s.ctx, c)
default:
return nil, err
}
}
func (s *SplitStore) getSize(c cid.Cid) (int, error) {
sz, err := s.hot.GetSize(s.ctx, c)
switch err {
case nil:
return sz, nil
case bstore.ErrNotFound:
return s.cold.GetSize(s.ctx, c)
default:
return 0, err
}
}
func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error {
batch := make([]blocks.Block, 0, batchSize) batch := make([]blocks.Block, 0, batchSize)
for _, c := range cold { err := coldr.ForEach(func(c cid.Cid) error {
if err := s.checkClosing(); err != nil { if err := s.checkClosing(); err != nil {
return err return err
} }
@ -871,7 +1124,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
if err != nil { if err != nil {
if err == bstore.ErrNotFound { if err == bstore.ErrNotFound {
log.Warnf("hotstore missing block %s", c) log.Warnf("hotstore missing block %s", c)
continue return nil
} }
return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err) return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err)
@ -885,6 +1138,12 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
} }
batch = batch[:0] batch = batch[:0]
} }
return nil
})
if err != nil {
return xerrors.Errorf("error iterating coldset: %w", err)
} }
if len(batch) > 0 { if len(batch) > 0 {
@ -897,160 +1156,60 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
return nil return nil
} }
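
moveColdBlocks above buffers blocks and flushes them to the coldstore in chunks of batchSize. A hedged minimal sketch of that flush-on-full, flush-remainder pattern (strings stand in for blocks, flush for PutMany):

package main

import "fmt"

const exampleBatchSize = 3 // the real code uses batchSize = 16384

func flush(batch []string) { fmt.Println("flushing", batch) }

func main() {
	items := []string{"a", "b", "c", "d", "e"}
	batch := make([]string, 0, exampleBatchSize)
	for _, it := range items {
		batch = append(batch, it)
		if len(batch) == exampleBatchSize {
			flush(batch)      // full batch: one bulk write
			batch = batch[:0] // reuse the backing array
		}
	}
	if len(batch) > 0 {
		flush(batch) // trailing partial batch
	}
}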
// sorts a slice of objects heaviest first -- it's a little expensive but worth the func (s *SplitStore) purge(coldr *ColdSetReader, checkpoint *Checkpoint, markSet MarkSet) error {
// guarantee that we don't leave dangling references behind, e.g. if we die in the middle batch := make([]cid.Cid, 0, batchSize)
// of a purge.
func (s *SplitStore) sortObjects(cids []cid.Cid) error {
// we cache the keys to avoid making a gazillion of strings
keys := make(map[cid.Cid]string)
key := func(c cid.Cid) string {
s, ok := keys[c]
if !ok {
s = string(c.Hash())
keys[c] = s
}
return s
}
// compute sorting weights as the cumulative number of DAG links
weights := make(map[string]int)
for _, c := range cids {
// this can take quite a while, so check for shutdown with every opportunity
if err := s.checkClosing(); err != nil {
return err
}
w := s.getObjectWeight(c, weights, key)
weights[key(c)] = w
}
// sort!
sort.Slice(cids, func(i, j int) bool {
wi := weights[key(cids[i])]
wj := weights[key(cids[j])]
if wi == wj {
return bytes.Compare(cids[i].Hash(), cids[j].Hash()) > 0
}
return wi > wj
})
return nil
}
func (s *SplitStore) getObjectWeight(c cid.Cid, weights map[string]int, key func(cid.Cid) string) int {
w, ok := weights[key(c)]
if ok {
return w
}
// we treat block headers specially to avoid walking the entire chain
var hdr types.BlockHeader
err := s.view(c, func(data []byte) error {
return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
})
if err == nil {
w1 := s.getObjectWeight(hdr.ParentStateRoot, weights, key)
weights[key(hdr.ParentStateRoot)] = w1
w2 := s.getObjectWeight(hdr.Messages, weights, key)
weights[key(hdr.Messages)] = w2
return 1 + w1 + w2
}
var links []cid.Cid
err = s.view(c, func(data []byte) error {
return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
links = append(links, c)
})
})
if err != nil {
return 1
}
w = 1
for _, c := range links {
// these are internal refs, so dags will be dags
if c.Prefix().Codec != cid.DagCBOR {
w++
continue
}
wc := s.getObjectWeight(c, weights, key)
weights[key(c)] = wc
w += wc
}
return w
}
func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error {
if len(cids) == 0 {
return nil
}
// we don't delete one giant batch of millions of objects, but rather do smaller batches
// so that we don't stop the world for an extended period of time
done := false
for i := 0; !done; i++ {
start := i * batchSize
end := start + batchSize
if end >= len(cids) {
end = len(cids)
done = true
}
err := deleteBatch(cids[start:end])
if err != nil {
return xerrors.Errorf("error deleting batch: %w", err)
}
}
return nil
}
func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error {
deadCids := make([]cid.Cid, 0, batchSize) deadCids := make([]cid.Cid, 0, batchSize)
var purgeCnt, liveCnt int var purgeCnt, liveCnt int
defer func() { defer func() {
log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt) log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt)
}() }()
return s.purgeBatch(cids, deleteBatch := func() error {
func(cids []cid.Cid) error { pc, lc, err := s.purgeBatch(batch, deadCids, checkpoint, markSet)
deadCids := deadCids[:0]
purgeCnt += pc
liveCnt += lc
batch = batch[:0]
for {
if err := s.checkClosing(); err != nil {
return err return err
} }
s.txnLk.Lock() err := coldr.ForEach(func(c cid.Cid) error {
if len(s.txnRefs) == 0 { batch = append(batch, c)
// keep the lock! if len(batch) == batchSize {
break return deleteBatch()
} }
// unlock and protect return nil
s.txnLk.Unlock() })
err := s.protectTxnRefs(markSet)
if err != nil { if err != nil {
return xerrors.Errorf("error protecting transactional refs: %w", err) return err
}
} }
if len(batch) > 0 {
return deleteBatch()
}
return nil
}
func (s *SplitStore) purgeBatch(batch, deadCids []cid.Cid, checkpoint *Checkpoint, markSet MarkSet) (purgeCnt int, liveCnt int, err error) {
if err := s.checkClosing(); err != nil {
return 0, 0, err
}
s.txnLk.Lock()
defer s.txnLk.Unlock() defer s.txnLk.Unlock()
for _, c := range cids { for _, c := range batch {
live, err := markSet.Has(c) has, err := markSet.Has(c)
if err != nil { if err != nil {
return xerrors.Errorf("error checking for liveness: %w", err) return 0, 0, xerrors.Errorf("error checking markset for liveness: %w", err)
} }
if live { if has {
liveCnt++ liveCnt++
continue continue
} }
@ -1058,16 +1217,141 @@ func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error {
deadCids = append(deadCids, c) deadCids = append(deadCids, c)
} }
err := s.hot.DeleteMany(s.ctx, deadCids) if len(deadCids) == 0 {
if err != nil { if err := checkpoint.Set(batch[len(batch)-1]); err != nil {
return xerrors.Errorf("error purging cold objects: %w", err) return 0, 0, xerrors.Errorf("error setting checkpoint: %w", err)
}
return 0, liveCnt, nil
}
if err := s.hot.DeleteMany(s.ctx, deadCids); err != nil {
return 0, liveCnt, xerrors.Errorf("error purging cold objects: %w", err)
} }
s.debug.LogDelete(deadCids) s.debug.LogDelete(deadCids)
purgeCnt = len(deadCids)
if err := checkpoint.Set(batch[len(batch)-1]); err != nil {
return purgeCnt, liveCnt, xerrors.Errorf("error setting checkpoint: %w", err)
}
return purgeCnt, liveCnt, nil
}
func (s *SplitStore) coldSetPath() string {
return filepath.Join(s.path, "coldset")
}
func (s *SplitStore) checkpointPath() string {
return filepath.Join(s.path, "checkpoint")
}
func (s *SplitStore) checkpointExists() bool {
_, err := os.Stat(s.checkpointPath())
return err == nil
}
func (s *SplitStore) completeCompaction() error {
checkpoint, last, err := OpenCheckpoint(s.checkpointPath())
if err != nil {
return xerrors.Errorf("error opening checkpoint: %w", err)
}
defer checkpoint.Close() //nolint:errcheck
coldr, err := NewColdSetReader(s.coldSetPath())
if err != nil {
return xerrors.Errorf("error opening coldset: %w", err)
}
defer coldr.Close() //nolint:errcheck
markSet, err := s.markSetEnv.Recover("live")
if err != nil {
return xerrors.Errorf("error recovering markset: %w", err)
}
defer markSet.Close() //nolint:errcheck
// PURGE
log.Info("purging cold objects from the hotstore")
startPurge := time.Now()
err = s.completePurge(coldr, checkpoint, last, markSet)
if err != nil {
return xerrors.Errorf("error purging cold objects: %w", err)
}
log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))
markSet.EndCriticalSection()
if err := checkpoint.Close(); err != nil {
log.Warnf("error closing checkpoint: %s", err)
}
if err := os.Remove(s.checkpointPath()); err != nil {
log.Warnf("error removing checkpoint: %s", err)
}
if err := coldr.Close(); err != nil {
log.Warnf("error closing coldset: %s", err)
}
if err := os.Remove(s.coldSetPath()); err != nil {
log.Warnf("error removing coldset: %s", err)
}
// Note: at this point we can start the splitstore; a compaction should run on
// the first head change, which will trigger gc on the hotstore.
// We don't mind the second (back-to-back) compaction as the head will
// have advanced during marking and coldset accumulation.
return nil
}
func (s *SplitStore) completePurge(coldr *ColdSetReader, checkpoint *Checkpoint, start cid.Cid, markSet MarkSet) error {
if !start.Defined() {
return s.purge(coldr, checkpoint, markSet)
}
seeking := true
batch := make([]cid.Cid, 0, batchSize)
deadCids := make([]cid.Cid, 0, batchSize)
var purgeCnt, liveCnt int
defer func() {
log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt)
}()
deleteBatch := func() error {
pc, lc, err := s.purgeBatch(batch, deadCids, checkpoint, markSet)
purgeCnt += pc
liveCnt += lc
batch = batch[:0]
return err
}
err := coldr.ForEach(func(c cid.Cid) error {
if seeking {
if start.Equals(c) {
seeking = false
}
return nil
}
batch = append(batch, c)
if len(batch) == batchSize {
return deleteBatch()
}
purgeCnt += len(deadCids)
return nil return nil
}) })
if err != nil {
return err
}
if len(batch) > 0 {
return deleteBatch()
}
return nil
} }
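
The Checkpoint and ColdSetReader types used above are defined elsewhere in the splitstore package. Purely as a hedged sketch of the recovery idea (the file format, helper names and keys below are invented for illustration, not the real implementation), a checkpoint can be as simple as appending the last key handled after each deleted batch and reading the final entry back on restart, which is what lets completePurge seek past already-purged coldset entries.

package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
)

type checkpoint struct {
	f *os.File
	w *bufio.Writer
}

func newCheckpoint(path string) (*checkpoint, error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return nil, err
	}
	return &checkpoint{f: f, w: bufio.NewWriter(f)}, nil
}

// set records the last key processed in the current batch.
func (c *checkpoint) set(key string) error {
	if _, err := fmt.Fprintln(c.w, key); err != nil {
		return err
	}
	if err := c.w.Flush(); err != nil {
		return err
	}
	return c.f.Sync() // make the checkpoint durable before deleting the next batch
}

func (c *checkpoint) close() error { return c.f.Close() }

// openCheckpoint returns the last recorded key, or "" if none was written.
func openCheckpoint(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	last := ""
	s := bufio.NewScanner(f)
	for s.Scan() {
		last = s.Text()
	}
	return last, s.Err()
}

func main() {
	path := filepath.Join(os.TempDir(), "checkpoint.example")
	defer os.Remove(path)

	cp, err := newCheckpoint(path)
	if err != nil {
		panic(err)
	}
	_ = cp.set("bafy-last-of-batch-1") // after batch 1 is deleted
	_ = cp.set("bafy-last-of-batch-2") // after batch 2 is deleted
	_ = cp.close()

	last, _ := openCheckpoint(path)
	fmt.Println("resume purge after:", last) // bafy-last-of-batch-2
}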
// I really don't like having this code, but we seem to have some occasional DAG references with // I really don't like having this code, but we seem to have some occasional DAG references with
@ -1077,7 +1361,7 @@ func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error {
// have this gem[TM]. // have this gem[TM].
// My best guess is that they are parent message receipts or yet to be computed state roots; magik // My best guess is that they are parent message receipts or yet to be computed state roots; magik
// thinks the cause may be block validation. // thinks the cause may be block validation.
func (s *SplitStore) waitForMissingRefs(markSet MarkSetVisitor) { func (s *SplitStore) waitForMissingRefs(markSet MarkSet) {
s.txnLk.Lock() s.txnLk.Lock()
missing := s.txnMissing missing := s.txnMissing
s.txnMissing = nil s.txnMissing = nil
@ -1106,7 +1390,7 @@ func (s *SplitStore) waitForMissingRefs(markSet MarkSetVisitor) {
} }
towalk := missing towalk := missing
visitor := tmpVisitor() visitor := newTmpVisitor()
missing = make(map[cid.Cid]struct{}) missing = make(map[cid.Cid]struct{})
for c := range towalk { for c := range towalk {

View File

@ -4,6 +4,8 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"io/ioutil"
"os"
"sync" "sync"
"sync/atomic" "sync/atomic"
"testing" "testing"
@ -20,12 +22,14 @@ import (
datastore "github.com/ipfs/go-datastore" datastore "github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync" dssync "github.com/ipfs/go-datastore/sync"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
mh "github.com/multiformats/go-multihash"
) )
func init() { func init() {
CompactionThreshold = 5 CompactionThreshold = 5
CompactionBoundary = 2 CompactionBoundary = 2
WarmupBoundary = 0 WarmupBoundary = 0
SyncWaitTime = time.Millisecond
logging.SetLogLevel("splitstore", "DEBUG") logging.SetLogLevel("splitstore", "DEBUG")
} }
@ -80,8 +84,17 @@ func testSplitStore(t *testing.T, cfg *Config) {
t.Fatal(err) t.Fatal(err)
} }
path, err := ioutil.TempDir("", "splitstore.*")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
_ = os.RemoveAll(path)
})
// open the splitstore // open the splitstore
ss, err := Open("", ds, hot, cold, cfg) ss, err := Open(path, ds, hot, cold, cfg)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -125,6 +138,10 @@ func testSplitStore(t *testing.T, cfg *Config) {
} }
waitForCompaction := func() { waitForCompaction := func() {
ss.txnSyncMx.Lock()
ss.txnSync = true
ss.txnSyncCond.Broadcast()
ss.txnSyncMx.Unlock()
for atomic.LoadInt32(&ss.compacting) == 1 { for atomic.LoadInt32(&ss.compacting) == 1 {
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
} }
@ -259,8 +276,17 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
path, err := ioutil.TempDir("", "splitstore.*")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
_ = os.RemoveAll(path)
})
// open the splitstore // open the splitstore
ss, err := Open("", ds, hot, cold, &Config{MarkSetType: "map"}) ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -305,6 +331,10 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
} }
waitForCompaction := func() { waitForCompaction := func() {
ss.txnSyncMx.Lock()
ss.txnSync = true
ss.txnSyncCond.Broadcast()
ss.txnSyncMx.Unlock()
for atomic.LoadInt32(&ss.compacting) == 1 { for atomic.LoadInt32(&ss.compacting) == 1 {
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
} }
@ -426,17 +456,25 @@ func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, app
type mockStore struct { type mockStore struct {
mx sync.Mutex mx sync.Mutex
set map[cid.Cid]blocks.Block set map[string]blocks.Block
} }
func newMockStore() *mockStore { func newMockStore() *mockStore {
return &mockStore{set: make(map[cid.Cid]blocks.Block)} return &mockStore{set: make(map[string]blocks.Block)}
}
func (b *mockStore) keyOf(c cid.Cid) string {
return string(c.Hash())
}
func (b *mockStore) cidOf(k string) cid.Cid {
return cid.NewCidV1(cid.Raw, mh.Multihash([]byte(k)))
} }
func (b *mockStore) Has(_ context.Context, cid cid.Cid) (bool, error) { func (b *mockStore) Has(_ context.Context, cid cid.Cid) (bool, error) {
b.mx.Lock() b.mx.Lock()
defer b.mx.Unlock() defer b.mx.Unlock()
_, ok := b.set[cid] _, ok := b.set[b.keyOf(cid)]
return ok, nil return ok, nil
} }
@ -446,7 +484,7 @@ func (b *mockStore) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) {
b.mx.Lock() b.mx.Lock()
defer b.mx.Unlock() defer b.mx.Unlock()
blk, ok := b.set[cid] blk, ok := b.set[b.keyOf(cid)]
if !ok { if !ok {
return nil, blockstore.ErrNotFound return nil, blockstore.ErrNotFound
} }
@ -474,7 +512,7 @@ func (b *mockStore) Put(_ context.Context, blk blocks.Block) error {
b.mx.Lock() b.mx.Lock()
defer b.mx.Unlock() defer b.mx.Unlock()
b.set[blk.Cid()] = blk b.set[b.keyOf(blk.Cid())] = blk
return nil return nil
} }
@ -483,7 +521,7 @@ func (b *mockStore) PutMany(_ context.Context, blks []blocks.Block) error {
defer b.mx.Unlock() defer b.mx.Unlock()
for _, blk := range blks { for _, blk := range blks {
b.set[blk.Cid()] = blk b.set[b.keyOf(blk.Cid())] = blk
} }
return nil return nil
} }
@ -492,7 +530,7 @@ func (b *mockStore) DeleteBlock(_ context.Context, cid cid.Cid) error {
b.mx.Lock() b.mx.Lock()
defer b.mx.Unlock() defer b.mx.Unlock()
delete(b.set, cid) delete(b.set, b.keyOf(cid))
return nil return nil
} }
@ -501,7 +539,7 @@ func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error {
defer b.mx.Unlock() defer b.mx.Unlock()
for _, c := range cids { for _, c := range cids {
delete(b.set, c) delete(b.set, b.keyOf(c))
} }
return nil return nil
} }
@ -515,7 +553,7 @@ func (b *mockStore) ForEachKey(f func(cid.Cid) error) error {
defer b.mx.Unlock() defer b.mx.Unlock()
for c := range b.set { for c := range b.set {
err := f(c) err := f(b.cidOf(c))
if err != nil { if err != nil {
return err return err
} }
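
The switch from cid.Cid keys to multihash-string keys makes the mock store codec-agnostic: two CIDs wrapping the same multihash under different codecs resolve to the same block. A hedged standalone illustration (keyOf mirrors the helper above; the CIDs are constructed only for the example):

package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	h, _ := mh.Sum([]byte("hello"), mh.SHA2_256, -1)
	raw := cid.NewCidV1(cid.Raw, h)
	dag := cid.NewCidV1(cid.DagCBOR, h)

	keyOf := func(c cid.Cid) string { return string(c.Hash()) }

	fmt.Println(raw.Equals(dag))          // false: different codecs
	fmt.Println(keyOf(raw) == keyOf(dag)) // true: same multihash key
}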

View File

@ -1,6 +1,7 @@
package splitstore package splitstore
import ( import (
"sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -55,12 +56,13 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
if WarmupBoundary < epoch { if WarmupBoundary < epoch {
boundaryEpoch = epoch - WarmupBoundary boundaryEpoch = epoch - WarmupBoundary
} }
var mx sync.Mutex
batchHot := make([]blocks.Block, 0, batchSize) batchHot := make([]blocks.Block, 0, batchSize)
count := int64(0) count := new(int64)
xcount := int64(0) xcount := new(int64)
missing := int64(0) missing := new(int64)
visitor, err := s.markSetEnv.CreateVisitor("warmup", 0) visitor, err := s.markSetEnv.New("warmup", 0)
if err != nil { if err != nil {
return xerrors.Errorf("error creating visitor: %w", err) return xerrors.Errorf("error creating visitor: %w", err)
} }
@ -73,7 +75,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
return errStopWalk return errStopWalk
} }
count++ atomic.AddInt64(count, 1)
has, err := s.hot.Has(s.ctx, c) has, err := s.hot.Has(s.ctx, c)
if err != nil { if err != nil {
@ -87,22 +89,25 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
blk, err := s.cold.Get(s.ctx, c) blk, err := s.cold.Get(s.ctx, c)
if err != nil { if err != nil {
if err == bstore.ErrNotFound { if err == bstore.ErrNotFound {
missing++ atomic.AddInt64(missing, 1)
return errStopWalk return errStopWalk
} }
return err return err
} }
xcount++ atomic.AddInt64(xcount, 1)
mx.Lock()
batchHot = append(batchHot, blk) batchHot = append(batchHot, blk)
if len(batchHot) == batchSize { if len(batchHot) == batchSize {
err = s.hot.PutMany(s.ctx, batchHot) err = s.hot.PutMany(s.ctx, batchHot)
if err != nil { if err != nil {
mx.Unlock()
return err return err
} }
batchHot = batchHot[:0] batchHot = batchHot[:0]
} }
mx.Unlock()
return nil return nil
}) })
@ -118,9 +123,9 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
} }
} }
log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing) log.Infow("warmup stats", "visited", *count, "warm", *xcount, "missing", *missing)
s.markSetSize = count + count>>2 // overestimate a bit s.markSetSize = *count + *count>>2 // overestimate a bit
err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize)) err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize))
if err != nil { if err != nil {
log.Warnf("error saving mark set size: %s", err) log.Warnf("error saving mark set size: %s", err)

View File

@ -1,6 +1,8 @@
package splitstore package splitstore
import ( import (
"sync"
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
) )
@ -17,16 +19,34 @@ func (v *noopVisitor) Visit(_ cid.Cid) (bool, error) {
return true, nil return true, nil
} }
type cidSetVisitor struct { type tmpVisitor struct {
set *cid.Set set *cid.Set
} }
var _ ObjectVisitor = (*cidSetVisitor)(nil) var _ ObjectVisitor = (*tmpVisitor)(nil)
func (v *cidSetVisitor) Visit(c cid.Cid) (bool, error) { func (v *tmpVisitor) Visit(c cid.Cid) (bool, error) {
return v.set.Visit(c), nil return v.set.Visit(c), nil
} }
func tmpVisitor() ObjectVisitor { func newTmpVisitor() ObjectVisitor {
return &cidSetVisitor{set: cid.NewSet()} return &tmpVisitor{set: cid.NewSet()}
}
type concurrentVisitor struct {
mx sync.Mutex
set *cid.Set
}
var _ ObjectVisitor = (*concurrentVisitor)(nil)
func newConcurrentVisitor() *concurrentVisitor {
return &concurrentVisitor{set: cid.NewSet()}
}
func (v *concurrentVisitor) Visit(c cid.Cid) (bool, error) {
v.mx.Lock()
defer v.mx.Unlock()
return v.set.Visit(c), nil
} }
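
A hedged sketch of the visit-once contract concurrentVisitor provides, written with plain strings so it runs standalone: when several walk workers share one visitor, each key is accepted by exactly one of them because Visit is serialized by the mutex.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type visitor struct {
	mx   sync.Mutex
	seen map[string]struct{}
}

func (v *visitor) Visit(k string) bool {
	v.mx.Lock()
	defer v.mx.Unlock()
	if _, ok := v.seen[k]; ok {
		return false // already visited; the caller short-circuits the walk
	}
	v.seen[k] = struct{}{}
	return true
}

func main() {
	v := &visitor{seen: make(map[string]struct{})}
	var accepted int32
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if v.Visit("bafy-example") {
				atomic.AddInt32(&accepted, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println("accepted exactly once:", accepted == 1)
}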

View File

@ -1,2 +1,2 @@
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBdRCBLUeKvoy22u5DcXs61adFn31v8WWCZgmBjDCjbsC /dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWFHDtFx7CVTy4xoCDutVo1cScvSnQjDeaM8UzwVS1qwkh
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWDUQJBA18njjXnG9RtLxoN3muvdU7PEy55QorUEsdAqdy /dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWKt8cwpkiumkT8x32c3YFxsPRwhV5J8hCYPn9mhUmcAXt

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -90,6 +90,7 @@ func init() {
UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight) UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight)
UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight) UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight)
UpgradeChocolateHeight = getUpgradeHeight("LOTUS_CHOCOLATE_HEIGHT", UpgradeChocolateHeight) UpgradeChocolateHeight = getUpgradeHeight("LOTUS_CHOCOLATE_HEIGHT", UpgradeChocolateHeight)
UpgradeOhSnapHeight = getUpgradeHeight("LOTUS_OHSNAP_HEIGHT", UpgradeOhSnapHeight)
BuildType |= Build2k BuildType |= Build2k

View File

@ -42,8 +42,7 @@ const UpgradeTurboHeight = -15
const UpgradeHyperdriveHeight = -16 const UpgradeHyperdriveHeight = -16
const UpgradeChocolateHeight = -17 const UpgradeChocolateHeight = -17
// 2022-01-17T19:00:00Z const UpgradeOhSnapHeight = 240
const UpgradeOhSnapHeight = 30262
func init() { func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30)) policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))

View File

@ -54,7 +54,8 @@ const UpgradeHyperdriveHeight = 420
const UpgradeChocolateHeight = 312746 const UpgradeChocolateHeight = 312746
const UpgradeOhSnapHeight = 99999999 // 2022-02-10T19:23:00Z
const UpgradeOhSnapHeight = 682006
func init() { func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30)) policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))

View File

@ -1,4 +1,54 @@
{ {
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.params": {
"cid": "Qma5WL6abSqYg9uUQAZ3EHS286bsNsha7oAGsJBD48Bq2q",
"digest": "c3ad7bb549470b82ad52ed070aebb4f4",
"sector_size": 536870912
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.vk": {
"cid": "QmUa7f9JtJMsqJJ3s3ZXk6WyF4xJLE8FiqYskZGgk8GCDv",
"digest": "994c5b7d450ca9da348c910689f2dc7f",
"sector_size": 536870912
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.params": {
"cid": "QmQiT4qBGodrVNEgVTDXxBNDdPbaD8Ag7Sx3ZTq1zHX79S",
"digest": "5aedd2cf3e5c0a15623d56a1b43110ad",
"sector_size": 8388608
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.vk": {
"cid": "QmdcpKUQvHM8RFRVKbk1yHfEqMcBzhtFWKRp9SNEmWq37i",
"digest": "abd80269054d391a734febdac0d2e687",
"sector_size": 8388608
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.params": {
"cid": "QmYM6Hg7mjmvA3ZHTsqkss1fkdyDju5dDmLiBZGJ5pz9y9",
"digest": "311f92a3e75036ced01b1c0025f1fa0c",
"sector_size": 2048
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.vk": {
"cid": "QmaQsTLL3nc5dw6wAvaioJSBfd1jhQrA2o6ucFf7XeV74P",
"digest": "eadad9784969890d30f2749708c79771",
"sector_size": 2048
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.params": {
"cid": "QmNPc75iEfcahCwNKdqnWLtxnjspUGGR4iscjiz3wP3RtS",
"digest": "1b3cfd761a961543f9eb273e435a06a2",
"sector_size": 34359738368
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.vk": {
"cid": "QmdFFUe1gcz9MMHc6YW8aoV48w4ckvcERjt7PkydQAMfCN",
"digest": "3a6941983754737fde880d29c7094905",
"sector_size": 34359738368
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.params": {
"cid": "QmUB6xTVjzBQGuDNeyJMrrJ1byk58vhPm8eY2Lv9pgwanp",
"digest": "1a392e7b759fb18e036c7559b5ece816",
"sector_size": 68719476736
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.vk": {
"cid": "Qmd794Jty7k26XJ8Eg4NDEks65Qk8G4GVfGkwqvymv8HAg",
"digest": "80e366df2f1011953c2d01c7b7c9ee8e",
"sector_size": 68719476736
},
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": {
"cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR", "cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR",
"digest": "7610b9f82bfc88405b7a832b651ce2f6", "digest": "7610b9f82bfc88405b7a832b651ce2f6",

View File

@ -142,7 +142,7 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re
go func() { go func() {
start := build.Clock.Now() start := build.Clock.Now()
log.Infow("start fetching randomness", "round", round) log.Debugw("start fetching randomness", "round", round)
resp, err := db.client.Get(ctx, round) resp, err := db.client.Get(ctx, round)
var br beacon.Response var br beacon.Response
@ -152,7 +152,7 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re
br.Entry.Round = resp.Round() br.Entry.Round = resp.Round()
br.Entry.Data = resp.Signature() br.Entry.Data = resp.Signature()
} }
log.Infow("done fetching randomness", "round", round, "took", build.Clock.Since(start)) log.Debugw("done fetching randomness", "round", round, "took", build.Clock.Since(start))
out <- br out <- br
close(out) close(out)
}() }()

View File

@ -182,7 +182,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
} }
} }
return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot) return xerrors.Errorf("parent state root did not match computed state (%s != %s)", h.ParentStateRoot, stateroot)
} }
if precp != h.ParentMessageReceipts { if precp != h.ParentMessageReceipts {

View File

@ -173,10 +173,17 @@ type MessagePool struct {
sigValCache *lru.TwoQueueCache sigValCache *lru.TwoQueueCache
nonceCache *lru.Cache
evtTypes [3]journal.EventType evtTypes [3]journal.EventType
journal journal.Journal journal journal.Journal
} }
type nonceCacheKey struct {
tsk types.TipSetKey
addr address.Address
}
type msgSet struct { type msgSet struct {
msgs map[uint64]*types.SignedMessage msgs map[uint64]*types.SignedMessage
nextNonce uint64 nextNonce uint64
@ -361,6 +368,7 @@ func (ms *msgSet) toSlice() []*types.SignedMessage {
func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) { func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
cache, _ := lru.New2Q(build.BlsSignatureCacheSize) cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
verifcache, _ := lru.New2Q(build.VerifSigCacheSize) verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
noncecache, _ := lru.New(256)
cfg, err := loadConfig(ctx, ds) cfg, err := loadConfig(ctx, ds)
if err != nil { if err != nil {
@ -386,6 +394,7 @@ func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.Upgra
pruneCooldown: make(chan struct{}, 1), pruneCooldown: make(chan struct{}, 1),
blsSigCache: cache, blsSigCache: cache,
sigValCache: verifcache, sigValCache: verifcache,
nonceCache: noncecache,
changes: lps.New(50), changes: lps.New(50),
localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)), localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)),
api: api, api: api,
@ -1016,11 +1025,23 @@ func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address,
done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration) done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration)
defer done() defer done()
nk := nonceCacheKey{
tsk: ts.Key(),
addr: addr,
}
n, ok := mp.nonceCache.Get(nk)
if ok {
return n.(uint64), nil
}
act, err := mp.api.GetActorAfter(addr, ts) act, err := mp.api.GetActorAfter(addr, ts)
if err != nil { if err != nil {
return 0, err return 0, err
} }
mp.nonceCache.Add(nk, act.Nonce)
return act.Nonce, nil return act.Nonce, nil
} }
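
As a hedged illustration of the cache introduced above (the key and value shapes mirror nonceCacheKey and act.Nonce, but the types here are simplified stand-ins): repeated getStateNonce calls for the same (tipset, sender) pair are served from a small LRU instead of hitting the state tree.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

// nonceKey mirrors the shape of nonceCacheKey, with plain strings standing in
// for types.TipSetKey and address.Address.
type nonceKey struct {
	tsk  string
	addr string
}

func main() {
	cache, _ := lru.New(256) // same bound as the mpool nonce cache

	getNonce := func(k nonceKey, fromState func() uint64) uint64 {
		if n, ok := cache.Get(k); ok {
			return n.(uint64) // hit: skip the state lookup entirely
		}
		n := fromState()
		cache.Add(k, n)
		return n
	}

	k := nonceKey{tsk: "tipset-A", addr: "f1abc"}
	fmt.Println(getNonce(k, func() uint64 { return 42 })) // miss -> 42
	fmt.Println(getNonce(k, func() uint64 { return 99 })) // hit  -> still 42
}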

View File

@ -1,3 +1,4 @@
//stm: #unit
package messagepool package messagepool
import ( import (
@ -206,6 +207,7 @@ func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipS
func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) { func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) {
t.Helper() t.Helper()
//stm: @CHAIN_MEMPOOL_GET_NONCE_001
n, err := mp.GetNonce(context.TODO(), addr, types.EmptyTSK) n, err := mp.GetNonce(context.TODO(), addr, types.EmptyTSK)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -366,8 +368,10 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) {
tma.applyBlock(t, a) tma.applyBlock(t, a)
tsa := mock.TipSet(a) tsa := mock.TipSet(a)
//stm: @CHAIN_MEMPOOL_PENDING_001
_, _ = mp.Pending(context.TODO()) _, _ = mp.Pending(context.TODO())
//stm: @CHAIN_MEMPOOL_SELECT_001
selm, _ := mp.SelectMessages(context.Background(), tsa, 1) selm, _ := mp.SelectMessages(context.Background(), tsa, 1)
if len(selm) == 0 { if len(selm) == 0 {
t.Fatal("should have returned the rest of the messages") t.Fatal("should have returned the rest of the messages")
@ -428,6 +432,7 @@ func TestRevertMessages(t *testing.T) {
assertNonce(t, mp, sender, 4) assertNonce(t, mp, sender, 4)
//stm: @CHAIN_MEMPOOL_PENDING_001
p, _ := mp.Pending(context.TODO()) p, _ := mp.Pending(context.TODO())
fmt.Printf("%+v\n", p) fmt.Printf("%+v\n", p)
if len(p) != 3 { if len(p) != 3 {
@ -486,6 +491,7 @@ func TestPruningSimple(t *testing.T) {
mp.Prune() mp.Prune()
//stm: @CHAIN_MEMPOOL_PENDING_001
msgs, _ := mp.Pending(context.TODO()) msgs, _ := mp.Pending(context.TODO())
if len(msgs) != 5 { if len(msgs) != 5 {
t.Fatal("expected only 5 messages in pool, got: ", len(msgs)) t.Fatal("expected only 5 messages in pool, got: ", len(msgs))
@ -528,6 +534,7 @@ func TestLoadLocal(t *testing.T) {
msgs := make(map[cid.Cid]struct{}) msgs := make(map[cid.Cid]struct{})
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
//stm: @CHAIN_MEMPOOL_PUSH_001
cid, err := mp.Push(context.TODO(), m) cid, err := mp.Push(context.TODO(), m)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -544,6 +551,7 @@ func TestLoadLocal(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
//stm: @CHAIN_MEMPOOL_PENDING_001
pmsgs, _ := mp.Pending(context.TODO()) pmsgs, _ := mp.Pending(context.TODO())
if len(msgs) != len(pmsgs) { if len(msgs) != len(pmsgs) {
t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs)) t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs))
@ -599,6 +607,7 @@ func TestClearAll(t *testing.T) {
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
//stm: @CHAIN_MEMPOOL_PUSH_001
_, err := mp.Push(context.TODO(), m) _, err := mp.Push(context.TODO(), m)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -610,8 +619,10 @@ func TestClearAll(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
//stm: @CHAIN_MEMPOOL_CLEAR_001
mp.Clear(context.Background(), true) mp.Clear(context.Background(), true)
//stm: @CHAIN_MEMPOOL_PENDING_001
pending, _ := mp.Pending(context.TODO()) pending, _ := mp.Pending(context.TODO())
if len(pending) > 0 { if len(pending) > 0 {
t.Fatalf("cleared the mpool, but got %d pending messages", len(pending)) t.Fatalf("cleared the mpool, but got %d pending messages", len(pending))
@ -654,6 +665,7 @@ func TestClearNonLocal(t *testing.T) {
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
//stm: @CHAIN_MEMPOOL_PUSH_001
_, err := mp.Push(context.TODO(), m) _, err := mp.Push(context.TODO(), m)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -665,8 +677,10 @@ func TestClearNonLocal(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
//stm: @CHAIN_MEMPOOL_CLEAR_001
mp.Clear(context.Background(), false) mp.Clear(context.Background(), false)
//stm: @CHAIN_MEMPOOL_PENDING_001
pending, _ := mp.Pending(context.TODO()) pending, _ := mp.Pending(context.TODO())
if len(pending) != 10 { if len(pending) != 10 {
t.Fatalf("expected 10 pending messages, but got %d instead", len(pending)) t.Fatalf("expected 10 pending messages, but got %d instead", len(pending))
@ -724,6 +738,7 @@ func TestUpdates(t *testing.T) {
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
//stm: @CHAIN_MEMPOOL_PUSH_001
_, err := mp.Push(context.TODO(), m) _, err := mp.Push(context.TODO(), m)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)

View File

@ -1,3 +1,4 @@
//stm: #unit
package messagesigner package messagesigner
import ( import (
@ -60,6 +61,7 @@ func TestMessageSignerSignMessage(t *testing.T) {
to2, err := w.WalletNew(ctx, types.KTSecp256k1) to2, err := w.WalletNew(ctx, types.KTSecp256k1)
require.NoError(t, err) require.NoError(t, err)
//stm: @CHAIN_MESSAGE_SIGNER_NEW_SIGNER_001, @CHAIN_MESSAGE_SIGNER_SIGN_MESSAGE_001, @CHAIN_MESSAGE_SIGNER_SIGN_MESSAGE_005
type msgSpec struct { type msgSpec struct {
msg *types.Message msg *types.Message
mpoolNonce [1]uint64 mpoolNonce [1]uint64

View File

@ -1,3 +1,4 @@
//stm:#unit
package rand_test package rand_test
import ( import (
@ -55,11 +56,13 @@ func TestNullRandomnessV1(t *testing.T) {
randEpoch := ts.TipSet.TipSet().Height() - 2 randEpoch := ts.TipSet.TipSet().Height() - 2
//stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V1_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_02
rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key()) rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
//stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(beforeNullHeight)+offset) bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(beforeNullHeight)+offset)
select { select {
@ -68,6 +71,7 @@ func TestNullRandomnessV1(t *testing.T) {
t.Fatal(resp.Err) t.Fatal(resp.Err)
} }
//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy) rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -131,11 +135,13 @@ func TestNullRandomnessV2(t *testing.T) {
randEpoch := ts.TipSet.TipSet().Height() - 2 randEpoch := ts.TipSet.TipSet().Height() - 2
//stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V2_01
rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key()) rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
//stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(ts.TipSet.TipSet().Height())+offset) bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(ts.TipSet.TipSet().Height())+offset)
select { select {
@ -144,6 +150,7 @@ func TestNullRandomnessV2(t *testing.T) {
t.Fatal(resp.Err) t.Fatal(resp.Err)
} }
//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_03
// note that the randEpoch passed to DrawRandomness is still randEpoch (not the latest ts height) // note that the randEpoch passed to DrawRandomness is still randEpoch (not the latest ts height)
rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy) rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
if err != nil { if err != nil {
@ -212,11 +219,13 @@ func TestNullRandomnessV3(t *testing.T) {
randEpoch := ts.TipSet.TipSet().Height() - 2 randEpoch := ts.TipSet.TipSet().Height() - 2
//stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V3_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01
rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key()) rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
//stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(randEpoch)+offset) bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(randEpoch)+offset)
select { select {
@ -225,6 +234,7 @@ func TestNullRandomnessV3(t *testing.T) {
t.Fatal(resp.Err) t.Fatal(resp.Err)
} }
//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy) rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)

View File

@ -1,3 +1,4 @@
//stm: #unit
package sub package sub
import ( import (
@ -49,6 +50,7 @@ func TestFetchCidsWithDedup(t *testing.T) {
} }
g := &getter{msgs} g := &getter{msgs}
//stm: @CHAIN_INCOMING_FETCH_MESSAGES_BY_CID_001
// the cids have a duplicate // the cids have a duplicate
res, err := FetchMessagesByCids(context.TODO(), g, append(cids, cids[0])) res, err := FetchMessagesByCids(context.TODO(), g, append(cids, cids[0]))

View File

@ -1,3 +1,4 @@
//stm: #unit
package chain_test package chain_test
import ( import (
@ -462,6 +463,8 @@ func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) {
} }
func TestSyncSimple(t *testing.T) { func TestSyncSimple(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
H := 50 H := 50
tu := prepSyncTest(t, H) tu := prepSyncTest(t, H)
@ -478,6 +481,8 @@ func TestSyncSimple(t *testing.T) {
} }
func TestSyncMining(t *testing.T) { func TestSyncMining(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
H := 50 H := 50
tu := prepSyncTest(t, H) tu := prepSyncTest(t, H)
@ -500,6 +505,8 @@ func TestSyncMining(t *testing.T) {
} }
func TestSyncBadTimestamp(t *testing.T) { func TestSyncBadTimestamp(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
H := 50 H := 50
tu := prepSyncTest(t, H) tu := prepSyncTest(t, H)
@ -554,6 +561,8 @@ func (wpp badWpp) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi
} }
func TestSyncBadWinningPoSt(t *testing.T) { func TestSyncBadWinningPoSt(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
H := 15 H := 15
tu := prepSyncTest(t, H) tu := prepSyncTest(t, H)
@ -583,6 +592,9 @@ func (tu *syncTestUtil) loadChainToNode(to int) {
} }
func TestSyncFork(t *testing.T) { func TestSyncFork(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
H := 10 H := 10
tu := prepSyncTest(t, H) tu := prepSyncTest(t, H)
@ -650,6 +662,9 @@ func TestSyncFork(t *testing.T) {
// A and B both include _different_ messages from sender X with nonce N (where N is the correct nonce for X). // A and B both include _different_ messages from sender X with nonce N (where N is the correct nonce for X).
// We can confirm that the state can be correctly computed, and that `MessagesForTipset` behaves as expected. // We can confirm that the state can be correctly computed, and that `MessagesForTipset` behaves as expected.
func TestDuplicateNonce(t *testing.T) { func TestDuplicateNonce(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
H := 10 H := 10
tu := prepSyncTest(t, H) tu := prepSyncTest(t, H)
@ -704,6 +719,7 @@ func TestDuplicateNonce(t *testing.T) {
var includedMsg cid.Cid var includedMsg cid.Cid
var skippedMsg cid.Cid var skippedMsg cid.Cid
//stm: @CHAIN_STATE_SEARCH_MSG_001
r0, err0 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[0][0].Cid(), api.LookbackNoLimit, true) r0, err0 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[0][0].Cid(), api.LookbackNoLimit, true)
r1, err1 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[1][0].Cid(), api.LookbackNoLimit, true) r1, err1 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[1][0].Cid(), api.LookbackNoLimit, true)
@ -745,6 +761,9 @@ func TestDuplicateNonce(t *testing.T) {
// This test asserts that a block that includes a message with bad nonce can't be synced. A nonce is "bad" if it can't // This test asserts that a block that includes a message with bad nonce can't be synced. A nonce is "bad" if it can't
// be applied on the parent state. // be applied on the parent state.
func TestBadNonce(t *testing.T) { func TestBadNonce(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
H := 10 H := 10
tu := prepSyncTest(t, H) tu := prepSyncTest(t, H)
@ -792,6 +811,9 @@ func TestBadNonce(t *testing.T) {
// One of the messages uses the sender's robust address, the other uses the ID address. // One of the messages uses the sender's robust address, the other uses the ID address.
// Such a block is invalid and should not sync. // Such a block is invalid and should not sync.
func TestMismatchedNoncesRobustID(t *testing.T) { func TestMismatchedNoncesRobustID(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
v5h := abi.ChainEpoch(4) v5h := abi.ChainEpoch(4)
tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h) tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)
@ -804,6 +826,7 @@ func TestMismatchedNoncesRobustID(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Produce a message from the banker // Produce a message from the banker
//stm: @CHAIN_STATE_LOOKUP_ID_001
makeMsg := func(id bool) *types.SignedMessage { makeMsg := func(id bool) *types.SignedMessage {
sender := tu.g.Banker() sender := tu.g.Banker()
if id { if id {
@ -846,6 +869,9 @@ func TestMismatchedNoncesRobustID(t *testing.T) {
// One of the messages uses the sender's robust address, the other uses the ID address. // One of the messages uses the sender's robust address, the other uses the ID address.
// Such a block is valid and should sync. // Such a block is valid and should sync.
func TestMatchedNoncesRobustID(t *testing.T) { func TestMatchedNoncesRobustID(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
v5h := abi.ChainEpoch(4) v5h := abi.ChainEpoch(4)
tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h) tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)
@ -858,6 +884,7 @@ func TestMatchedNoncesRobustID(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Produce a message from the banker with specified nonce // Produce a message from the banker with specified nonce
//stm: @CHAIN_STATE_LOOKUP_ID_001
makeMsg := func(n uint64, id bool) *types.SignedMessage { makeMsg := func(n uint64, id bool) *types.SignedMessage {
sender := tu.g.Banker() sender := tu.g.Banker()
if id { if id {
@ -917,6 +944,8 @@ func runSyncBenchLength(b *testing.B, l int) {
} }
func TestSyncInputs(t *testing.T) { func TestSyncInputs(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_VALIDATE_BLOCK_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_STOP_001
H := 10 H := 10
tu := prepSyncTest(t, H) tu := prepSyncTest(t, H)
@ -944,6 +973,9 @@ func TestSyncInputs(t *testing.T) {
} }
func TestSyncCheckpointHead(t *testing.T) { func TestSyncCheckpointHead(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
H := 10 H := 10
tu := prepSyncTest(t, H) tu := prepSyncTest(t, H)
@ -963,6 +995,7 @@ func TestSyncCheckpointHead(t *testing.T) {
a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true) a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)
tu.waitUntilSyncTarget(p1, a.TipSet()) tu.waitUntilSyncTarget(p1, a.TipSet())
//stm: @CHAIN_SYNCER_CHECKPOINT_001
tu.checkpointTs(p1, a.TipSet().Key()) tu.checkpointTs(p1, a.TipSet().Key())
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
@ -982,15 +1015,20 @@ func TestSyncCheckpointHead(t *testing.T) {
tu.waitUntilNodeHasTs(p1, b.TipSet().Key()) tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
p1Head := tu.getHead(p1) p1Head := tu.getHead(p1)
require.True(tu.t, p1Head.Equals(a.TipSet())) require.True(tu.t, p1Head.Equals(a.TipSet()))
//stm: @CHAIN_SYNCER_CHECK_BAD_001
tu.assertBad(p1, b.TipSet()) tu.assertBad(p1, b.TipSet())
// Should be able to switch forks. // Should be able to switch forks.
//stm: @CHAIN_SYNCER_CHECKPOINT_001
tu.checkpointTs(p1, b.TipSet().Key()) tu.checkpointTs(p1, b.TipSet().Key())
p1Head = tu.getHead(p1) p1Head = tu.getHead(p1)
require.True(tu.t, p1Head.Equals(b.TipSet())) require.True(tu.t, p1Head.Equals(b.TipSet()))
} }
func TestSyncCheckpointEarlierThanHead(t *testing.T) { func TestSyncCheckpointEarlierThanHead(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
H := 10 H := 10
tu := prepSyncTest(t, H) tu := prepSyncTest(t, H)
@ -1010,6 +1048,7 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true) a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)
tu.waitUntilSyncTarget(p1, a.TipSet()) tu.waitUntilSyncTarget(p1, a.TipSet())
//stm: @CHAIN_SYNCER_CHECKPOINT_001
tu.checkpointTs(p1, a1.TipSet().Key()) tu.checkpointTs(p1, a1.TipSet().Key())
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
@ -1029,15 +1068,19 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
tu.waitUntilNodeHasTs(p1, b.TipSet().Key()) tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
p1Head := tu.getHead(p1) p1Head := tu.getHead(p1)
require.True(tu.t, p1Head.Equals(a.TipSet())) require.True(tu.t, p1Head.Equals(a.TipSet()))
//stm: @CHAIN_SYNCER_CHECK_BAD_001
tu.assertBad(p1, b.TipSet()) tu.assertBad(p1, b.TipSet())
// Should be able to switch forks. // Should be able to switch forks.
//stm: @CHAIN_SYNCER_CHECKPOINT_001
tu.checkpointTs(p1, b.TipSet().Key()) tu.checkpointTs(p1, b.TipSet().Key())
p1Head = tu.getHead(p1) p1Head = tu.getHead(p1)
require.True(tu.t, p1Head.Equals(b.TipSet())) require.True(tu.t, p1Head.Equals(b.TipSet()))
} }
func TestInvalidHeight(t *testing.T) { func TestInvalidHeight(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
H := 50 H := 50
tu := prepSyncTest(t, H) tu := prepSyncTest(t, H)

View File

@ -466,6 +466,7 @@ var stateOrder = map[sealing.SectorState]stateMeta{}
var stateList = []stateMeta{ var stateList = []stateMeta{
{col: 39, state: "Total"}, {col: 39, state: "Total"},
{col: color.FgGreen, state: sealing.Proving}, {col: color.FgGreen, state: sealing.Proving},
{col: color.FgGreen, state: sealing.UpdateActivating},
{col: color.FgBlue, state: sealing.Empty}, {col: color.FgBlue, state: sealing.Empty},
{col: color.FgBlue, state: sealing.WaitDeals}, {col: color.FgBlue, state: sealing.WaitDeals},
@ -496,6 +497,7 @@ var stateList = []stateMeta{
{col: color.FgYellow, state: sealing.SubmitReplicaUpdate}, {col: color.FgYellow, state: sealing.SubmitReplicaUpdate},
{col: color.FgYellow, state: sealing.ReplicaUpdateWait}, {col: color.FgYellow, state: sealing.ReplicaUpdateWait},
{col: color.FgYellow, state: sealing.FinalizeReplicaUpdate}, {col: color.FgYellow, state: sealing.FinalizeReplicaUpdate},
{col: color.FgYellow, state: sealing.ReleaseSectorKey},
{col: color.FgCyan, state: sealing.Terminating}, {col: color.FgCyan, state: sealing.Terminating},
{col: color.FgCyan, state: sealing.TerminateWait}, {col: color.FgCyan, state: sealing.TerminateWait},
@ -524,6 +526,7 @@ var stateList = []stateMeta{
{col: color.FgRed, state: sealing.SnapDealsAddPieceFailed}, {col: color.FgRed, state: sealing.SnapDealsAddPieceFailed},
{col: color.FgRed, state: sealing.SnapDealsDealsExpired}, {col: color.FgRed, state: sealing.SnapDealsDealsExpired},
{col: color.FgRed, state: sealing.ReplicaUpdateFailed}, {col: color.FgRed, state: sealing.ReplicaUpdateFailed},
{col: color.FgRed, state: sealing.ReleaseSectorKeyFailed},
} }
func init() { func init() {

View File

@ -55,6 +55,7 @@ var sectorsCmd = &cli.Command{
sectorsTerminateCmd, sectorsTerminateCmd,
sectorsRemoveCmd, sectorsRemoveCmd,
sectorsSnapUpCmd, sectorsSnapUpCmd,
sectorsSnapAbortCmd,
sectorsMarkForUpgradeCmd, sectorsMarkForUpgradeCmd,
sectorsStartSealCmd, sectorsStartSealCmd,
sectorsSealDelayCmd, sectorsSealDelayCmd,
@ -1520,6 +1521,31 @@ var sectorsSnapUpCmd = &cli.Command{
}, },
} }
var sectorsSnapAbortCmd = &cli.Command{
Name: "abort-upgrade",
Usage: "Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to as before",
ArgsUsage: "<sectorNum>",
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 1 {
return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number"))
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
if err != nil {
return xerrors.Errorf("could not parse sector number: %w", err)
}
return nodeApi.SectorAbortUpgrade(ctx, abi.SectorNumber(id))
},
}
var sectorsMarkForUpgradeCmd = &cli.Command{ var sectorsMarkForUpgradeCmd = &cli.Command{
Name: "mark-for-upgrade", Name: "mark-for-upgrade",
Usage: "Mark a committed capacity sector for replacement by a sector with deals", Usage: "Mark a committed capacity sector for replacement by a sector with deals",

View File

@ -261,7 +261,7 @@ var runCmd = &cli.Command{
var taskTypes []sealtasks.TaskType var taskTypes []sealtasks.TaskType
taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize) taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFinalizeReplicaUpdate)
if cctx.Bool("addpiece") { if cctx.Bool("addpiece") {
taskTypes = append(taskTypes, sealtasks.TTAddPiece) taskTypes = append(taskTypes, sealtasks.TTAddPiece)

View File

@ -4,10 +4,12 @@ import (
"encoding/json" "encoding/json"
corebig "math/big" corebig "math/big"
"os" "os"
"strconv"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
filbig "github.com/filecoin-project/go-state-types/big" filbig "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
@ -22,29 +24,50 @@ type networkTotalsOutput struct {
Payload networkTotals `json:"payload"` Payload networkTotals `json:"payload"`
} }
type providerMeta struct {
nonidentifiable bool
}
// force formatting as decimal to aid human readers
type humanFloat float64
func (f humanFloat) MarshalJSON() ([]byte, error) {
// 'f' uses decimal digits without exponents.
// The bit size of 32 ensures we don't use too many decimal places.
return []byte(strconv.FormatFloat(float64(f), 'f', -1, 32)), nil
}
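As a standalone illustration (not part of this change) of why the 'f' verb and a bit size of 32 are used here: 'f' never switches to exponent notation, and rounding through float32 keeps the decimal expansion short. The values below are arbitrary examples.

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	tiny := 0.0000123
	fmt.Println(strconv.FormatFloat(tiny, 'g', -1, 64)) // "1.23e-05" (exponent form)
	fmt.Println(strconv.FormatFloat(tiny, 'f', -1, 64)) // "0.0000123" (plain decimal)

	frac := 1.0 / 3.0
	fmt.Println(strconv.FormatFloat(frac, 'f', -1, 64)) // "0.3333333333333333"
	fmt.Println(strconv.FormatFloat(frac, 'f', -1, 32)) // "0.33333334" (float32 precision)
}
```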
type Totals struct {
TotalDeals int `json:"total_num_deals"`
TotalBytes int64 `json:"total_stored_data_size"`
PrivateTotalDeals int `json:"private_total_num_deals"`
PrivateTotalBytes int64 `json:"private_total_stored_data_size"`
CapacityCarryingData humanFloat `json:"capacity_fraction_carrying_data"`
}
type networkTotals struct { type networkTotals struct {
QaNetworkPower filbig.Int `json:"total_qa_power"` QaNetworkPower filbig.Int `json:"total_qa_power"`
RawNetworkPower filbig.Int `json:"total_raw_capacity"` RawNetworkPower filbig.Int `json:"total_raw_capacity"`
CapacityCarryingData float64 `json:"capacity_fraction_carrying_data"`
UniqueCids int `json:"total_unique_cids"` UniqueCids int `json:"total_unique_cids"`
UniqueProviders int `json:"total_unique_providers"` UniqueBytes int64 `json:"total_unique_data_size"`
UniqueClients int `json:"total_unique_clients"` UniqueClients int `json:"total_unique_clients"`
TotalDeals int `json:"total_num_deals"` UniqueProviders int `json:"total_unique_providers"`
TotalBytes int64 `json:"total_stored_data_size"` UniquePrivateProviders int `json:"total_unique_private_providers"`
FilplusTotalDeals int `json:"filplus_total_num_deals"` Totals
FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"` FilPlus Totals `json:"filecoin_plus_subset"`
seenClient map[address.Address]bool pieces map[cid.Cid]struct{}
seenProvider map[address.Address]bool clients map[address.Address]struct{}
seenPieceCid map[cid.Cid]bool providers map[address.Address]providerMeta
} }
var storageStatsCmd = &cli.Command{ var storageStatsCmd = &cli.Command{
Name: "storage-stats", Name: "storage-stats",
Usage: "Translates current lotus state into a json summary suitable for driving https://storage.filecoin.io/", Usage: "Translates current lotus state into a json summary suitable for driving https://storage.filecoin.io/",
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.Int64Flag{ &cli.StringFlag{
Name: "height", Name: "tipset",
Usage: "Comma separated array of cids, or @height",
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
@ -56,22 +79,24 @@ var storageStatsCmd = &cli.Command{
} }
defer apiCloser() defer apiCloser()
head, err := api.ChainHead(ctx) var ts *types.TipSet
if cctx.String("tipset") == "" {
ts, err = api.ChainHead(ctx)
if err != nil {
return err
}
ts, err = api.ChainGetTipSetByHeight(ctx, ts.Height()-defaultEpochLookback, ts.Key())
if err != nil { if err != nil {
return err return err
} }
requestedHeight := cctx.Int64("height")
if requestedHeight > 0 {
head, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(requestedHeight), head.Key())
} else { } else {
head, err = api.ChainGetTipSetByHeight(ctx, head.Height()-defaultEpochLookback, head.Key()) ts, err = lcli.ParseTipSetRef(ctx, api, cctx.String("tipset"))
}
if err != nil { if err != nil {
return err return err
} }
}
power, err := api.StateMinerPower(ctx, address.Address{}, head.Key()) power, err := api.StateMinerPower(ctx, address.Address{}, ts.Key())
if err != nil { if err != nil {
return err return err
} }
@ -79,12 +104,12 @@ var storageStatsCmd = &cli.Command{
netTotals := networkTotals{ netTotals := networkTotals{
QaNetworkPower: power.TotalPower.QualityAdjPower, QaNetworkPower: power.TotalPower.QualityAdjPower,
RawNetworkPower: power.TotalPower.RawBytePower, RawNetworkPower: power.TotalPower.RawBytePower,
seenClient: make(map[address.Address]bool), pieces: make(map[cid.Cid]struct{}),
seenProvider: make(map[address.Address]bool), clients: make(map[address.Address]struct{}),
seenPieceCid: make(map[cid.Cid]bool), providers: make(map[address.Address]providerMeta),
} }
deals, err := api.StateMarketDeals(ctx, head.Key()) deals, err := api.StateMarketDeals(ctx, ts.Key())
if err != nil { if err != nil {
return err return err
} }
@ -94,35 +119,76 @@ var storageStatsCmd = &cli.Command{
// Only count deals that have properly started, not past/future ones // Only count deals that have properly started, not past/future ones
// https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85 // https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85
// Bail on 0 as well in case SectorStartEpoch is uninitialized due to some bug // Bail on 0 as well in case SectorStartEpoch is uninitialized due to some bug
//
// Additionally, if the SlashEpoch is set, this means the underlying sector is
// terminated for whatever reason (not just slashed), and the deal record
// will soon be removed from the state entirely
if dealInfo.State.SectorStartEpoch <= 0 || if dealInfo.State.SectorStartEpoch <= 0 ||
dealInfo.State.SectorStartEpoch > head.Height() { dealInfo.State.SectorStartEpoch > ts.Height() ||
dealInfo.State.SlashEpoch > -1 {
continue continue
} }
netTotals.seenClient[dealInfo.Proposal.Client] = true netTotals.clients[dealInfo.Proposal.Client] = struct{}{}
if _, seen := netTotals.providers[dealInfo.Proposal.Provider]; !seen {
pm := providerMeta{}
mi, err := api.StateMinerInfo(ctx, dealInfo.Proposal.Provider, ts.Key())
if err != nil {
return err
}
if mi.PeerId == nil || *mi.PeerId == "" {
log.Infof("private provider %s", dealInfo.Proposal.Provider)
pm.nonidentifiable = true
netTotals.UniquePrivateProviders++
}
netTotals.providers[dealInfo.Proposal.Provider] = pm
netTotals.UniqueProviders++
}
if _, seen := netTotals.pieces[dealInfo.Proposal.PieceCID]; !seen {
netTotals.pieces[dealInfo.Proposal.PieceCID] = struct{}{}
netTotals.UniqueBytes += int64(dealInfo.Proposal.PieceSize)
netTotals.UniqueCids++
}
netTotals.TotalBytes += int64(dealInfo.Proposal.PieceSize) netTotals.TotalBytes += int64(dealInfo.Proposal.PieceSize)
netTotals.seenProvider[dealInfo.Proposal.Provider] = true
netTotals.seenPieceCid[dealInfo.Proposal.PieceCID] = true
netTotals.TotalDeals++ netTotals.TotalDeals++
if netTotals.providers[dealInfo.Proposal.Provider].nonidentifiable {
netTotals.PrivateTotalBytes += int64(dealInfo.Proposal.PieceSize)
netTotals.PrivateTotalDeals++
}
if dealInfo.Proposal.VerifiedDeal { if dealInfo.Proposal.VerifiedDeal {
netTotals.FilplusTotalDeals++ netTotals.FilPlus.TotalBytes += int64(dealInfo.Proposal.PieceSize)
netTotals.FilplusTotalBytes += int64(dealInfo.Proposal.PieceSize) netTotals.FilPlus.TotalDeals++
if netTotals.providers[dealInfo.Proposal.Provider].nonidentifiable {
netTotals.FilPlus.PrivateTotalBytes += int64(dealInfo.Proposal.PieceSize)
netTotals.FilPlus.PrivateTotalDeals++
}
} }
} }
netTotals.UniqueCids = len(netTotals.seenPieceCid) netTotals.UniqueClients = len(netTotals.clients)
netTotals.UniqueClients = len(netTotals.seenClient)
netTotals.UniqueProviders = len(netTotals.seenProvider)
netTotals.CapacityCarryingData, _ = new(corebig.Rat).SetFrac( ccd, _ := new(corebig.Rat).SetFrac(
corebig.NewInt(netTotals.TotalBytes), corebig.NewInt(netTotals.TotalBytes),
netTotals.RawNetworkPower.Int, netTotals.RawNetworkPower.Int,
).Float64() ).Float64()
netTotals.CapacityCarryingData = humanFloat(ccd)
ccdfp, _ := new(corebig.Rat).SetFrac(
corebig.NewInt(netTotals.FilPlus.TotalBytes),
netTotals.RawNetworkPower.Int,
).Float64()
netTotals.FilPlus.CapacityCarryingData = humanFloat(ccdfp)
return json.NewEncoder(os.Stdout).Encode( return json.NewEncoder(os.Stdout).Encode(
networkTotalsOutput{ networkTotalsOutput{
Epoch: int64(head.Height()), Epoch: int64(ts.Height()),
Endpoint: "NETWORK_WIDE_TOTALS", Endpoint: "NETWORK_WIDE_TOTALS",
Payload: netTotals, Payload: netTotals,
}, },

View File

@ -24,6 +24,15 @@ var ProtocolCodenames = []struct {
{build.UpgradeTapeHeight + 1, "tape"}, {build.UpgradeTapeHeight + 1, "tape"},
{build.UpgradeLiftoffHeight + 1, "liftoff"}, {build.UpgradeLiftoffHeight + 1, "liftoff"},
{build.UpgradeKumquatHeight + 1, "postliftoff"}, {build.UpgradeKumquatHeight + 1, "postliftoff"},
{build.UpgradeCalicoHeight + 1, "calico"},
{build.UpgradePersianHeight + 1, "persian"},
{build.UpgradeOrangeHeight + 1, "orange"},
{build.UpgradeTrustHeight + 1, "trust"},
{build.UpgradeNorwegianHeight + 1, "norwegian"},
{build.UpgradeTurboHeight + 1, "turbo"},
{build.UpgradeHyperdriveHeight + 1, "hyperdrive"},
{build.UpgradeChocolateHeight + 1, "chocolate"},
{build.UpgradeOhSnapHeight + 1, "ohsnap"},
} }
// GetProtocolCodename gets the protocol codename associated with a height. // GetProtocolCodename gets the protocol codename associated with a height.

View File

@ -8,12 +8,11 @@ import (
"io" "io"
"log" "log"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
@ -43,6 +42,15 @@ func doExtractMessage(opts extractOpts) error {
return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err) return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err)
} }
// Assumes that the desired message isn't at the boundary of network versions.
// Otherwise this will be inaccurate. But it's such a tiny edge case that
// it's not worth spending the time to support boundary messages unless
// actually needed.
nv, err := FullAPI.StateNetworkVersion(ctx, incTs.Key())
if err != nil {
return fmt.Errorf("failed to resolve network version from inclusion height: %w", err)
}
// get the circulating supply before the message was executed. // get the circulating supply before the message was executed.
circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key()) circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key())
if err != nil { if err != nil {
@ -53,6 +61,7 @@ func doExtractMessage(opts extractOpts) error {
log.Printf("message was executed in tipset: %s", execTs.Key()) log.Printf("message was executed in tipset: %s", execTs.Key())
log.Printf("message was included in tipset: %s", incTs.Key()) log.Printf("message was included in tipset: %s", incTs.Key())
log.Printf("network version at inclusion: %d", nv)
log.Printf("circulating supply at inclusion tipset: %d", circSupply) log.Printf("circulating supply at inclusion tipset: %d", circSupply)
log.Printf("finding precursor messages using mode: %s", opts.precursor) log.Printf("finding precursor messages using mode: %s", opts.precursor)
@ -111,6 +120,7 @@ func doExtractMessage(opts extractOpts) error {
BaseFee: basefee, BaseFee: basefee,
// recorded randomness will be discarded. // recorded randomness will be discarded.
Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI), Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI),
NetworkVersion: nv,
}) })
if err != nil { if err != nil {
return fmt.Errorf("failed to execute precursor message: %w", err) return fmt.Errorf("failed to execute precursor message: %w", err)
@ -146,6 +156,7 @@ func doExtractMessage(opts extractOpts) error {
CircSupply: circSupplyDetail.FilCirculating, CircSupply: circSupplyDetail.FilCirculating,
BaseFee: basefee, BaseFee: basefee,
Rand: recordingRand, Rand: recordingRand,
NetworkVersion: nv,
}) })
if err != nil { if err != nil {
return fmt.Errorf("failed to execute message: %w", err) return fmt.Errorf("failed to execute message: %w", err)
@ -263,11 +274,6 @@ func doExtractMessage(opts extractOpts) error {
return err return err
} }
nv, err := FullAPI.StateNetworkVersion(ctx, execTs.Key())
if err != nil {
return err
}
codename := GetProtocolCodename(execTs.Height()) codename := GetProtocolCodename(execTs.Height())
// Write out the test vector. // Write out the test vector.

View File

@ -129,6 +129,7 @@ func runSimulateCmd(_ *cli.Context) error {
CircSupply: circSupply.FilCirculating, CircSupply: circSupply.FilCirculating,
BaseFee: baseFee, BaseFee: baseFee,
Rand: rand, Rand: rand,
// TODO NetworkVersion
}) })
if err != nil { if err != nil {
return fmt.Errorf("failed to apply message: %w", err) return fmt.Errorf("failed to apply message: %w", err)

View File

@ -97,6 +97,7 @@
* [Return](#Return) * [Return](#Return)
* [ReturnAddPiece](#ReturnAddPiece) * [ReturnAddPiece](#ReturnAddPiece)
* [ReturnFetch](#ReturnFetch) * [ReturnFetch](#ReturnFetch)
* [ReturnFinalizeReplicaUpdate](#ReturnFinalizeReplicaUpdate)
* [ReturnFinalizeSector](#ReturnFinalizeSector) * [ReturnFinalizeSector](#ReturnFinalizeSector)
* [ReturnGenerateSectorKeyFromData](#ReturnGenerateSectorKeyFromData) * [ReturnGenerateSectorKeyFromData](#ReturnGenerateSectorKeyFromData)
* [ReturnMoveStorage](#ReturnMoveStorage) * [ReturnMoveStorage](#ReturnMoveStorage)
@ -116,6 +117,7 @@
* [SealingAbort](#SealingAbort) * [SealingAbort](#SealingAbort)
* [SealingSchedDiag](#SealingSchedDiag) * [SealingSchedDiag](#SealingSchedDiag)
* [Sector](#Sector) * [Sector](#Sector)
* [SectorAbortUpgrade](#SectorAbortUpgrade)
* [SectorAddPieceToAny](#SectorAddPieceToAny) * [SectorAddPieceToAny](#SectorAddPieceToAny)
* [SectorCommitFlush](#SectorCommitFlush) * [SectorCommitFlush](#SectorCommitFlush)
* [SectorCommitPending](#SectorCommitPending) * [SectorCommitPending](#SectorCommitPending)
@ -2054,6 +2056,30 @@ Response: `{}`
### ReturnFetch ### ReturnFetch
Perms: admin
Inputs:
```json
[
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
},
{
"Code": 0,
"Message": "string value"
}
]
```
Response: `{}`
### ReturnFinalizeReplicaUpdate
Perms: admin Perms: admin
Inputs: Inputs:
@ -2474,6 +2500,21 @@ Response: `{}`
## Sector ## Sector
### SectorAbortUpgrade
SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort it
Perms: admin
Inputs:
```json
[
9
]
```
Response: `{}`
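A minimal Go sketch of calling this method over the miner RPC; the endpoint address, token placeholder, and sector number 9 are illustrative assumptions, and the example assumes the usual Lotus client constructor `client.NewStorageMinerRPCV0` is available.

```go
package main

import (
	"context"
	"net/http"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Assumptions: the miner API listens on the default local endpoint and
	// <MINER_TOKEN> is replaced with an admin token for a real deployment.
	header := http.Header{"Authorization": []string{"Bearer " + "<MINER_TOKEN>"}}
	miner, closer, err := client.NewStorageMinerRPCV0(ctx, "ws://127.0.0.1:2345/rpc/v0", header)
	if err != nil {
		panic(err)
	}
	defer closer()

	// Abort the in-progress upgrade of sector 9 (the number is only an example).
	if err := miner.SectorAbortUpgrade(ctx, abi.SectorNumber(9)); err != nil {
		panic(err)
	}
}
```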
### SectorAddPieceToAny ### SectorAddPieceToAny
Add piece to an open sector. If no sectors with enough space are open, Add piece to an open sector. If no sectors with enough space are open,
either a new sector will be created, or this call will block until more either a new sector will be created, or this call will block until more

View File

@ -10,6 +10,7 @@
* [Add](#Add) * [Add](#Add)
* [AddPiece](#AddPiece) * [AddPiece](#AddPiece)
* [Finalize](#Finalize) * [Finalize](#Finalize)
* [FinalizeReplicaUpdate](#FinalizeReplicaUpdate)
* [FinalizeSector](#FinalizeSector) * [FinalizeSector](#FinalizeSector)
* [Generate](#Generate) * [Generate](#Generate)
* [GenerateSectorKeyFromData](#GenerateSectorKeyFromData) * [GenerateSectorKeyFromData](#GenerateSectorKeyFromData)
@ -1112,6 +1113,41 @@ Response:
## Finalize ## Finalize
### FinalizeReplicaUpdate
Perms: admin
Inputs:
```json
[
{
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
[
{
"Offset": 1024,
"Size": 1024
}
]
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
### FinalizeSector ### FinalizeSector

View File

@ -1580,6 +1580,7 @@ COMMANDS:
terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector) terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)
remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty)) remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty))
snap-up Mark a committed capacity sector to be filled with deals snap-up Mark a committed capacity sector to be filled with deals
abort-upgrade Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to its previous state
mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals
seal Manually start sealing a sector (filling any unused space with junk) seal Manually start sealing a sector (filling any unused space with junk)
set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts
@ -1815,6 +1816,19 @@ OPTIONS:
``` ```
### lotus-miner sectors abort-upgrade
```
NAME:
lotus-miner sectors abort-upgrade - Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to its previous state
USAGE:
lotus-miner sectors abort-upgrade [command options] <sectorNum>
OPTIONS:
--help, -h show help (default: false)
```
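For example, to abort the in-progress upgrade of sector 9 (the sector number is illustrative):
```
lotus-miner sectors abort-upgrade 9
```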
### lotus-miner sectors mark-for-upgrade ### lotus-miner sectors mark-for-upgrade
``` ```
NAME: NAME:

View File

@ -171,11 +171,11 @@
#HotStoreType = "badger" #HotStoreType = "badger"
# MarkSetType specifies the type of the markset. # MarkSetType specifies the type of the markset.
# It can be "map" (default) for in memory marking or "badger" for on-disk marking. # It can be "map" for in memory marking or "badger" (default) for on-disk marking.
# #
# type: string # type: string
# env var: LOTUS_CHAINSTORE_SPLITSTORE_MARKSETTYPE # env var: LOTUS_CHAINSTORE_SPLITSTORE_MARKSETTYPE
#MarkSetType = "map" #MarkSetType = "badger"
# HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond # HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond
# the compaction boundary; default is 0. # the compaction boundary; default is 0.

View File

@ -167,6 +167,14 @@
# env var: LOTUS_DEALMAKING_EXPECTEDSEALDURATION # env var: LOTUS_DEALMAKING_EXPECTEDSEALDURATION
#ExpectedSealDuration = "24h0m0s" #ExpectedSealDuration = "24h0m0s"
# Whether new sectors are created to pack incoming deals
# When this is set to false no new sectors will be created for sealing incoming deals
# This is useful for forcing all deals to be assigned as snap deals to sectors marked for upgrade
#
# type: bool
# env var: LOTUS_DEALMAKING_MAKENEWSECTORFORDEALS
#MakeNewSectorForDeals = true
# Maximum amount of time proposed deal StartEpoch can be in future # Maximum amount of time proposed deal StartEpoch can be in future
# #
# type: Duration # type: Duration

extern/filecoin-ffi vendored

@ -1 +1 @@
Subproject commit e660df5616e397b2d8ac316f45ddfa7a44637971 Subproject commit 5ec5d805c01ea85224f6448dd6c6fa0a2a73c028

View File

@ -55,6 +55,25 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
return nil return nil
} }
// temporary hack to make the check work with snapdeals
// will go away in https://github.com/filecoin-project/lotus/pull/7971
if lp.Sealed == "" || lp.Cache == "" {
// maybe it's update
lockedUpdate, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone)
if err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
if lockedUpdate {
lp, _, err = m.localStore.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
if err != nil {
log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err)
return nil
}
lp.Sealed, lp.Cache = lp.Update, lp.UpdateCache
}
}
if lp.Sealed == "" || lp.Cache == "" { if lp.Sealed == "" || lp.Cache == "" {
log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache) log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache)
bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", lp.Cache, lp.Sealed) bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", lp.Cache, lp.Sealed)

View File

@ -669,7 +669,7 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, pha
func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) { func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) {
empty := storage.ReplicaUpdateOut{} empty := storage.ReplicaUpdateOut{}
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing) paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing)
if err != nil { if err != nil {
return empty, xerrors.Errorf("failed to acquire sector paths: %w", err) return empty, xerrors.Errorf("failed to acquire sector paths: %w", err)
} }
@ -718,7 +718,7 @@ func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, p
} }
func (sb *Sealer) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) { func (sb *Sealer) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdateCache|storiface.FTUpdate, storiface.FTNone, storiface.PathSealing) paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathSealing)
if err != nil { if err != nil {
return nil, xerrors.Errorf("failed to acquire sector paths: %w", err) return nil, xerrors.Errorf("failed to acquire sector paths: %w", err)
} }
@ -769,7 +769,7 @@ func (sb *Sealer) ReleaseSealed(ctx context.Context, sector storage.SectorRef) e
return xerrors.Errorf("not supported at this layer") return xerrors.Errorf("not supported at this layer")
} }
func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { func (sb *Sealer) freeUnsealed(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
ssize, err := sector.ProofType.SectorSize() ssize, err := sector.ProofType.SectorSize()
if err != nil { if err != nil {
return err return err
@ -834,6 +834,19 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef,
} }
return nil
}
func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return err
}
if err := sb.freeUnsealed(ctx, sector, keepUnsealed); err != nil {
return err
}
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache, 0, storiface.PathStorage) paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache, 0, storiface.PathStorage)
if err != nil { if err != nil {
return xerrors.Errorf("acquiring sector cache path: %w", err) return xerrors.Errorf("acquiring sector cache path: %w", err)
@ -843,6 +856,43 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef,
return ffi.ClearCache(uint64(ssize), paths.Cache) return ffi.ClearCache(uint64(ssize), paths.Cache)
} }
func (sb *Sealer) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return err
}
if err := sb.freeUnsealed(ctx, sector, keepUnsealed); err != nil {
return err
}
{
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache, 0, storiface.PathStorage)
if err != nil {
return xerrors.Errorf("acquiring sector cache path: %w", err)
}
defer done()
if err := ffi.ClearCache(uint64(ssize), paths.Cache); err != nil {
return xerrors.Errorf("clear cache: %w", err)
}
}
{
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUpdateCache, 0, storiface.PathStorage)
if err != nil {
return xerrors.Errorf("acquiring sector cache path: %w", err)
}
defer done()
if err := ffi.ClearCache(uint64(ssize), paths.UpdateCache); err != nil {
return xerrors.Errorf("clear cache: %w", err)
}
}
return nil
}
func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error {
// This call is meant to mark storage as 'freeable'. Given that unsealing is // This call is meant to mark storage as 'freeable'. Given that unsealing is
// very expensive, we don't remove data as soon as we can - instead we only // very expensive, we don't remove data as soon as we can - instead we only

View File

@ -146,7 +146,7 @@ func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls store
go m.sched.runSched() go m.sched.runSched()
localTasks := []sealtasks.TaskType{ localTasks := []sealtasks.TaskType{
sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFetch, sealtasks.TTFinalizeReplicaUpdate,
} }
if sc.AllowAddPiece { if sc.AllowAddPiece {
localTasks = append(localTasks, sealtasks.TTAddPiece) localTasks = append(localTasks, sealtasks.TTAddPiece)
@ -577,6 +577,74 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef,
return nil return nil
} }
func (m *Manager) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache); err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
fts := storiface.FTUnsealed
{
unsealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false)
if err != nil {
return xerrors.Errorf("finding unsealed sector: %w", err)
}
if len(unsealedStores) == 0 { // In some edge cases the unsealed sector may not exist already, and that's fine
fts = storiface.FTNone
}
}
pathType := storiface.PathStorage
{
sealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUpdate, 0, false)
if err != nil {
return xerrors.Errorf("finding sealed sector: %w", err)
}
for _, store := range sealedStores {
if store.CanSeal {
pathType = storiface.PathSealing
break
}
}
}
selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache, false)
err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalizeReplicaUpdate, selector,
m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache|fts, pathType, storiface.AcquireMove),
func(ctx context.Context, w Worker) error {
_, err := m.waitSimpleCall(ctx)(w.FinalizeReplicaUpdate(ctx, sector, keepUnsealed))
return err
})
if err != nil {
return err
}
fetchSel := newAllocSelector(m.index, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathStorage)
moveUnsealed := fts
{
if len(keepUnsealed) == 0 {
moveUnsealed = storiface.FTNone
}
}
err = m.sched.Schedule(ctx, sector, sealtasks.TTFetch, fetchSel,
m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache|moveUnsealed, storiface.PathStorage, storiface.AcquireMove),
func(ctx context.Context, w Worker) error {
_, err := m.waitSimpleCall(ctx)(w.MoveStorage(ctx, sector, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache|moveUnsealed))
return err
})
if err != nil {
return xerrors.Errorf("moving sector to storage: %w", err)
}
return nil
}
func (m *Manager) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { func (m *Manager) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error {
return nil return nil
} }
@ -715,14 +783,13 @@ func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, p
return out, waitErr return out, waitErr
} }
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache); err != nil { if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache); err != nil {
return storage.ReplicaUpdateOut{}, xerrors.Errorf("acquiring sector lock: %w", err) return storage.ReplicaUpdateOut{}, xerrors.Errorf("acquiring sector lock: %w", err)
} }
selector := newAllocSelector(m.index, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing) selector := newAllocSelector(m.index, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing)
err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error {
log.Errorf("scheduled work for replica update")
err := m.startWork(ctx, w, wk)(w.ReplicaUpdate(ctx, sector, pieces)) err := m.startWork(ctx, w, wk)(w.ReplicaUpdate(ctx, sector, pieces))
if err != nil { if err != nil {
return xerrors.Errorf("startWork: %w", err) return xerrors.Errorf("startWork: %w", err)
@ -768,9 +835,12 @@ func (m *Manager) ProveReplicaUpdate1(ctx context.Context, sector storage.Sector
return nil, xerrors.Errorf("acquiring sector lock: %w", err) return nil, xerrors.Errorf("acquiring sector lock: %w", err)
} }
selector := newExistingSelector(m.index, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTSealed|storiface.FTCache, true) // NOTE: We set allowFetch to false so that we always execute on a worker
// with direct access to the data. We want to do that because this step is
// generally very cheap / fast, and transferring data is not worth the effort
selector := newExistingSelector(m.index, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTSealed|storiface.FTCache, false)
err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate1, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate1, selector, m.schedFetch(sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error {
err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed)) err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed))
if err != nil { if err != nil {
@ -873,6 +943,10 @@ func (m *Manager) ReturnProveReplicaUpdate2(ctx context.Context, callID storifac
return m.returnResult(ctx, callID, proof, err) return m.returnResult(ctx, callID, proof, err)
} }
func (m *Manager) ReturnFinalizeReplicaUpdate(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
return m.returnResult(ctx, callID, nil, err)
}
func (m *Manager) ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { func (m *Manager) ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
return m.returnResult(ctx, callID, nil, err) return m.returnResult(ctx, callID, nil, err)
} }

View File

@ -1,3 +1,4 @@
//stm: #unit
package sectorstorage package sectorstorage
import ( import (
@ -363,6 +364,7 @@ func TestRedoPC1(t *testing.T) {
// Manager restarts in the middle of a task, restarts it, it completes // Manager restarts in the middle of a task, restarts it, it completes
func TestRestartManager(t *testing.T) { func TestRestartManager(t *testing.T) {
//stm: @WORKER_JOBS_001
test := func(returnBeforeCall bool) func(*testing.T) { test := func(returnBeforeCall bool) func(*testing.T) {
return func(t *testing.T) { return func(t *testing.T) {
logging.SetAllLoggers(logging.LevelDebug) logging.SetAllLoggers(logging.LevelDebug)
@ -507,6 +509,7 @@ func TestRestartWorker(t *testing.T) {
<-arch <-arch
require.NoError(t, w.Close()) require.NoError(t, w.Close())
//stm: @WORKER_STATS_001
for { for {
if len(m.WorkerStats()) == 0 { if len(m.WorkerStats()) == 0 {
break break
@ -569,6 +572,7 @@ func TestReenableWorker(t *testing.T) {
// disable // disable
atomic.StoreInt64(&w.testDisable, 1) atomic.StoreInt64(&w.testDisable, 1)
//stm: @WORKER_STATS_001
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
if !m.WorkerStats()[w.session].Enabled { if !m.WorkerStats()[w.session].Enabled {
break break

View File

@ -477,6 +477,10 @@ func (mgr *SectorMgr) FinalizeSector(context.Context, storage.SectorRef, []stora
return nil return nil
} }
func (mgr *SectorMgr) FinalizeReplicaUpdate(context.Context, storage.SectorRef, []storage.Range) error {
return nil
}
func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error {
return nil return nil
} }
@ -577,6 +581,10 @@ func (mgr *SectorMgr) ReturnGenerateSectorKeyFromData(ctx context.Context, callI
panic("not supported") panic("not supported")
} }
func (mgr *SectorMgr) ReturnFinalizeReplicaUpdate(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
panic("not supported")
}
func (m mockVerifProver) VerifySeal(svi proof.SealVerifyInfo) (bool, error) { func (m mockVerifProver) VerifySeal(svi proof.SealVerifyInfo) (bool, error) {
plen, err := svi.SealProof.ProofSize() plen, err := svi.SealProof.ProofSize()
if err != nil { if err != nil {

View File

@ -1,3 +1,4 @@
//stm: #unit
package sectorstorage package sectorstorage
import ( import (
@ -118,6 +119,10 @@ func (s *schedTestWorker) GenerateSectorKeyFromData(ctx context.Context, sector
panic("implement me") panic("implement me")
} }
func (s *schedTestWorker) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
panic("implement me")
}
func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) { func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) {
panic("implement me") panic("implement me")
} }
@ -206,6 +211,7 @@ func TestSchedStartStop(t *testing.T) {
} }
func TestSched(t *testing.T) { func TestSched(t *testing.T) {
//stm: @WORKER_JOBS_001
storiface.ParallelNum = 1 storiface.ParallelNum = 1
storiface.ParallelDenom = 1 storiface.ParallelDenom = 1

View File

@ -6,7 +6,7 @@ const (
TTAddPiece TaskType = "seal/v0/addpiece" TTAddPiece TaskType = "seal/v0/addpiece"
TTPreCommit1 TaskType = "seal/v0/precommit/1" TTPreCommit1 TaskType = "seal/v0/precommit/1"
TTPreCommit2 TaskType = "seal/v0/precommit/2" TTPreCommit2 TaskType = "seal/v0/precommit/2"
TTCommit1 TaskType = "seal/v0/commit/1" // NOTE: We use this to transfer the sector into miner-local storage for now; Don't use on workers! TTCommit1 TaskType = "seal/v0/commit/1"
TTCommit2 TaskType = "seal/v0/commit/2" TTCommit2 TaskType = "seal/v0/commit/2"
TTFinalize TaskType = "seal/v0/finalize" TTFinalize TaskType = "seal/v0/finalize"
@ -18,6 +18,7 @@ const (
TTProveReplicaUpdate1 TaskType = "seal/v0/provereplicaupdate/1" TTProveReplicaUpdate1 TaskType = "seal/v0/provereplicaupdate/1"
TTProveReplicaUpdate2 TaskType = "seal/v0/provereplicaupdate/2" TTProveReplicaUpdate2 TaskType = "seal/v0/provereplicaupdate/2"
TTRegenSectorKey TaskType = "seal/v0/regensectorkey" TTRegenSectorKey TaskType = "seal/v0/regensectorkey"
TTFinalizeReplicaUpdate TaskType = "seal/v0/finalize/replicaupdate"
) )
var order = map[TaskType]int{ var order = map[TaskType]int{
@ -52,6 +53,7 @@ var shortNames = map[TaskType]string{
TTProveReplicaUpdate1: "PR1", TTProveReplicaUpdate1: "PR1",
TTProveReplicaUpdate2: "PR2", TTProveReplicaUpdate2: "PR2",
TTRegenSectorKey: "GSK", TTRegenSectorKey: "GSK",
TTFinalizeReplicaUpdate: "FRU",
} }
func (a TaskType) MuchLess(b TaskType) (bool, bool) { func (a TaskType) MuchLess(b TaskType) (bool, bool) {

View File

@ -1,3 +1,4 @@
//stm: #unit
package stores_test package stores_test
import ( import (
@ -154,6 +155,7 @@ func TestMoveShared(t *testing.T) {
} }
func TestReader(t *testing.T) { func TestReader(t *testing.T) {
//stm: @STORAGE_INFO_001
logging.SetAllLoggers(logging.LevelDebug) logging.SetAllLoggers(logging.LevelDebug)
bz := []byte("Hello World") bz := []byte("Hello World")

View File

@ -120,6 +120,7 @@ type WorkerCalls interface {
SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error)
SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (CallID, error) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (CallID, error)
FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error)
FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error)
ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error)
ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (CallID, error) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (CallID, error)
ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (CallID, error) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (CallID, error)
@ -182,6 +183,7 @@ type WorkerReturn interface {
ReturnProveReplicaUpdate1(ctx context.Context, callID CallID, proofs storage.ReplicaVanillaProofs, err *CallError) error ReturnProveReplicaUpdate1(ctx context.Context, callID CallID, proofs storage.ReplicaVanillaProofs, err *CallError) error
ReturnProveReplicaUpdate2(ctx context.Context, callID CallID, proof storage.ReplicaUpdateProof, err *CallError) error ReturnProveReplicaUpdate2(ctx context.Context, callID CallID, proof storage.ReplicaUpdateProof, err *CallError) error
ReturnGenerateSectorKeyFromData(ctx context.Context, callID CallID, err *CallError) error ReturnGenerateSectorKeyFromData(ctx context.Context, callID CallID, err *CallError) error
ReturnFinalizeReplicaUpdate(ctx context.Context, callID CallID, err *CallError) error
ReturnMoveStorage(ctx context.Context, callID CallID, err *CallError) error ReturnMoveStorage(ctx context.Context, callID CallID, err *CallError) error
ReturnUnsealPiece(ctx context.Context, callID CallID, err *CallError) error ReturnUnsealPiece(ctx context.Context, callID CallID, err *CallError) error
ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err *CallError) error ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err *CallError) error

View File

@ -87,6 +87,10 @@ func (t *testExec) GenerateSectorKeyFromData(ctx context.Context, sector storage
panic("implement me") panic("implement me")
} }
func (t *testExec) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
panic("implement me")
}
func (t *testExec) NewSector(ctx context.Context, sector storage.SectorRef) error { func (t *testExec) NewSector(ctx context.Context, sector storage.SectorRef) error {
panic("implement me") panic("implement me")
} }

View File

@ -168,6 +168,7 @@ const (
SealCommit1 ReturnType = "SealCommit1" SealCommit1 ReturnType = "SealCommit1"
SealCommit2 ReturnType = "SealCommit2" SealCommit2 ReturnType = "SealCommit2"
FinalizeSector ReturnType = "FinalizeSector" FinalizeSector ReturnType = "FinalizeSector"
FinalizeReplicaUpdate ReturnType = "FinalizeReplicaUpdate"
ReplicaUpdate ReturnType = "ReplicaUpdate" ReplicaUpdate ReturnType = "ReplicaUpdate"
ProveReplicaUpdate1 ReturnType = "ProveReplicaUpdate1" ProveReplicaUpdate1 ReturnType = "ProveReplicaUpdate1"
ProveReplicaUpdate2 ReturnType = "ProveReplicaUpdate2" ProveReplicaUpdate2 ReturnType = "ProveReplicaUpdate2"
@ -224,6 +225,7 @@ var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storifac
ProveReplicaUpdate1: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate1), ProveReplicaUpdate1: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate1),
ProveReplicaUpdate2: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate2), ProveReplicaUpdate2: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate2),
GenerateSectorKey: rfunc(storiface.WorkerReturn.ReturnGenerateSectorKeyFromData), GenerateSectorKey: rfunc(storiface.WorkerReturn.ReturnGenerateSectorKeyFromData),
FinalizeReplicaUpdate: rfunc(storiface.WorkerReturn.ReturnFinalizeReplicaUpdate),
MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage),
UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece),
Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), Fetch: rfunc(storiface.WorkerReturn.ReturnFetch),
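Editor's note: the returnFunc table above is a small dispatch map — each ReturnType is bound to a closure that forwards a finished call to the matching WorkerReturn method, so a completed call can be (re)delivered knowing only the return type recorded alongside it. A stripped-down sketch of the same pattern (hypothetical names, not the Lotus code):

package main

import (
	"context"
	"fmt"
)

type ReturnType string

// doReturn forwards one finished call; in Lotus the closures wrap methods such as
// WorkerReturn.ReturnFinalizeReplicaUpdate (here they just print).
type doReturn func(ctx context.Context, callID string, err error) error

var returnFuncs = map[ReturnType]doReturn{
	"FinalizeSector": func(ctx context.Context, callID string, err error) error {
		fmt.Println("FinalizeSector done:", callID, err)
		return nil
	},
	"FinalizeReplicaUpdate": func(ctx context.Context, callID string, err error) error {
		fmt.Println("FinalizeReplicaUpdate done:", callID, err)
		return nil
	},
}

func main() {
	// A completed call is routed purely by the ReturnType stored with it.
	if fn, ok := returnFuncs["FinalizeReplicaUpdate"]; ok {
		_ = fn(context.Background(), "call-7", nil)
	}
}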
@ -456,6 +458,27 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector storage.SectorR
}) })
} }
func (l *LocalWorker) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
sb, err := l.executor()
if err != nil {
return storiface.UndefCall, err
}
return l.asyncCall(ctx, sector, FinalizeReplicaUpdate, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
if err := sb.FinalizeReplicaUpdate(ctx, sector, keepUnsealed); err != nil {
return nil, xerrors.Errorf("finalizing sector: %w", err)
}
if len(keepUnsealed) == 0 {
if err := l.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true, nil); err != nil {
return nil, xerrors.Errorf("removing unsealed data: %w", err)
}
}
return nil, err
})
}
func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) { func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) {
return storiface.UndefCall, xerrors.Errorf("implement me") return storiface.UndefCall, xerrors.Errorf("implement me")
} }

View File

@ -215,4 +215,8 @@ func (t *trackedWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.
}) })
} }
func (t *trackedWorker) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTFinalizeReplicaUpdate, func() (storiface.CallID, error) { return t.Worker.FinalizeReplicaUpdate(ctx, sector, keepUnsealed) })
}
var _ Worker = &trackedWorker{} var _ Worker = &trackedWorker{}
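Editor's note: trackedWorker wraps each Worker method in a tracker.track call so the scheduler can attribute running work to a task type (here sealtasks.TTFinalizeReplicaUpdate), and the trailing var _ Worker assertion is the usual compile-time check that the wrapper still satisfies the interface after a method is added. A minimal sketch of both ideas (hypothetical interface, not the Lotus one):

package main

import "fmt"

type Worker interface {
	FinalizeReplicaUpdate(sector string) error
}

type baseWorker struct{}

func (baseWorker) FinalizeReplicaUpdate(sector string) error { return nil }

// trackedWorker decorates another Worker, recording which task type is running.
type trackedWorker struct {
	Worker
	track func(task string, work func() error) error
}

func (t *trackedWorker) FinalizeReplicaUpdate(sector string) error {
	return t.track("FRU", func() error { return t.Worker.FinalizeReplicaUpdate(sector) })
}

// Compile-time proof that the decorator still implements the interface.
var _ Worker = &trackedWorker{}

func main() {
	w := &trackedWorker{Worker: baseWorker{}, track: func(task string, work func() error) error {
		fmt.Println("start", task)
		defer fmt.Println("done", task)
		return work()
	}}
	_ = w.FinalizeReplicaUpdate("s-02-42")
}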

View File

@ -20,6 +20,7 @@ import (
// We should implement some wait-for-api logic // We should implement some wait-for-api logic
type ErrApi struct{ error } type ErrApi struct{ error }
type ErrNoDeals struct{ error }
type ErrInvalidDeals struct{ error } type ErrInvalidDeals struct{ error }
type ErrInvalidPiece struct{ error } type ErrInvalidPiece struct{ error }
type ErrExpiredDeals struct{ error } type ErrExpiredDeals struct{ error }
@ -38,12 +39,14 @@ type ErrCommitWaitFailed struct{ error }
type ErrBadRU struct{ error } type ErrBadRU struct{ error }
type ErrBadPR struct{ error } type ErrBadPR struct{ error }
func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) error { func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI, mustHaveDeals bool) error {
tok, height, err := api.ChainHead(ctx) tok, height, err := api.ChainHead(ctx)
if err != nil { if err != nil {
return &ErrApi{xerrors.Errorf("getting chain head: %w", err)} return &ErrApi{xerrors.Errorf("getting chain head: %w", err)}
} }
dealCount := 0
for i, p := range si.Pieces { for i, p := range si.Pieces {
// if no deal is associated with the piece, ensure that we added it as // if no deal is associated with the piece, ensure that we added it as
// filler (i.e. ensure that it has a zero PieceCID) // filler (i.e. ensure that it has a zero PieceCID)
@ -55,6 +58,8 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api
continue continue
} }
dealCount++
proposal, err := api.StateMarketStorageDealProposal(ctx, p.DealInfo.DealID, tok) proposal, err := api.StateMarketStorageDealProposal(ctx, p.DealInfo.DealID, tok)
if err != nil { if err != nil {
return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)} return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)}
@ -77,13 +82,17 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api
} }
} }
if mustHaveDeals && dealCount <= 0 {
return &ErrNoDeals{(xerrors.Errorf("sector %d must have deals, but does not", si.SectorNumber))}
}
return nil return nil
} }
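Editor's note: checkPieces now counts non-filler pieces and, when mustHaveDeals is set, rejects a sector that carries no deals at all. In the call sites that follow, the pre-commit and commit checks keep mustHaveDeals false (CC sectors are valid there), while the replica-update paths pass true, since upgrading a sector that holds no deals would be pointless. A compact sketch of the gate (hypothetical Piece type, not the Lotus one):

package main

import (
	"errors"
	"fmt"
)

// Piece is a hypothetical stand-in: DealID == 0 marks a filler piece.
type Piece struct{ DealID uint64 }

var errNoDeals = errors.New("sector must have deals, but does not")

func requireDeals(pieces []Piece, mustHaveDeals bool) error {
	dealCount := 0
	for _, p := range pieces {
		if p.DealID != 0 {
			dealCount++
		}
	}
	if mustHaveDeals && dealCount == 0 {
		return errNoDeals
	}
	return nil
}

func main() {
	fmt.Println(requireDeals([]Piece{{DealID: 0}}, true))  // errNoDeals
	fmt.Println(requireDeals([]Piece{{DealID: 12}}, true)) // <nil>
}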
// checkPrecommit checks that data commitment generated in the sealing process // checkPrecommit checks that data commitment generated in the sealing process
// matches pieces, and that the seal ticket isn't expired // matches pieces, and that the seal ticket isn't expired
func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, height abi.ChainEpoch, api SealingAPI) (err error) { func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, height abi.ChainEpoch, api SealingAPI) (err error) {
if err := checkPieces(ctx, maddr, si, api); err != nil { if err := checkPieces(ctx, maddr, si, api, false); err != nil {
return err return err
} }
@ -184,7 +193,7 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte,
return &ErrInvalidProof{xerrors.New("invalid proof (compute error?)")} return &ErrInvalidProof{xerrors.New("invalid proof (compute error?)")}
} }
if err := checkPieces(ctx, m.maddr, si, m.Api); err != nil { if err := checkPieces(ctx, m.maddr, si, m.Api, false); err != nil {
return err return err
} }
@ -194,7 +203,7 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte,
// check that sector info is good after running a replica update // check that sector info is good after running a replica update
func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, api SealingAPI) error { func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, api SealingAPI) error {
if err := checkPieces(ctx, maddr, si, api); err != nil { if err := checkPieces(ctx, maddr, si, api, true); err != nil {
return err return err
} }
if !si.CCUpdate { if !si.CCUpdate {

View File

@ -1,3 +1,4 @@
//stm: #unit
package sealing_test package sealing_test
import ( import (
@ -28,6 +29,7 @@ import (
) )
func TestCommitBatcher(t *testing.T) { func TestCommitBatcher(t *testing.T) {
//stm: @CHAIN_STATE_MINER_PRE_COM_INFO_001, @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001
t0123, err := address.NewFromString("t0123") t0123, err := address.NewFromString("t0123")
require.NoError(t, err) require.NoError(t, err)
@ -147,6 +149,7 @@ func TestCommitBatcher(t *testing.T) {
} }
} }
//stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001, @CHAIN_STATE_MINER_GET_COLLATERAL_001
expectSend := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { expectSend := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action {
return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise {
s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil) s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil)

View File

@ -137,27 +137,32 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
SnapDealsWaitDeals: planOne( SnapDealsWaitDeals: planOne(
on(SectorAddPiece{}, SnapDealsAddPiece), on(SectorAddPiece{}, SnapDealsAddPiece),
on(SectorStartPacking{}, SnapDealsPacking), on(SectorStartPacking{}, SnapDealsPacking),
on(SectorAbortUpgrade{}, AbortUpgrade),
), ),
SnapDealsAddPiece: planOne( SnapDealsAddPiece: planOne(
on(SectorPieceAdded{}, SnapDealsWaitDeals), on(SectorPieceAdded{}, SnapDealsWaitDeals),
apply(SectorStartPacking{}), apply(SectorStartPacking{}),
apply(SectorAddPiece{}), apply(SectorAddPiece{}),
on(SectorAddPieceFailed{}, SnapDealsAddPieceFailed), on(SectorAddPieceFailed{}, SnapDealsAddPieceFailed),
on(SectorAbortUpgrade{}, AbortUpgrade),
), ),
SnapDealsPacking: planOne( SnapDealsPacking: planOne(
on(SectorPacked{}, UpdateReplica), on(SectorPacked{}, UpdateReplica),
on(SectorAbortUpgrade{}, AbortUpgrade),
), ),
UpdateReplica: planOne( UpdateReplica: planOne(
on(SectorReplicaUpdate{}, ProveReplicaUpdate), on(SectorReplicaUpdate{}, ProveReplicaUpdate),
on(SectorUpdateReplicaFailed{}, ReplicaUpdateFailed), on(SectorUpdateReplicaFailed{}, ReplicaUpdateFailed),
on(SectorDealsExpired{}, SnapDealsDealsExpired), on(SectorDealsExpired{}, SnapDealsDealsExpired),
on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
on(SectorAbortUpgrade{}, AbortUpgrade),
), ),
ProveReplicaUpdate: planOne( ProveReplicaUpdate: planOne(
on(SectorProveReplicaUpdate{}, SubmitReplicaUpdate), on(SectorProveReplicaUpdate{}, SubmitReplicaUpdate),
on(SectorProveReplicaUpdateFailed{}, ReplicaUpdateFailed), on(SectorProveReplicaUpdateFailed{}, ReplicaUpdateFailed),
on(SectorDealsExpired{}, SnapDealsDealsExpired), on(SectorDealsExpired{}, SnapDealsDealsExpired),
on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
on(SectorAbortUpgrade{}, AbortUpgrade),
), ),
SubmitReplicaUpdate: planOne( SubmitReplicaUpdate: planOne(
on(SectorReplicaUpdateSubmitted{}, ReplicaUpdateWait), on(SectorReplicaUpdateSubmitted{}, ReplicaUpdateWait),
@ -169,7 +174,14 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorAbortUpgrade{}, AbortUpgrade), on(SectorAbortUpgrade{}, AbortUpgrade),
), ),
FinalizeReplicaUpdate: planOne( FinalizeReplicaUpdate: planOne(
on(SectorFinalized{}, Proving), on(SectorFinalized{}, UpdateActivating),
),
UpdateActivating: planOne(
on(SectorUpdateActive{}, ReleaseSectorKey),
),
ReleaseSectorKey: planOne(
on(SectorKeyReleased{}, Proving),
on(SectorReleaseKeyFailed{}, ReleaseSectorKeyFailed),
), ),
// Sealing errors // Sealing errors
@ -231,6 +243,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorRetryWaitDeals{}, SnapDealsWaitDeals), on(SectorRetryWaitDeals{}, SnapDealsWaitDeals),
apply(SectorStartPacking{}), apply(SectorStartPacking{}),
apply(SectorAddPiece{}), apply(SectorAddPiece{}),
on(SectorAbortUpgrade{}, AbortUpgrade),
), ),
SnapDealsDealsExpired: planOne( SnapDealsDealsExpired: planOne(
on(SectorAbortUpgrade{}, AbortUpgrade), on(SectorAbortUpgrade{}, AbortUpgrade),
@ -249,6 +262,10 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorRetryProveReplicaUpdate{}, ProveReplicaUpdate), on(SectorRetryProveReplicaUpdate{}, ProveReplicaUpdate),
on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs),
on(SectorDealsExpired{}, SnapDealsDealsExpired), on(SectorDealsExpired{}, SnapDealsDealsExpired),
on(SectorAbortUpgrade{}, AbortUpgrade),
),
ReleaseSectorKeyFailed: planOne(
on(SectorUpdateActive{}, ReleaseSectorKey),
), ),
// Post-seal // Post-seal
@ -477,6 +494,10 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
return m.handleReplicaUpdateWait, processed, nil return m.handleReplicaUpdateWait, processed, nil
case FinalizeReplicaUpdate: case FinalizeReplicaUpdate:
return m.handleFinalizeReplicaUpdate, processed, nil return m.handleFinalizeReplicaUpdate, processed, nil
case UpdateActivating:
return m.handleUpdateActivating, processed, nil
case ReleaseSectorKey:
return m.handleReleaseSectorKey, processed, nil
// Handled failure modes // Handled failure modes
case AddPieceFailed: case AddPieceFailed:
@ -513,6 +534,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
return m.handleSnapDealsRecoverDealIDs, processed, nil return m.handleSnapDealsRecoverDealIDs, processed, nil
case ReplicaUpdateFailed: case ReplicaUpdateFailed:
return m.handleSubmitReplicaUpdateFailed, processed, nil return m.handleSubmitReplicaUpdateFailed, processed, nil
case ReleaseSectorKeyFailed:
return m.handleReleaseSectorKeyFailed, 0, err
case AbortUpgrade: case AbortUpgrade:
return m.handleAbortUpgrade, processed, nil return m.handleAbortUpgrade, processed, nil
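Editor's note: taken together, the planner entries above extend the snap-deal happy path — after FinalizeReplicaUpdate a sector no longer jumps straight to Proving, it first waits in UpdateActivating for the update to take effect on chain and then drops the old sector key in ReleaseSectorKey. Written out as a plain list (an illustration of the transitions encoded above, with retry loops and failure states omitted; not code from the commit):

package sealing_doc // hypothetical illustration only

// snapDealHappyPath lists the state order the planners above encode for a
// successful snap-deal upgrade; UpdateActivating and ReleaseSectorKey are new.
var snapDealHappyPath = []string{
	"SnapDealsWaitDeals", "SnapDealsAddPiece", "SnapDealsPacking",
	"UpdateReplica", "ProveReplicaUpdate", "SubmitReplicaUpdate",
	"ReplicaUpdateWait", "FinalizeReplicaUpdate",
	"UpdateActivating", // new: wait for the ReplicaUpdate message to land and take effect
	"ReleaseSectorKey", // new: drop the old sector key once the update is active
	"Proving",
}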

View File

@ -335,6 +335,14 @@ type SectorReplicaUpdateLanded struct{}
func (evt SectorReplicaUpdateLanded) apply(state *SectorInfo) {} func (evt SectorReplicaUpdateLanded) apply(state *SectorInfo) {}
type SectorUpdateActive struct{}
func (evt SectorUpdateActive) apply(state *SectorInfo) {}
type SectorKeyReleased struct{}
func (evt SectorKeyReleased) apply(state *SectorInfo) {}
// Failed state recovery // Failed state recovery
type SectorRetrySealPreCommit1 struct{} type SectorRetrySealPreCommit1 struct{}
@ -445,6 +453,13 @@ type SectorSubmitReplicaUpdateFailed struct{}
func (evt SectorSubmitReplicaUpdateFailed) apply(state *SectorInfo) {} func (evt SectorSubmitReplicaUpdateFailed) apply(state *SectorInfo) {}
type SectorReleaseKeyFailed struct{ error }
func (evt SectorReleaseKeyFailed) FormatError(xerrors.Printer) (next error) {
return evt.error
}
func (evt SectorReleaseKeyFailed) apply(state *SectorInfo) {}
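Editor's note: SectorReleaseKeyFailed follows the same convention as the other failure events — it embeds the error and exposes it via FormatError, so the state machine can log the cause while the event itself stays a plain value. A tiny sketch of that embed-and-format pattern (hypothetical, using the standard errors package rather than xerrors):

package main

import (
	"errors"
	"fmt"
)

// sectorReleaseKeyFailed mirrors the event shape above: an embedded error plus a formatter hook.
type sectorReleaseKeyFailed struct{ error }

func (evt sectorReleaseKeyFailed) FormatError() error { return evt.error }

func main() {
	evt := sectorReleaseKeyFailed{errors.New("key still referenced")}
	fmt.Println("event error:", evt.FormatError())
}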
// Faults // Faults
type SectorFaulty struct{} type SectorFaulty struct{}

View File

@ -16,6 +16,7 @@ import (
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
@ -117,9 +118,25 @@ func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo,
return false, xerrors.Errorf("getting storage config: %w", err) return false, xerrors.Errorf("getting storage config: %w", err)
} }
// todo check deal age, start sealing if any deal has less than X (configurable) to start deadline
sealTime := time.Unix(sector.CreationTime, 0).Add(cfg.WaitDealsDelay) sealTime := time.Unix(sector.CreationTime, 0).Add(cfg.WaitDealsDelay)
// check deal age, start sealing when the deal closest to starting is within slack time
_, current, err := m.Api.ChainHead(ctx.Context())
blockTime := time.Second * time.Duration(build.BlockDelaySecs)
if err != nil {
return false, xerrors.Errorf("API error getting head: %w", err)
}
for _, piece := range sector.Pieces {
if piece.DealInfo == nil {
continue
}
dealSafeSealEpoch := piece.DealInfo.DealProposal.StartEpoch - cfg.StartEpochSealingBuffer
dealSafeSealTime := time.Now().Add(time.Duration(dealSafeSealEpoch-current) * blockTime)
if dealSafeSealTime.Before(sealTime) {
sealTime = dealSafeSealTime
}
}
if now.After(sealTime) { if now.After(sealTime) {
log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timeout") log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timeout")
return true, ctx.Send(SectorStartPacking{}) return true, ctx.Send(SectorStartPacking{})
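Editor's note: the new loop pulls the seal-start time forward when a queued deal is close to its start epoch — for each deal it converts StartEpoch minus StartEpochSealingBuffer into wall-clock time using the chain's block delay, and keeps the earliest result. A worked example with made-up numbers (the 30-second block time and 480-epoch buffer are assumptions for illustration):

package main

import (
	"fmt"
	"time"
)

func main() {
	const blockDelay = 30 * time.Second // assumed block time for the example
	var (
		current        int64 = 9000  // hypothetical chain head epoch
		dealStartEpoch int64 = 10000 // the deal's StartEpoch
		sealingBuffer  int64 = 480   // StartEpochSealingBuffer from config
	)

	dealSafeSealEpoch := dealStartEpoch - sealingBuffer // 9520
	epochsLeft := dealSafeSealEpoch - current           // 520
	dealSafeSealTime := time.Now().Add(time.Duration(epochsLeft) * blockDelay)

	// WaitDealsDelay would normally set sealTime; the deal deadline wins if it is earlier.
	sealTime := time.Now().Add(6 * time.Hour) // hypothetical CreationTime + WaitDealsDelay
	if dealSafeSealTime.Before(sealTime) {
		sealTime = dealSafeSealTime
	}
	fmt.Println("sector will start packing by:", sealTime.Round(time.Minute))
	// With these numbers: 520 epochs * 30s ≈ 4h20m from now, earlier than the 6h wait.
}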
@ -475,6 +492,10 @@ func (m *Sealing) tryCreateDealSector(ctx context.Context, sp abi.RegisteredSeal
return xerrors.Errorf("getting storage config: %w", err) return xerrors.Errorf("getting storage config: %w", err)
} }
if !cfg.MakeNewSectorForDeals {
return nil
}
if cfg.MaxSealingSectorsForDeals > 0 && m.stats.curSealing() >= cfg.MaxSealingSectorsForDeals { if cfg.MaxSealingSectorsForDeals > 0 && m.stats.curSealing() >= cfg.MaxSealingSectorsForDeals {
return nil return nil
} }
@ -524,6 +545,13 @@ func (m *Sealing) StartPacking(sid abi.SectorNumber) error {
return m.sectors.Send(uint64(sid), SectorStartPacking{}) return m.sectors.Send(uint64(sid), SectorStartPacking{})
} }
func (m *Sealing) AbortUpgrade(sid abi.SectorNumber) error {
m.startupWait.Wait()
log.Infow("aborting upgrade of sector", "sector", sid, "trigger", "user")
return m.sectors.Send(uint64(sid), SectorAbortUpgrade{xerrors.New("triggered by user")})
}
func proposalCID(deal api.PieceDealInfo) cid.Cid { func proposalCID(deal api.PieceDealInfo) cid.Cid {
pc, err := deal.DealProposal.Cid() pc, err := deal.DealProposal.Cid()
if err != nil { if err != nil {

View File

@ -1,3 +1,4 @@
//stm: #unit
package sealing_test package sealing_test
import ( import (
@ -38,6 +39,7 @@ var fc = config.MinerFeeConfig{
} }
func TestPrecommitBatcher(t *testing.T) { func TestPrecommitBatcher(t *testing.T) {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
t0123, err := address.NewFromString("t0123") t0123, err := address.NewFromString("t0123")
require.NoError(t, err) require.NoError(t, err)
@ -151,6 +153,7 @@ func TestPrecommitBatcher(t *testing.T) {
} }
} }
//stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001
expectSend := func(expect []abi.SectorNumber) action { expectSend := func(expect []abi.SectorNumber) action {
return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil)
@ -171,6 +174,7 @@ func TestPrecommitBatcher(t *testing.T) {
} }
} }
//stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001
expectSendsSingle := func(expect []abi.SectorNumber) action { expectSendsSingle := func(expect []abi.SectorNumber) action {
return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil)

View File

@ -18,6 +18,8 @@ type Config struct {
// includes failed, 0 = no limit // includes failed, 0 = no limit
MaxSealingSectorsForDeals uint64 MaxSealingSectorsForDeals uint64
MakeNewSectorForDeals bool
WaitDealsDelay time.Duration WaitDealsDelay time.Duration
CommittedCapacitySectorLifetime time.Duration CommittedCapacitySectorLifetime time.Duration

View File

@ -52,11 +52,14 @@ var ExistSectorStateList = map[SectorState]struct{}{
ProveReplicaUpdate: {}, ProveReplicaUpdate: {},
SubmitReplicaUpdate: {}, SubmitReplicaUpdate: {},
ReplicaUpdateWait: {}, ReplicaUpdateWait: {},
UpdateActivating: {},
ReleaseSectorKey: {},
FinalizeReplicaUpdate: {}, FinalizeReplicaUpdate: {},
SnapDealsAddPieceFailed: {}, SnapDealsAddPieceFailed: {},
SnapDealsDealsExpired: {}, SnapDealsDealsExpired: {},
SnapDealsRecoverDealIDs: {}, SnapDealsRecoverDealIDs: {},
ReplicaUpdateFailed: {}, ReplicaUpdateFailed: {},
ReleaseSectorKeyFailed: {},
AbortUpgrade: {}, AbortUpgrade: {},
} }
@ -104,6 +107,8 @@ const (
SubmitReplicaUpdate SectorState = "SubmitReplicaUpdate" SubmitReplicaUpdate SectorState = "SubmitReplicaUpdate"
ReplicaUpdateWait SectorState = "ReplicaUpdateWait" ReplicaUpdateWait SectorState = "ReplicaUpdateWait"
FinalizeReplicaUpdate SectorState = "FinalizeReplicaUpdate" FinalizeReplicaUpdate SectorState = "FinalizeReplicaUpdate"
UpdateActivating SectorState = "UpdateActivating"
ReleaseSectorKey SectorState = "ReleaseSectorKey"
// error modes // error modes
FailedUnrecoverable SectorState = "FailedUnrecoverable" FailedUnrecoverable SectorState = "FailedUnrecoverable"
@ -124,6 +129,7 @@ const (
SnapDealsRecoverDealIDs SectorState = "SnapDealsRecoverDealIDs" SnapDealsRecoverDealIDs SectorState = "SnapDealsRecoverDealIDs"
AbortUpgrade SectorState = "AbortUpgrade" AbortUpgrade SectorState = "AbortUpgrade"
ReplicaUpdateFailed SectorState = "ReplicaUpdateFailed" ReplicaUpdateFailed SectorState = "ReplicaUpdateFailed"
ReleaseSectorKeyFailed SectorState = "ReleaseSectorKeyFailed"
Faulty SectorState = "Faulty" // sector is corrupted or gone for some reason Faulty SectorState = "Faulty" // sector is corrupted or gone for some reason
FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain
@ -153,7 +159,7 @@ func toStatState(st SectorState, finEarly bool) statSectorState {
return sstProving return sstProving
} }
return sstSealing return sstSealing
case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed: case Proving, UpdateActivating, ReleaseSectorKey, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed:
return sstProving return sstProving
} }

View File

@ -211,7 +211,7 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect
tok, _, err := m.Api.ChainHead(ctx.Context()) tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil { if err != nil {
log.Errorf("handleCommitting: api error, not proceeding: %+v", err) log.Errorf("handleSubmitReplicaUpdateFailed: api error, not proceeding: %+v", err)
return nil return nil
} }
@ -237,6 +237,17 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect
} }
} }
// Abort upgrade for sectors that went faulty since being marked for upgrade
active, err := sectorActive(ctx.Context(), m.Api, m.maddr, tok, sector.SectorNumber)
if err != nil {
log.Errorf("sector active check: api error, not proceeding: %+v", err)
return nil
}
if !active {
log.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber)
return ctx.Send(SectorAbortUpgrade{})
}
if err := failedCooldown(ctx, sector); err != nil { if err := failedCooldown(ctx, sector); err != nil {
return err return err
} }
@ -244,6 +255,16 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect
return ctx.Send(SectorRetrySubmitReplicaUpdate{}) return ctx.Send(SectorRetrySubmitReplicaUpdate{})
} }
func (m *Sealing) handleReleaseSectorKeyFailed(ctx statemachine.Context, sector SectorInfo) error {
// not much we can do, wait for a bit and try again
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorUpdateActive{})
}
func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo) error { func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo) error {
tok, _, err := m.Api.ChainHead(ctx.Context()) tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil { if err != nil {
@ -478,7 +499,7 @@ func (m *Sealing) HandleRecoverDealIDs(ctx statemachine.Context, sector SectorIn
} }
func (m *Sealing) handleSnapDealsRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error { func (m *Sealing) handleSnapDealsRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error {
return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{}) return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{xerrors.New("failed recovering deal ids")})
} }
func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, maddr address.Address) ([]int, int, error) { func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, maddr address.Address) ([]int, int, error) {

View File

@ -1,3 +1,4 @@
//stm: #unit
package sealing_test package sealing_test
import ( import (
@ -49,6 +50,7 @@ func TestStateRecoverDealIDs(t *testing.T) {
PieceCID: idCid("newPieceCID"), PieceCID: idCid("newPieceCID"),
} }
//stm: @CHAIN_STATE_MARKET_STORAGE_DEAL_001, @CHAIN_STATE_NETWORK_VERSION_001
api.EXPECT().StateMarketStorageDealProposal(ctx, dealId, nil).Return(dealProposal, nil) api.EXPECT().StateMarketStorageDealProposal(ctx, dealId, nil).Return(dealProposal, nil)
pc := idCid("publishCID") pc := idCid("publishCID")

View File

@ -2,17 +2,21 @@ package sealing
import ( import (
"bytes" "bytes"
"context"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/exitcode"
statemachine "github.com/filecoin-project/go-statemachine" statemachine "github.com/filecoin-project/go-statemachine"
api "github.com/filecoin-project/lotus/api" api "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/policy"
"golang.org/x/xerrors" "golang.org/x/xerrors"
) )
func (m *Sealing) handleReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { func (m *Sealing) handleReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error {
if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api, true); err != nil { // Sanity check state
return handleErrors(ctx, err, sector) return handleErrors(ctx, err, sector)
} }
out, err := m.sealer.ReplicaUpdate(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.pieceInfos()) out, err := m.sealer.ReplicaUpdate(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.pieceInfos())
@ -31,13 +35,28 @@ func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector Sect
if sector.CommR == nil { if sector.CommR == nil {
return xerrors.Errorf("invalid sector %d with nil CommR", sector.SectorNumber) return xerrors.Errorf("invalid sector %d with nil CommR", sector.SectorNumber)
} }
// Abort upgrade for sectors that went faulty since being marked for upgrade
tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil {
log.Errorf("handleProveReplicaUpdate: api error, not proceeding: %+v", err)
return nil
}
active, err := sectorActive(ctx.Context(), m.Api, m.maddr, tok, sector.SectorNumber)
if err != nil {
log.Errorf("sector active check: api error, not proceeding: %+v", err)
return nil
}
if !active {
log.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber)
return ctx.Send(SectorAbortUpgrade{})
}
vanillaProofs, err := m.sealer.ProveReplicaUpdate1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed) vanillaProofs, err := m.sealer.ProveReplicaUpdate1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed)
if err != nil { if err != nil {
return ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (1) failed: %w", err)}) return ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (1) failed: %w", err)})
} }
if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api, true); err != nil { // Sanity check state
return handleErrors(ctx, err, sector) return handleErrors(ctx, err, sector)
} }
@ -59,10 +78,6 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec
return nil return nil
} }
if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state
return handleErrors(ctx, err, sector)
}
if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil { if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil {
return ctx.Send(SectorSubmitReplicaUpdateFailed{}) return ctx.Send(SectorSubmitReplicaUpdateFailed{})
} }
@ -196,16 +211,76 @@ func (m *Sealing) handleReplicaUpdateWait(ctx statemachine.Context, sector Secto
} }
if !si.SealedCID.Equals(*sector.UpdateSealed) { if !si.SealedCID.Equals(*sector.UpdateSealed) {
log.Errorf("mismatch of expected onchain sealed cid after replica update, expected %s got %s", sector.UpdateSealed, si.SealedCID) return ctx.Send(SectorAbortUpgrade{xerrors.Errorf("mismatch of expected onchain sealed cid after replica update, expected %s got %s", sector.UpdateSealed, si.SealedCID)})
return ctx.Send(SectorAbortUpgrade{})
} }
return ctx.Send(SectorReplicaUpdateLanded{}) return ctx.Send(SectorReplicaUpdateLanded{})
} }
func (m *Sealing) handleFinalizeReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { func (m *Sealing) handleFinalizeReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error {
cfg, err := m.getConfig()
if err != nil {
return xerrors.Errorf("getting sealing config: %w", err)
}
if err := m.sealer.FinalizeReplicaUpdate(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(false, cfg.AlwaysKeepUnsealedCopy)); err != nil {
return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)})
}
return ctx.Send(SectorFinalized{}) return ctx.Send(SectorFinalized{})
} }
func (m *Sealing) handleUpdateActivating(ctx statemachine.Context, sector SectorInfo) error {
try := func() error {
mw, err := m.Api.StateWaitMsg(ctx.Context(), *sector.ReplicaUpdateMessage)
if err != nil {
return err
}
tok, _, err := m.Api.ChainHead(ctx.Context())
if err != nil {
return err
}
nv, err := m.Api.StateNetworkVersion(ctx.Context(), tok)
if err != nil {
return err
}
lb := policy.GetWinningPoStSectorSetLookback(nv)
targetHeight := mw.Height + lb + InteractivePoRepConfidence
return m.events.ChainAt(func(context.Context, TipSetToken, abi.ChainEpoch) error {
return ctx.Send(SectorUpdateActive{})
}, func(ctx context.Context, ts TipSetToken) error {
log.Warn("revert in handleUpdateActivating")
return nil
}, InteractivePoRepConfidence, targetHeight)
}
for {
err := try()
if err == nil {
break
}
log.Errorw("error in handleUpdateActivating", "error", err)
// likely an API issue, sleep for a bit and retry
time.Sleep(time.Minute)
}
return nil
}
func (m *Sealing) handleReleaseSectorKey(ctx statemachine.Context, sector SectorInfo) error {
if err := m.sealer.ReleaseSectorKey(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil {
return ctx.Send(SectorReleaseKeyFailed{err})
}
return ctx.Send(SectorKeyReleased{})
}
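Editor's note: handleUpdateActivating waits for the ReplicaUpdate message to land and then schedules SectorUpdateActive at mw.Height plus the winning-PoSt sector-set lookback plus InteractivePoRepConfidence, the idea being that the old sector key is not released while a Winning PoSt could still be expected over the pre-update replica. A back-of-the-envelope sketch of that target height (the 10-epoch lookback and 6-epoch confidence below are assumptions for illustration; the real values come from policy.GetWinningPoStSectorSetLookback and InteractivePoRepConfidence):

package main

import "fmt"

func main() {
	// Assumed values for illustration only.
	const (
		landedHeight        = 123456 // epoch where the ReplicaUpdate message executed (mw.Height)
		winningPoStLookback = 10     // policy.GetWinningPoStSectorSetLookback(nv), assumed
		porepConfidence     = 6      // InteractivePoRepConfidence, assumed
	)
	targetHeight := landedHeight + winningPoStLookback + porepConfidence
	fmt.Println("SectorUpdateActive fires at epoch:", targetHeight) // 123472
}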
func handleErrors(ctx statemachine.Context, err error, sector SectorInfo) error { func handleErrors(ctx statemachine.Context, err error, sector SectorInfo) error {
switch err.(type) { switch err.(type) {
case *ErrApi: case *ErrApi:

View File

@ -198,7 +198,7 @@ func (m *Sealing) handleGetTicket(ctx statemachine.Context, sector SectorInfo) e
} }
func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error { func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error {
if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api, false); err != nil { // Sanity check state
switch err.(type) { switch err.(type) {
case *ErrApi: case *ErrApi:
log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err) log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err)

View File

@ -3,6 +3,7 @@ package sealing
import ( import (
"context" "context"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market"
@ -86,19 +87,11 @@ func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) e
return xerrors.Errorf("failed to read sector on chain info: %w", err) return xerrors.Errorf("failed to read sector on chain info: %w", err)
} }
active, err := m.Api.StateMinerActiveSectors(ctx, m.maddr, tok) active, err := sectorActive(ctx, m.Api, m.maddr, tok, id)
if err != nil { if err != nil {
return xerrors.Errorf("failed to check active sectors: %w", err) return xerrors.Errorf("failed to check if sector is active")
} }
// Ensure the upgraded sector is active if !active {
var found bool
for _, si := range active {
if si.SectorNumber == id {
found = true
break
}
}
if !found {
return xerrors.Errorf("cannot mark inactive sector for upgrade") return xerrors.Errorf("cannot mark inactive sector for upgrade")
} }
@ -110,6 +103,22 @@ func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) e
return m.sectors.Send(uint64(id), SectorStartCCUpdate{}) return m.sectors.Send(uint64(id), SectorStartCCUpdate{})
} }
func sectorActive(ctx context.Context, api SealingAPI, maddr address.Address, tok TipSetToken, sector abi.SectorNumber) (bool, error) {
active, err := api.StateMinerActiveSectors(ctx, maddr, tok)
if err != nil {
return false, xerrors.Errorf("failed to check active sectors: %w", err)
}
// Check if sector is among active sectors
var found bool
for _, si := range active {
if si.SectorNumber == sector {
found = true
break
}
}
return found, nil
}
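Editor's note: sectorActive centralizes the "is this sector in the miner's active set" lookup that MarkForSnapUpgrade previously did inline, and the same helper is now reused by handleProveReplicaUpdate and handleSubmitReplicaUpdateFailed to abort upgrades of sectors that went faulty. The check is a linear scan over StateMinerActiveSectors; if many sectors were checked at once, building a set keyed by sector number would be the natural refactor, e.g. (hypothetical sketch, not part of the commit):

package main

import "fmt"

type SectorOnChainInfo struct{ SectorNumber uint64 }

// activeSet turns the active-sector list into a set for O(1) membership checks.
func activeSet(active []SectorOnChainInfo) map[uint64]struct{} {
	set := make(map[uint64]struct{}, len(active))
	for _, si := range active {
		set[si.SectorNumber] = struct{}{}
	}
	return set
}

func main() {
	set := activeSet([]SectorOnChainInfo{{2}, {5}, {9}})
	_, ok := set[5]
	fmt.Println("sector 5 active:", ok) // true
}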
func (m *Sealing) tryUpgradeSector(ctx context.Context, params *miner.SectorPreCommitInfo) big.Int { func (m *Sealing) tryUpgradeSector(ctx context.Context, params *miner.SectorPreCommitInfo) big.Int {
if len(params.DealIDs) == 0 { if len(params.DealIDs) == 0 {
return big.Zero() return big.Zero()

go.mod (14 lines changed)
View File

@ -39,7 +39,7 @@ require (
github.com/filecoin-project/go-fil-markets v1.19.0 github.com/filecoin-project/go-fil-markets v1.19.0
github.com/filecoin-project/go-jsonrpc v0.1.5 github.com/filecoin-project/go-jsonrpc v0.1.5
github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-padreader v0.0.1
github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 github.com/filecoin-project/go-paramfetch v0.0.4
github.com/filecoin-project/go-state-types v0.1.3 github.com/filecoin-project/go-state-types v0.1.3
github.com/filecoin-project/go-statemachine v1.0.1 github.com/filecoin-project/go-statemachine v1.0.1
github.com/filecoin-project/go-statestore v0.2.0 github.com/filecoin-project/go-statestore v0.2.0
@ -51,7 +51,7 @@ require (
github.com/filecoin-project/specs-actors/v5 v5.0.4 github.com/filecoin-project/specs-actors/v5 v5.0.4
github.com/filecoin-project/specs-actors/v6 v6.0.1 github.com/filecoin-project/specs-actors/v6 v6.0.1
github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1 github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1
github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 github.com/filecoin-project/specs-storage v0.2.0
github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/filecoin-project/test-vectors/schema v0.0.5
github.com/gbrlsnchs/jwt/v3 v3.0.1 github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gdamore/tcell/v2 v2.2.0 github.com/gdamore/tcell/v2 v2.2.0
@ -108,7 +108,7 @@ require (
github.com/kelseyhightower/envconfig v1.4.0 github.com/kelseyhightower/envconfig v1.4.0
github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-buffer-pool v0.0.2
github.com/libp2p/go-eventbus v0.2.1 github.com/libp2p/go-eventbus v0.2.1
github.com/libp2p/go-libp2p v0.18.0-rc1 github.com/libp2p/go-libp2p v0.18.0-rc4
github.com/libp2p/go-libp2p-connmgr v0.3.1 // indirect github.com/libp2p/go-libp2p-connmgr v0.3.1 // indirect
github.com/libp2p/go-libp2p-core v0.14.0 github.com/libp2p/go-libp2p-core v0.14.0
github.com/libp2p/go-libp2p-discovery v0.6.0 github.com/libp2p/go-libp2p-discovery v0.6.0
@ -116,13 +116,13 @@ require (
github.com/libp2p/go-libp2p-noise v0.3.0 github.com/libp2p/go-libp2p-noise v0.3.0
github.com/libp2p/go-libp2p-peerstore v0.6.0 github.com/libp2p/go-libp2p-peerstore v0.6.0
github.com/libp2p/go-libp2p-pubsub v0.6.1 github.com/libp2p/go-libp2p-pubsub v0.6.1
github.com/libp2p/go-libp2p-quic-transport v0.16.0 github.com/libp2p/go-libp2p-quic-transport v0.16.1
github.com/libp2p/go-libp2p-record v0.1.3 github.com/libp2p/go-libp2p-record v0.1.3
github.com/libp2p/go-libp2p-resource-manager v0.1.2 github.com/libp2p/go-libp2p-resource-manager v0.1.3
github.com/libp2p/go-libp2p-routing-helpers v0.2.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3
github.com/libp2p/go-libp2p-swarm v0.10.0 github.com/libp2p/go-libp2p-swarm v0.10.1
github.com/libp2p/go-libp2p-tls v0.3.1 github.com/libp2p/go-libp2p-tls v0.3.1
github.com/libp2p/go-libp2p-yamux v0.8.0 github.com/libp2p/go-libp2p-yamux v0.8.2
github.com/libp2p/go-maddr-filter v0.1.0 github.com/libp2p/go-maddr-filter v0.1.0
github.com/mattn/go-isatty v0.0.14 github.com/mattn/go-isatty v0.0.14
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1

go.sum (28 lines changed)
View File

@ -342,8 +342,8 @@ github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.m
github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ=
github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs=
github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ=
github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 h1:+nripp+UI/rhl01w9Gs4V0XDGaVPYPMGU/D/gNVLue0= github.com/filecoin-project/go-paramfetch v0.0.4 h1:H+Me8EL8T5+79z/KHYQQcT8NVOzYVqXIi7nhb48tdm8=
github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-paramfetch v0.0.4/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts=
github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
@ -382,8 +382,8 @@ github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/g
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1 h1:FuDaXIbcw2hRsFI8SDTmsGGCE+NumpF6aiBoU/2X5W4= github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1 h1:FuDaXIbcw2hRsFI8SDTmsGGCE+NumpF6aiBoU/2X5W4=
github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M= github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M=
github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 h1:oUYOvF7EvdXS0Zmk9mNkaB6Bu0l+WXBYPzVodKMiLug= github.com/filecoin-project/specs-storage v0.2.0 h1:Y4UDv0apRQ3zI2GiPPubi8JblpUZZphEdaJUxCutfyg=
github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU= github.com/filecoin-project/specs-storage v0.2.0/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
@ -994,8 +994,9 @@ github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2
github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM= github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM=
github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4=
github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw=
github.com/libp2p/go-libp2p v0.18.0-rc1 h1:CFHROLGmMwe/p8tR3sHahg/1NSaZa2EGbu7nDmdC+RY=
github.com/libp2p/go-libp2p v0.18.0-rc1/go.mod h1:RgYlH7IIWHXREimC92bw5Lg1V2R5XmSzuLHb5fTnr+8= github.com/libp2p/go-libp2p v0.18.0-rc1/go.mod h1:RgYlH7IIWHXREimC92bw5Lg1V2R5XmSzuLHb5fTnr+8=
github.com/libp2p/go-libp2p v0.18.0-rc4 h1:OUsSbeu7q+Ck/bV9wHDxFzb08ORqBupHhpCmRBhWrJ8=
github.com/libp2p/go-libp2p v0.18.0-rc4/go.mod h1:wzmsk1ioOq9FGQys2BN5BIw4nugP6+R+CyW3JbPEbbs=
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo=
github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E= github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E=
github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I=
@ -1147,8 +1148,9 @@ github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p
github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc=
github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ=
github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ=
github.com/libp2p/go-libp2p-quic-transport v0.16.0 h1:aVg9/jr+R2esov5sH7wkXrmYmqJiUjtLMLYX3L9KYdY=
github.com/libp2p/go-libp2p-quic-transport v0.16.0/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ= github.com/libp2p/go-libp2p-quic-transport v0.16.0/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ=
github.com/libp2p/go-libp2p-quic-transport v0.16.1 h1:N/XqYXHurphPLDfXYhll8NyqzdZYQqAF4GIr7+SmLV8=
github.com/libp2p/go-libp2p-quic-transport v0.16.1/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ=
github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q=
github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q=
github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg=
@ -1156,8 +1158,8 @@ github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGd
github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0= github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0=
github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4=
github.com/libp2p/go-libp2p-resource-manager v0.1.0/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= github.com/libp2p/go-libp2p-resource-manager v0.1.0/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y=
github.com/libp2p/go-libp2p-resource-manager v0.1.2 h1:t66B/6EF6ivWEUgvO34NKOT3oPtkb+JTBJHdsIMx+mg= github.com/libp2p/go-libp2p-resource-manager v0.1.3 h1:Umf0tW6WNXSb6Uoma0YT56azB5iikL/aeGAP7s7+f5o=
github.com/libp2p/go-libp2p-resource-manager v0.1.2/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= github.com/libp2p/go-libp2p-resource-manager v0.1.3/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y=
github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys=
github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE=
github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY= github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY=
@ -1178,8 +1180,9 @@ github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat
github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8=
github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc=
github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8= github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8=
github.com/libp2p/go-libp2p-swarm v0.10.0 h1:1yr7UCwxCN92cw9g9Q+fnJSlk7lOB1RetoEewxhGVL0=
github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA= github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA=
github.com/libp2p/go-libp2p-swarm v0.10.1 h1:lXW3pgGt+BVmkzcFX61erX7l6Lt+WAamNhwa2Kf3eJM=
github.com/libp2p/go-libp2p-swarm v0.10.1/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs=
github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
@ -1209,8 +1212,9 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZb
github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk=
github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo=
github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo= github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo=
github.com/libp2p/go-libp2p-transport-upgrader v0.7.0 h1:ADnLrL7fC4Vy7HPjk9oGof7nDeTqGXuof85Ar6kin9Q=
github.com/libp2p/go-libp2p-transport-upgrader v0.7.0/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= github.com/libp2p/go-libp2p-transport-upgrader v0.7.0/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg=
github.com/libp2p/go-libp2p-transport-upgrader v0.7.1 h1:MSMe+tUfxpC9GArTz7a4G5zQKQgGh00Vio87d3j3xIg=
github.com/libp2p/go-libp2p-transport-upgrader v0.7.1/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg=
github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY=
github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8=
github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4=
@ -1225,8 +1229,10 @@ github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLw
github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE=
github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k=
github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08= github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08=
github.com/libp2p/go-libp2p-yamux v0.8.0 h1:APQYlttIj+Rr5sfa6siojwsi0ZwcIh/exHIUl9hZr6o=
github.com/libp2p/go-libp2p-yamux v0.8.0/go.mod h1:yTkPgN2ib8FHyU1ZcVD7aelzyAqXXwEPbyx+aSKm9h8= github.com/libp2p/go-libp2p-yamux v0.8.0/go.mod h1:yTkPgN2ib8FHyU1ZcVD7aelzyAqXXwEPbyx+aSKm9h8=
github.com/libp2p/go-libp2p-yamux v0.8.1/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE=
github.com/libp2p/go-libp2p-yamux v0.8.2 h1:6GKWntresp0TFxMP/oSoH96nV8XKJRdynXsdp43dn0Y=
github.com/libp2p/go-libp2p-yamux v0.8.2/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE=
github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M=

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests package itests
import ( import (
@ -19,6 +20,12 @@ import (
) )
func TestAPI(t *testing.T) { func TestAPI(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_STATE_MINER_INFO_001
t.Run("direct", func(t *testing.T) { t.Run("direct", func(t *testing.T) {
runAPITest(t) runAPITest(t)
}) })
@ -116,11 +123,13 @@ func (ts *apiSuite) testSearchMsg(t *testing.T) {
sm, err := full.MpoolPushMessage(ctx, msg, nil) sm, err := full.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err) require.NoError(t, err)
//stm: @CHAIN_STATE_WAIT_MSG_001
res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful") require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful")
//stm: @CHAIN_STATE_SEARCH_MSG_001
searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true) searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, searchRes) require.NotNil(t, searchRes)

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests package itests
import ( import (
@ -18,25 +19,25 @@ import (
) )
func TestCCUpgrade(t *testing.T) { func TestCCUpgrade(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_STATE_MINER_GET_INFO_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @MINER_SECTOR_LIST_001
kit.QuietMiningLogs() kit.QuietMiningLogs()
for _, height := range []abi.ChainEpoch{ runTestCCUpgrade(t)
-1, // before
162, // while sealing
560, // after upgrade deal
} {
height := height // make linters happy by copying
t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
runTestCCUpgrade(t, height)
})
}
} }
func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) *kit.TestFullNode { func runTestCCUpgrade(t *testing.T) *kit.TestFullNode {
ctx := context.Background() ctx := context.Background()
blockTime := 1 * time.Millisecond blockTime := 1 * time.Millisecond
client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15)) client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15), kit.ThroughRPC())
ens.InterconnectAll().BeginMiningMustPost(blockTime) ens.InterconnectAll().BeginMiningMustPost(blockTime)
maddr, err := miner.ActorAddress(ctx) maddr, err := miner.ActorAddress(ctx)
@ -80,6 +81,11 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) *kit.TestFullN
status, err := miner.SectorsStatus(ctx, CCUpgrade, true)
require.NoError(t, err)
assert.Equal(t, 1, len(status.Deals))
miner.WaitSectorsProving(ctx, map[abi.SectorNumber]struct{}{
CCUpgrade: {},
})
return client
}
@ -126,7 +132,7 @@ func TestCCUpgradeAndPoSt(t *testing.T) {
kit.QuietMiningLogs()
t.Run("upgrade and then post", func(t *testing.T) {
ctx := context.Background()
n := runTestCCUpgrade(t, 100)
n := runTestCCUpgrade(t)
ts, err := n.ChainHead(ctx)
require.NoError(t, err)
start := ts.Height()

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -11,6 +12,11 @@ import (
// TestClient does a basic test to exercise the client CLI commands.
func TestClient(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -52,6 +53,13 @@ import (
// * asserts that miner B loses power
// * asserts that miner D loses power, is inactive
func TestDeadlineToggling(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @MINER_SECTOR_LIST_001
kit.Expensive(t)
kit.QuietMiningLogs()
@ -108,6 +116,7 @@ func TestDeadlineToggling(t *testing.T) {
{
minerC.PledgeSectors(ctx, sectorsC, 0, nil)
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK)
require.NoError(t, err)
@ -127,6 +136,7 @@ func TestDeadlineToggling(t *testing.T) {
expectedPower := types.NewInt(uint64(ssz) * sectorsC)
//stm: @CHAIN_STATE_MINER_POWER_001
p, err := client.StateMinerPower(ctx, maddrC, types.EmptyTSK)
require.NoError(t, err)
@ -147,12 +157,14 @@ func TestDeadlineToggling(t *testing.T) {
}
checkMiner := func(ma address.Address, power abi.StoragePower, active, activeIfCron bool, tsk types.TipSetKey) {
//stm: @CHAIN_STATE_MINER_POWER_001
p, err := client.StateMinerPower(ctx, ma, tsk)
require.NoError(t, err)
// make sure it has the expected power.
require.Equal(t, p.MinerPower.RawBytePower, power)
//stm: @CHAIN_STATE_GET_ACTOR_001
mact, err := client.StateGetActor(ctx, ma, tsk)
require.NoError(t, err)
@ -187,6 +199,7 @@ func TestDeadlineToggling(t *testing.T) {
checkMiner(maddrB, types.NewInt(0), true, true, uts.Key())
}
//stm: @CHAIN_STATE_NETWORK_VERSION_001
nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK)
require.NoError(t, err)
require.GreaterOrEqual(t, nv, network.Version12)
@ -246,6 +259,7 @@ func TestDeadlineToggling(t *testing.T) {
}, nil)
require.NoError(t, err)
//stm: @CHAIN_STATE_WAIT_MSG_001
r, err := client.StateWaitMsg(ctx, m.Cid(), 2, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)
@ -298,6 +312,7 @@ func TestDeadlineToggling(t *testing.T) {
sectorbit := bitfield.New()
sectorbit.Set(uint64(sectorNum))
//stm: @CHAIN_STATE_SECTOR_PARTITION_001
loca, err := client.StateSectorPartition(ctx, maddrD, sectorNum, types.EmptyTSK)
require.NoError(t, err)
@ -329,6 +344,7 @@ func TestDeadlineToggling(t *testing.T) {
t.Log("sent termination message:", smsg.Cid()) t.Log("sent termination message:", smsg.Cid())
//stm: @CHAIN_STATE_WAIT_MSG_001
r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -12,6 +13,13 @@ import (
)
func TestStorageDealMissingBlock(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
ctx := context.Background()
// enable 512MiB proofs so we can conduct larger transfers.

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -71,6 +72,12 @@ func TestDealWithMarketAndMinerNode(t *testing.T) {
}
func TestDealCyclesConcurrent(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
if testing.Short() {
t.Skip("skipping test in short mode")
}

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -12,6 +13,13 @@ import (
)
func TestMaxStagingDeals(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
ctx := context.Background()
// enable 512MiB proofs so we can conduct larger transfers.

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -16,7 +17,13 @@ import (
)
func TestOfflineDealFlow(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_DATA_CALCULATE_COMMP_001, @CLIENT_DATA_GENERATE_CAR_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001
runTest := func(t *testing.T, fastRet bool, upscale abi.PaddedPieceSize) {
ctx := context.Background()
client, miner, ens := kit.EnsembleMinimal(t, kit.WithAllSubsystems()) // no mock proofs
@ -60,6 +67,7 @@ func TestOfflineDealFlow(t *testing.T) {
proposalCid := dh.StartDeal(ctx, dp)
//stm: @CLIENT_STORAGE_DEALS_GET_001
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -14,7 +15,13 @@ import (
)
func TestDealPadding(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_DATA_GET_DEAL_PIECE_CID_001
kit.QuietMiningLogs()
var blockTime = 250 * time.Millisecond
@ -58,6 +65,7 @@ func TestDealPadding(t *testing.T) {
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
//stm: @CLIENT_STORAGE_DEALS_GET_001
di, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)
require.True(t, di.PieceCID.Equals(pcid))

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -38,7 +39,13 @@ var (
)
func TestPartialRetrieval(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_RETRIEVAL_RETRIEVE_001
ctx := context.Background()
policy.SetPreCommitChallengeDelay(2)

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -9,6 +10,12 @@ import (
)
func TestFirstDealEnablesMining(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
// test making a deal with a fresh miner, and see if it starts to mine.
if testing.Short() {
t.Skip("skipping test in short mode")

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -12,6 +13,12 @@ import (
)
func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
var (
ctx = context.Background()
blocktime = 50 * time.Millisecond
@ -43,10 +50,12 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
_, res2, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})
require.Equal(t, res1.Root, res2.Root)
//stm: @CLIENT_STORAGE_DEALS_GET_001
// Retrieval
dealInfo, err := client.ClientGetDealInfo(ctx, *deal1)
require.NoError(t, err)
//stm: @CLIENT_RETRIEVAL_FIND_001
// fetch quote -> zero for unsealed price since unsealed file already exists.
offers, err := client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
require.NoError(t, err)
@ -56,11 +65,13 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64())
// remove ONLY one unsealed file
//stm: @STORAGE_LIST_001, @MINER_SECTOR_LIST_001
ss, err := miner.StorageList(context.Background())
require.NoError(t, err)
_, err = miner.SectorsList(ctx)
require.NoError(t, err)
//stm: @STORAGE_DROP_SECTOR_001, @STORAGE_LIST_001
iLoop:
for storeID, sd := range ss {
for _, sector := range sd {
@ -70,6 +81,7 @@ iLoop:
}
}
//stm: @CLIENT_RETRIEVAL_FIND_001
// get retrieval quote -> zero for unsealed price as unsealed file exists.
offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
require.NoError(t, err)
@ -89,6 +101,7 @@ iLoop:
}
}
//stm: @CLIENT_RETRIEVAL_FIND_001
// fetch quote -> non-zero for unseal price as we no more unsealed files.
offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
require.NoError(t, err)
@ -100,6 +113,10 @@ iLoop:
}
func TestZeroPricePerByteRetrieval(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
if testing.Short() {
t.Skip("skipping test in short mode")
}

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -23,6 +24,12 @@ import (
)
func TestPublishDealsBatching(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
var (
ctx = context.Background()
publishPeriod = 10 * time.Second
@ -103,6 +110,7 @@ func TestPublishDealsBatching(t *testing.T) {
}
// Expect a single PublishStorageDeals message that includes the first two deals
//stm: @CHAIN_STATE_LIST_MESSAGES_001
msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
require.NoError(t, err)
count := 0

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -26,6 +27,13 @@ var (
)
func TestDealsRetryLackOfFunds(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
ctx := context.Background()
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
@ -105,6 +113,11 @@ func TestDealsRetryLackOfFunds(t *testing.T) {
}
func TestDealsRetryLackOfFunds_blockInPublishDeal(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
ctx := context.Background()
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
@ -181,6 +194,11 @@ func TestDealsRetryLackOfFunds_blockInPublishDeal(t *testing.T) {
}
func TestDealsRetryLackOfFunds_belowLimit(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
ctx := context.Background()
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -9,6 +10,12 @@ import (
)
func TestDealsWithSealingAndRPC(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
if testing.Short() {
t.Skip("skipping test in short mode")
}

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -38,6 +39,12 @@ const (
// TestGatewayWalletMsig tests that API calls to wallet and msig can be made on a lite
// node that is connected through a gateway to a full API node
func TestGatewayWalletMsig(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
@ -116,6 +123,7 @@ func TestGatewayWalletMsig(t *testing.T) {
addProposal, err := doSend(proto)
require.NoError(t, err)
//stm: @CHAIN_STATE_WAIT_MSG_001
res, err := lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true)
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode)
@ -127,6 +135,7 @@ func TestGatewayWalletMsig(t *testing.T) {
// Get available balance of msig: should be greater than zero and less
// than initial amount
msig := execReturn.IDAddress
//stm: @CHAIN_STATE_MINER_AVAILABLE_BALANCE_001
msigBalance, err := lite.MsigGetAvailableBalance(ctx, msig, types.EmptyTSK)
require.NoError(t, err)
require.Greater(t, msigBalance.Int64(), int64(0))
@ -139,6 +148,7 @@ func TestGatewayWalletMsig(t *testing.T) {
addProposal, err = doSend(proto)
require.NoError(t, err)
//stm: @CHAIN_STATE_WAIT_MSG_001
res, err = lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true)
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode)
@ -156,6 +166,7 @@ func TestGatewayWalletMsig(t *testing.T) {
approval1, err := doSend(proto)
require.NoError(t, err)
//stm: @CHAIN_STATE_WAIT_MSG_001
res, err = lite.StateWaitMsg(ctx, approval1, 1, api.LookbackNoLimit, true)
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode)
@ -169,6 +180,10 @@ func TestGatewayWalletMsig(t *testing.T) {
// TestGatewayMsigCLI tests that msig CLI calls can be made
// on a lite node that is connected through a gateway to a full API node
func TestGatewayMsigCLI(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
@ -180,6 +195,10 @@ func TestGatewayMsigCLI(t *testing.T) {
}
func TestGatewayDealFlow(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
@ -202,6 +221,10 @@ func TestGatewayDealFlow(t *testing.T) {
}
func TestGatewayCLIDealFlow(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -16,6 +17,12 @@ import (
)
func TestChainGetMessagesInTs(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
ctx := context.Background()
kit.QuietMiningLogs()
@ -84,6 +91,7 @@ func TestChainGetMessagesInTs(t *testing.T) {
}
for _, sm := range sms {
//stm: @CHAIN_STATE_WAIT_MSG_001
msgLookup, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)

View File

@ -87,7 +87,10 @@ type TestMiner struct {
func (tm *TestMiner) PledgeSectors(ctx context.Context, n, existing int, blockNotif <-chan struct{}) {
toCheck := tm.StartPledge(ctx, n, existing, blockNotif)
tm.WaitSectorsProving(ctx, toCheck)
}
func (tm *TestMiner) WaitSectorsProving(ctx context.Context, toCheck map[abi.SectorNumber]struct{}) {
for len(toCheck) > 0 {
tm.FlushSealingBatches(ctx)
@ -105,9 +108,8 @@ func (tm *TestMiner) PledgeSectors(ctx context.Context, n, existing int, blockNo
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
fmt.Printf("WaitSectorsProving: %d %+v\n", len(toCheck), states)
}
}
func (tm *TestMiner) StartPledge(ctx context.Context, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} {

View File

@ -39,6 +39,7 @@ func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode {
require.NoError(t, err)
srv, maddr := CreateRPCServer(t, handler, l)
fmt.Printf("FULLNODE RPC ENV FOR CLI DEBUGGING `export FULLNODE_API_INFO=%s`\n", "ws://"+srv.Listener.Addr().String())
cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
require.NoError(t, err)
@ -54,7 +55,9 @@ func minerRpc(t *testing.T, m *TestMiner) *TestMiner {
srv, maddr := CreateRPCServer(t, handler, m.RemoteListener)
fmt.Println("creating RPC server for", m.ActorAddr, "at: ", srv.Listener.Addr().String())
fmt.Printf("creating RPC server for %s at %s\n", m.ActorAddr, srv.Listener.Addr().String())
fmt.Printf("SP RPC ENV FOR CLI DEBUGGING `export MINER_API_INFO=%s`\n", "ws://"+srv.Listener.Addr().String())
url := "ws://" + srv.Listener.Addr().String() + "/rpc/v0" url := "ws://" + srv.Listener.Addr().String() + "/rpc/v0"
cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), url, nil) cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), url, nil)
require.NoError(t, err) require.NoError(t, err)

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -10,6 +11,12 @@ import (
// TestMultisig does a basic test to exercise the multisig CLI commands
func TestMultisig(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
kit.QuietMiningLogs()
blockTime := 5 * time.Millisecond

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -13,6 +14,12 @@ import (
)
func TestNonceIncremental(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
ctx := context.Background()
kit.QuietMiningLogs()
@ -51,6 +58,7 @@ func TestNonceIncremental(t *testing.T) {
}
for _, sm := range sms {
//stm: @CHAIN_STATE_WAIT_MSG_001
_, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)
}

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -27,6 +28,12 @@ import (
)
func TestPaymentChannelsAPI(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
kit.QuietMiningLogs()
ctx := context.Background()
@ -110,6 +117,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
require.NoError(t, err)
preds := state.NewStatePredicates(paymentCreator)
finished := make(chan struct{})
//stm: @CHAIN_STATE_GET_ACTOR_001
err = ev.StateChanged(func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) {
act, err := paymentCreator.StateGetActor(ctx, channel, ts.Key())
if err != nil {
@ -185,6 +193,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
collectMsg, err := paymentReceiver.PaychCollect(ctx, channel)
require.NoError(t, err)
//stm: @CHAIN_STATE_WAIT_MSG_001
res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode, "unable to collect on payment channel")

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -30,6 +31,12 @@ import (
// TestPaymentChannelsBasic does a basic test to exercise the payment channel CLI
// commands
func TestPaymentChannelsBasic(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
@ -87,6 +94,10 @@ type voucherSpec struct {
// TestPaymentChannelStatus tests the payment channel status CLI command
func TestPaymentChannelStatus(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
@ -167,6 +178,12 @@ func TestPaymentChannelStatus(t *testing.T) {
// TestPaymentChannelVouchers does a basic test to exercise some payment
// channel voucher commands
func TestPaymentChannelVouchers(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
@ -299,6 +316,12 @@ func TestPaymentChannelVouchers(t *testing.T) {
// TestPaymentChannelVoucherCreateShortfall verifies that if a voucher amount
// is greater than what's left in the channel, voucher create fails
func TestPaymentChannelVoucherCreateShortfall(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()

View File

@ -1,3 +1,4 @@
//stm: #integration
package itests
import (
@ -17,6 +18,15 @@ import (
)
func TestSDRUpgrade(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CHAIN_STATE_NETWORK_VERSION_001
//stm: @MINER_SECTOR_LIST_001
kit.QuietMiningLogs()
// oldDelay := policy.GetPreCommitChallengeDelay()

Some files were not shown because too many files have changed in this diff