diff --git a/.circleci/config.yml b/.circleci/config.yml
index 53611d565..1614daf8e 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -44,13 +44,13 @@ commands:
       - restore_cache:
           name: Restore parameters cache
           keys:
-            - 'v25-2k-lotus-params'
+            - 'v26-2k-lotus-params'
           paths:
             - /var/tmp/filecoin-proof-parameters/
       - run: ./lotus fetch-params 2048
       - save_cache:
           name: Save parameters cache
-          key: 'v25-2k-lotus-params'
+          key: 'v26-2k-lotus-params'
          paths:
            - /var/tmp/filecoin-proof-parameters/
   install_ipfs:
diff --git a/.circleci/template.yml b/.circleci/template.yml
index ef6818c6d..8f5995d56 100644
--- a/.circleci/template.yml
+++ b/.circleci/template.yml
@@ -44,13 +44,13 @@ commands:
       - restore_cache:
           name: Restore parameters cache
           keys:
-            - 'v25-2k-lotus-params'
+            - 'v26-2k-lotus-params'
           paths:
             - /var/tmp/filecoin-proof-parameters/
       - run: ./lotus fetch-params 2048
       - save_cache:
           name: Save parameters cache
-          key: 'v25-2k-lotus-params'
+          key: 'v26-2k-lotus-params'
          paths:
            - /var/tmp/filecoin-proof-parameters/
   install_ipfs:
diff --git a/api/api_storage.go b/api/api_storage.go
index c032a8e1b..a66f22d04 100644
--- a/api/api_storage.go
+++ b/api/api_storage.go
@@ -113,6 +113,8 @@ type StorageMiner interface {
 	// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
 	SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
 	SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin
+	// SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort the upgrade
+	SectorAbortUpgrade(context.Context, abi.SectorNumber) error //perm:admin
 
 	// WorkerConnect tells the node to connect to workers RPC
 	WorkerConnect(context.Context, string) error //perm:admin retry:true
@@ -130,6 +132,7 @@ type StorageMiner interface {
 	ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storage.ReplicaVanillaProofs, err *storiface.CallError) error //perm:admin retry:true
 	ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true
 	ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+	ReturnFinalizeReplicaUpdate(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
 	ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
 	ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
 	ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
diff --git a/api/api_worker.go b/api/api_worker.go
index 68d8e7baf..ba50a9459 100644
--- a/api/api_worker.go
+++ b/api/api_worker.go
@@ -39,6 +39,7 @@ type Worker interface {
 	SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin
 	SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin
 	FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
+	FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
 	ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
 	ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin
 	ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin
diff --git a/api/proxy_gen.go b/api/proxy_gen.go
index 9615181f5..c90ec4cdd 100644
--- a/api/proxy_gen.go
+++ b/api/proxy_gen.go
@@ -723,6 +723,8 @@ type StorageMinerStruct struct {
 
 		ReturnFetch func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
 
+		ReturnFinalizeReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
+
 		ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
 
 		ReturnGenerateSectorKeyFromData func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
@@ -755,6 +757,8 @@ type StorageMinerStruct struct {
 
 		SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"`
 
+		SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
+
 		SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"`
 
 		SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"`
@@ -872,6 +876,8 @@ type WorkerStruct struct {
 
 		Fetch func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
 
+		FinalizeReplicaUpdate func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
+
 		FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
 
 		GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"`
@@ -4280,6 +4286,17 @@ func (s *StorageMinerStub) ReturnFetch(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
 	return ErrNotSupported
 }
 
+func (s *StorageMinerStruct) ReturnFinalizeReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+	if s.Internal.ReturnFinalizeReplicaUpdate == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.ReturnFinalizeReplicaUpdate(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) ReturnFinalizeReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) ReturnFinalizeSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
 	if s.Internal.ReturnFinalizeSector == nil {
 		return ErrNotSupported
@@ -4456,6 +4473,17 @@ func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interface{}, error) {
 	return nil, ErrNotSupported
 }
 
+func (s *StorageMinerStruct) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+	if s.Internal.SectorAbortUpgrade == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.SectorAbortUpgrade(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) {
 	if s.Internal.SectorAddPieceToAny == nil {
 		return *new(SectorOffset), ErrNotSupported
@@ -5006,6 +5034,17 @@ func (s *WorkerStub) Fetch(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) {
 	return *new(storiface.CallID), ErrNotSupported
 }
 
+func (s *WorkerStruct) FinalizeReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
+	if s.Internal.FinalizeReplicaUpdate == nil {
+		return *new(storiface.CallID), ErrNotSupported
+	}
+	return s.Internal.FinalizeReplicaUpdate(p0, p1, p2)
+}
+
+func (s *WorkerStub) FinalizeReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
+	return *new(storiface.CallID), ErrNotSupported
+}
+
 func (s *WorkerStruct) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
 	if s.Internal.FinalizeSector == nil {
 		return *new(storiface.CallID), ErrNotSupported
diff --git a/blockstore/splitstore/README.md b/blockstore/splitstore/README.md
index f69a056ca..1490004cf 100644
--- a/blockstore/splitstore/README.md
+++ b/blockstore/splitstore/README.md
@@ -49,10 +49,11 @@ These are options in the `[Chainstore.Splitstore]` section of the configuration:
   blockstore and discards writes; this is necessary to support syncing from a snapshot.
 - `MarkSetType` -- specifies the type of markset to use during compaction.
   The markset is the data structure used by compaction/gc to track live objects.
-  The default value is `"map"`, which will use an in-memory map; if you are limited
-  in memory (or indeed see compaction run out of memory), you can also specify
-  `"badger"` which will use an disk backed markset, using badger. This will use
-  much less memory, but will also make compaction slower.
+  The default value is `"badger"`, which will use a disk-backed markset using badger.
+  If you have a lot of memory (48G or more) you can also use `"map"`, which will use
+  an in-memory markset, speeding up compaction at the cost of higher memory usage.
+  Note: if you are using a VPS with a network volume, you need to provision at least
+  3000 IOPS with the badger markset.
 - `HotStoreMessageRetention` -- specifies how many finalities, beyond the 4
   finalities maintained by default, to maintain messages and message receipts in the
   hotstore. This is useful for assistive nodes that want to support syncing for other
@@ -105,6 +106,12 @@ Compaction works transactionally with the following algorithm:
 - We delete in small batches taking a lock; each batch is checked again for marks,
   from the concurrent transactional mark, so as to never delete anything live
 - We then end the transaction and compact/gc the hotstore.
 
+As of [#8008](https://github.com/filecoin-project/lotus/pull/8008) the compaction algorithm has been
+modified to eliminate sorting and to maintain the cold object set on disk. This drastically reduces
+memory usage; in fact, when using badger as the markset, compaction uses very little memory, and
+it should now be possible to run splitstore with 32GB of RAM or less without danger of running out of
+memory during compaction.
+
 ## Garbage Collection
 
 TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577)
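The low-memory compaction the README change describes rests on two small on-disk structures introduced further down in this diff: a `ColdSetWriter`/`ColdSetReader` pair that streams the cold object set, and a `Checkpoint` that records purge progress. The following is a rough sketch of how they compose into a resumable purge; it is not code from the PR — `deleteObject` and the paths are hypothetical stand-ins, and the real implementation checkpoints once per batch rather than per object.

```go
package main

import (
	"golang.org/x/xerrors"

	cid "github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/blockstore/splitstore"
)

// resumePurge replays a cold-object purge from wherever a previous run stopped.
// deleteObject stands in for whatever actually removes a block from the hotstore.
func resumePurge(coldsetPath, checkpointPath string, deleteObject func(cid.Cid) error) error {
	coldr, err := splitstore.NewColdSetReader(coldsetPath)
	if err != nil {
		return xerrors.Errorf("error opening coldset: %w", err)
	}
	defer coldr.Close() //nolint:errcheck

	// OpenCheckpoint returns the last cid recorded before the interruption
	cp, start, err := splitstore.OpenCheckpoint(checkpointPath)
	if err != nil {
		return xerrors.Errorf("error opening checkpoint: %w", err)
	}
	defer cp.Close() //nolint:errcheck

	skipping := start.Defined()
	return coldr.ForEach(func(c cid.Cid) error {
		if skipping {
			// everything up to and including the checkpointed cid was
			// already purged by the interrupted run
			if c.Equals(start) {
				skipping = false
			}
			return nil
		}

		if err := deleteObject(c); err != nil {
			return err
		}

		// record progress (per object here for brevity; per batch in the PR)
		return cp.Set(c)
	})
}
```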
diff --git a/blockstore/splitstore/checkpoint.go b/blockstore/splitstore/checkpoint.go
new file mode 100644
index 000000000..d3cd4cba7
--- /dev/null
+++ b/blockstore/splitstore/checkpoint.go
@@ -0,0 +1,118 @@
+package splitstore
+
+import (
+	"bufio"
+	"io"
+	"os"
+
+	"golang.org/x/xerrors"
+
+	cid "github.com/ipfs/go-cid"
+	mh "github.com/multiformats/go-multihash"
+)
+
+type Checkpoint struct {
+	file *os.File
+	buf  *bufio.Writer
+}
+
+func NewCheckpoint(path string) (*Checkpoint, error) {
+	file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_SYNC, 0644)
+	if err != nil {
+		return nil, xerrors.Errorf("error creating checkpoint: %w", err)
+	}
+	buf := bufio.NewWriter(file)
+
+	return &Checkpoint{
+		file: file,
+		buf:  buf,
+	}, nil
+}
+
+func OpenCheckpoint(path string) (*Checkpoint, cid.Cid, error) {
+	filein, err := os.Open(path)
+	if err != nil {
+		return nil, cid.Undef, xerrors.Errorf("error opening checkpoint for reading: %w", err)
+	}
+	defer filein.Close() //nolint:errcheck
+
+	bufin := bufio.NewReader(filein)
+	start, err := readRawCid(bufin, nil)
+	if err != nil && err != io.EOF {
+		return nil, cid.Undef, xerrors.Errorf("error reading cid from checkpoint: %w", err)
+	}
+
+	fileout, err := os.OpenFile(path, os.O_WRONLY|os.O_SYNC, 0644)
+	if err != nil {
+		return nil, cid.Undef, xerrors.Errorf("error opening checkpoint for writing: %w", err)
+	}
+	bufout := bufio.NewWriter(fileout)
+
+	return &Checkpoint{
+		file: fileout,
+		buf:  bufout,
+	}, start, nil
+}
+
+func (cp *Checkpoint) Set(c cid.Cid) error {
+	if _, err := cp.file.Seek(0, io.SeekStart); err != nil {
+		return xerrors.Errorf("error seeking beginning of checkpoint: %w", err)
+	}
+
+	if err := writeRawCid(cp.buf, c, true); err != nil {
+		return xerrors.Errorf("error writing cid to checkpoint: %w", err)
+	}
+
+	return nil
+}
+
+func (cp *Checkpoint) Close() error {
+	if cp.file == nil {
+		return nil
+	}
+
+	err := cp.file.Close()
+	cp.file = nil
+	cp.buf = nil
+
+	return err
+}
+
+func readRawCid(buf *bufio.Reader, hbuf []byte) (cid.Cid, error) {
+	sz, err := buf.ReadByte()
+	if err != nil {
+		return cid.Undef, err // don't wrap EOF as it is not an error here
+	}
+
+	if hbuf == nil {
+		hbuf = make([]byte, int(sz))
+	} else {
+		hbuf = hbuf[:int(sz)]
+	}
+
+	if _, err := io.ReadFull(buf, hbuf); err != nil {
+		return cid.Undef, xerrors.Errorf("error reading hash: %w", err) // wrap EOF, it's corrupt
+	}
+
+	hash, err := mh.Cast(hbuf)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("error casting multihash: %w", err)
+	}
+
+	return cid.NewCidV1(cid.Raw, hash), nil
+}
+
+func writeRawCid(buf *bufio.Writer, c cid.Cid, flush bool) error {
+	hash := c.Hash()
+	if err := buf.WriteByte(byte(len(hash))); err != nil {
+		return err
+	}
+	if _, err := buf.Write(hash); err != nil {
+		return err
+	}
+	if flush {
+		return buf.Flush()
+	}
+
+	return nil
+}
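The checkpoint keeps exactly one position: `Set` seeks back to offset 0 and rewrites the cid through an `O_SYNC` descriptor, so after a crash `OpenCheckpoint` yields the last position that was durably recorded. A minimal usage sketch, with a hypothetical path and dummy cids (this is an illustration, not code from the PR):

```go
package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"

	"github.com/filecoin-project/lotus/blockstore/splitstore"
)

func main() {
	mkCid := func(s string) cid.Cid {
		h, _ := mh.Sum([]byte(s), mh.SHA2_256, -1)
		return cid.NewCidV1(cid.Raw, h)
	}
	c1, c2 := mkCid("a"), mkCid("b")

	cp, err := splitstore.NewCheckpoint("/tmp/compaction.chk") // hypothetical path
	if err != nil {
		panic(err)
	}
	_ = cp.Set(c1) // the file now durably holds c1
	_ = cp.Set(c2) // Set rewrites from offset 0, so it now holds c2
	_ = cp.Close()

	// after a restart, OpenCheckpoint hands back the last recorded position
	cp, start, err := splitstore.OpenCheckpoint("/tmp/compaction.chk")
	if err != nil {
		panic(err)
	}
	defer cp.Close() //nolint:errcheck

	fmt.Println(start.Equals(c2)) // true: resume after c2
}
```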
diff --git a/blockstore/splitstore/checkpoint_test.go b/blockstore/splitstore/checkpoint_test.go
new file mode 100644
index 000000000..4fefe40cf
--- /dev/null
+++ b/blockstore/splitstore/checkpoint_test.go
@@ -0,0 +1,147 @@
+package splitstore
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/ipfs/go-cid"
+	"github.com/multiformats/go-multihash"
+)
+
+func TestCheckpoint(t *testing.T) {
+	dir, err := ioutil.TempDir("", "checkpoint.*")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Cleanup(func() {
+		_ = os.RemoveAll(dir)
+	})
+
+	path := filepath.Join(dir, "checkpoint")
+
+	makeCid := func(key string) cid.Cid {
+		h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		return cid.NewCidV1(cid.Raw, h)
+	}
+
+	k1 := makeCid("a")
+	k2 := makeCid("b")
+	k3 := makeCid("c")
+	k4 := makeCid("d")
+
+	cp, err := NewCheckpoint(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cp.Set(k1); err != nil {
+		t.Fatal(err)
+	}
+	if err := cp.Set(k2); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cp.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	cp, start, err := OpenCheckpoint(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !start.Equals(k2) {
+		t.Fatalf("expected start to be %s; got %s", k2, start)
+	}
+
+	if err := cp.Set(k3); err != nil {
+		t.Fatal(err)
+	}
+	if err := cp.Set(k4); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cp.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	cp, start, err = OpenCheckpoint(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !start.Equals(k4) {
+		t.Fatalf("expected start to be %s; got %s", k4, start)
+	}
+
+	if err := cp.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// also test correct operation with an empty checkpoint
+	cp, err = NewCheckpoint(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cp.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	cp, start, err = OpenCheckpoint(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if start.Defined() {
+		t.Fatal("expected start to be undefined")
+	}
+
+	if err := cp.Set(k1); err != nil {
+		t.Fatal(err)
+	}
+	if err := cp.Set(k2); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cp.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	cp, start, err = OpenCheckpoint(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !start.Equals(k2) {
+		t.Fatalf("expected start to be %s; got %s", k2, start)
+	}
+
+	if err := cp.Set(k3); err != nil {
+		t.Fatal(err)
+	}
+	if err := cp.Set(k4); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cp.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	cp, start, err = OpenCheckpoint(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !start.Equals(k4) {
+		t.Fatalf("expected start to be %s; got %s", k4, start)
+	}
+
+	if err := cp.Close(); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/blockstore/splitstore/coldset.go b/blockstore/splitstore/coldset.go
new file mode 100644
index 000000000..129e2ed92
--- /dev/null
+++ b/blockstore/splitstore/coldset.go
@@ -0,0 +1,102 @@
+package splitstore
+
+import (
+	"bufio"
+	"io"
+	"os"
+
+	"golang.org/x/xerrors"
+
+	cid "github.com/ipfs/go-cid"
+)
+
+type ColdSetWriter struct {
+	file *os.File
+	buf  *bufio.Writer
+}
+
+type ColdSetReader struct {
+	file *os.File
+	buf  *bufio.Reader
+}
+
+func NewColdSetWriter(path string) (*ColdSetWriter, error) {
+	file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
+	if err != nil {
+		return nil, xerrors.Errorf("error creating coldset: %w", err)
+	}
+	buf := bufio.NewWriter(file)
+
+	return &ColdSetWriter{
+		file: file,
+		buf:  buf,
+	}, nil
+}
+
+func NewColdSetReader(path string) (*ColdSetReader, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, xerrors.Errorf("error opening coldset: %w", err)
+	}
+	buf := bufio.NewReader(file)
+
+	return &ColdSetReader{
+		file: file,
+		buf:  buf,
+	}, nil
+}
+
+func (s *ColdSetWriter) Write(c cid.Cid) error {
+	return writeRawCid(s.buf, c, false)
+}
+
+func (s *ColdSetWriter) Close() error {
+	if s.file == nil {
+		return nil
+	}
+
+	err1 := s.buf.Flush()
+	err2 := s.file.Close()
+	s.buf = nil
+	s.file = nil
+
+	if err1 != nil {
+		return err1
+	}
+	return err2
+}
+
+func (s *ColdSetReader) ForEach(f func(cid.Cid) error) error {
+	hbuf := make([]byte, 256)
+	for {
+		next, err := readRawCid(s.buf, hbuf)
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+
+			return xerrors.Errorf("error reading coldset: %w", err)
+		}
+
+		if err := f(next); err != nil {
+			return err
+		}
+	}
+}
+
+func (s *ColdSetReader) Reset() error {
+	_, err := s.file.Seek(0, io.SeekStart)
+	return err
+}
+
+func (s *ColdSetReader) Close() error {
+	if s.file == nil {
+		return nil
+	}
+
+	err := s.file.Close()
+	s.file = nil
+	s.buf = nil
+
+	return err
+}
diff --git a/blockstore/splitstore/coldset_test.go b/blockstore/splitstore/coldset_test.go
new file mode 100644
index 000000000..60216ebd4
--- /dev/null
+++ b/blockstore/splitstore/coldset_test.go
@@ -0,0 +1,99 @@
+package splitstore
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/ipfs/go-cid"
+	"github.com/multiformats/go-multihash"
+)
+
+func TestColdSet(t *testing.T) {
+	dir, err := ioutil.TempDir("", "coldset.*")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Cleanup(func() {
+		_ = os.RemoveAll(dir)
+	})
+
+	path := filepath.Join(dir, "coldset")
+
+	makeCid := func(i int) cid.Cid {
+		h, err := multihash.Sum([]byte(fmt.Sprintf("cid.%d", i)), multihash.SHA2_256, -1)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		return cid.NewCidV1(cid.Raw, h)
+	}
+
+	const count = 1000
+	cids := make([]cid.Cid, 0, count)
+	for i := 0; i < count; i++ {
+		cids = append(cids, makeCid(i))
+	}
+
+	cw, err := NewColdSetWriter(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, c := range cids {
+		if err := cw.Write(c); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	if err := cw.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	cr, err := NewColdSetReader(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	index := 0
+	err = cr.ForEach(func(c cid.Cid) error {
+		if index >= count {
+			t.Fatal("too many cids")
+		}
+
+		if !c.Equals(cids[index]) {
+			t.Fatalf("wrong cid %d; expected %s but got %s", index, cids[index], c)
+		}
+
+		index++
+		return nil
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cr.Reset(); err != nil {
+		t.Fatal(err)
+	}
+
+	index = 0
+	err = cr.ForEach(func(c cid.Cid) error {
+		if index >= count {
+			t.Fatal("too many cids")
+		}
+
+		if !c.Equals(cids[index]) {
+			t.Fatalf("wrong cid; expected %s but got %s", cids[index], c)
+		}
+
+		index++
+		return nil
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
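The reader deliberately exposes `Reset` alongside `ForEach`: compaction streams the same cold set twice — once to move objects to the coldstore, once to purge them from the hotstore — without ever materializing it in memory, which is the sorting-free design the README change above describes. A sketch of that two-pass pattern, with `moveToColdStore` and `purgeFromHotStore` as hypothetical callbacks:

```go
package main

import (
	cid "github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/blockstore/splitstore"
)

func twoPass(path string, moveToColdStore, purgeFromHotStore func(cid.Cid) error) error {
	coldr, err := splitstore.NewColdSetReader(path)
	if err != nil {
		return err
	}
	defer coldr.Close() //nolint:errcheck

	// pass 1: copy cold objects into the coldstore
	if err := coldr.ForEach(moveToColdStore); err != nil {
		return err
	}

	// rewind the underlying file and stream the same set again
	if err := coldr.Reset(); err != nil {
		return err
	}

	// pass 2: delete the now-safe objects from the hotstore
	return coldr.ForEach(purgeFromHotStore)
}
```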
diff --git a/blockstore/splitstore/markset.go b/blockstore/splitstore/markset.go
index 218681e13..e67494538 100644
--- a/blockstore/splitstore/markset.go
+++ b/blockstore/splitstore/markset.go
@@ -10,39 +10,36 @@ import (
 
 var errMarkSetClosed = errors.New("markset closed")
 
-// MarkSet is a utility to keep track of seen CID, and later query for them.
-//
-// * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt).
-// * If a probabilistic result is acceptable, it can be backed by a bloom filter
+// MarkSet is an interface for tracking CIDs during chain and object walks
 type MarkSet interface {
+	ObjectVisitor
 	Mark(cid.Cid) error
+	MarkMany([]cid.Cid) error
 	Has(cid.Cid) (bool, error)
 	Close() error
-	SetConcurrent()
-}
 
-type MarkSetVisitor interface {
-	MarkSet
-	ObjectVisitor
+	// BeginCriticalSection ensures that the markset is persisted to disk for recovery in case
+	// of abnormal termination during the critical section span.
+	BeginCriticalSection() error
+	// EndCriticalSection ends the critical section span.
+	EndCriticalSection()
 }
 
 type MarkSetEnv interface {
-	// Create creates a new markset within the environment.
-	// name is a unique name for this markset, mapped to the filesystem in disk-backed environments
+	// New creates a new markset within the environment.
+	// name is a unique name for this markset, mapped to the filesystem for on-disk persistence.
 	// sizeHint is a hint about the expected size of the markset
-	Create(name string, sizeHint int64) (MarkSet, error)
-	// CreateVisitor is like Create, but returns a wider interface that supports atomic visits.
-	// It may not be supported by some markset types (e.g. bloom).
-	CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error)
-	// SupportsVisitor returns true if the marksets created by this environment support the visitor interface.
-	SupportsVisitor() bool
+	New(name string, sizeHint int64) (MarkSet, error)
+	// Recover recovers an existing markset persisted on-disk.
+	Recover(name string) (MarkSet, error)
+	// Close closes the markset
 	Close() error
 }
 
 func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) {
 	switch mtype {
 	case "map":
-		return NewMapMarkSetEnv()
+		return NewMapMarkSetEnv(path)
 	case "badger":
 		return NewBadgerMarkSetEnv(path)
 	default:
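Pulling the new interface together, the expected call sequence over a compaction cycle looks roughly like the sketch below. This is an illustration written against the interface above, not code from the PR; the `"live"` name mirrors the markset name `doCompact` uses later in this diff.

```go
package main

import (
	cid "github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/blockstore/splitstore"
)

func lifecycle(path string, cids []cid.Cid) error {
	env, err := splitstore.OpenMarkSetEnv(path, "badger")
	if err != nil {
		return err
	}
	defer env.Close() //nolint:errcheck

	ms, err := env.New("live", 0)
	if err != nil {
		return err
	}

	// normal marking: writes may be batched and need not be durable yet
	if err := ms.MarkMany(cids); err != nil {
		return err
	}

	// entering the critical section persists all accumulated marks and makes
	// every subsequent Mark/MarkMany/Visit durable before it returns
	if err := ms.BeginCriticalSection(); err != nil {
		return err
	}

	// ... purge work happens here; if the process crashes in this window,
	// a restarted node calls env.Recover("live") to get the same markset back ...

	ms.EndCriticalSection()
	return ms.Close()
}
```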
diff --git a/blockstore/splitstore/markset_badger.go b/blockstore/splitstore/markset_badger.go
index ae06a69f8..659d3b5dd 100644
--- a/blockstore/splitstore/markset_badger.go
+++ b/blockstore/splitstore/markset_badger.go
@@ -3,6 +3,7 @@ package splitstore
 import (
 	"os"
 	"path/filepath"
+	"runtime"
 	"sync"
 
 	"golang.org/x/xerrors"
@@ -28,13 +29,13 @@ type BadgerMarkSet struct {
 	writers int
 	seqno   int
 	version int
+	persist bool
 
 	db   *badger.DB
 	path string
 }
 
 var _ MarkSet = (*BadgerMarkSet)(nil)
-var _ MarkSetVisitor = (*BadgerMarkSet)(nil)
 
 var badgerMarkSetBatchSize = 16384
@@ -48,11 +49,10 @@ func NewBadgerMarkSetEnv(path string) (MarkSetEnv, error) {
 	return &BadgerMarkSetEnv{path: msPath}, nil
 }
 
-func (e *BadgerMarkSetEnv) create(name string, sizeHint int64) (*BadgerMarkSet, error) {
-	name += ".tmp"
+func (e *BadgerMarkSetEnv) New(name string, sizeHint int64) (MarkSet, error) {
 	path := filepath.Join(e.path, name)
 
-	db, err := openTransientBadgerDB(path)
+	db, err := openBadgerDB(path, false)
 	if err != nil {
 		return nil, xerrors.Errorf("error creating badger db: %w", err)
 	}
@@ -68,18 +68,72 @@ func (e *BadgerMarkSetEnv) create(name string, sizeHint int64) (*BadgerMarkSet,
 	return ms, nil
 }
 
-func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
-	return e.create(name, sizeHint)
-}
+func (e *BadgerMarkSetEnv) Recover(name string) (MarkSet, error) {
+	path := filepath.Join(e.path, name)
 
-func (e *BadgerMarkSetEnv) CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error) {
-	return e.create(name, sizeHint)
-}
+	if _, err := os.Stat(path); err != nil {
+		return nil, xerrors.Errorf("error stating badger db path: %w", err)
+	}
 
-func (e *BadgerMarkSetEnv) SupportsVisitor() bool { return true }
+	db, err := openBadgerDB(path, true)
+	if err != nil {
+		return nil, xerrors.Errorf("error creating badger db: %w", err)
+	}
+
+	ms := &BadgerMarkSet{
+		pend:    make(map[string]struct{}),
+		writing: make(map[int]map[string]struct{}),
+		db:      db,
+		path:    path,
+		persist: true,
+	}
+	ms.cond.L = &ms.mx
+
+	return ms, nil
+}
 
 func (e *BadgerMarkSetEnv) Close() error {
-	return os.RemoveAll(e.path)
+	return nil
+}
+
+func (s *BadgerMarkSet) BeginCriticalSection() error {
+	s.mx.Lock()
+
+	if s.persist {
+		s.mx.Unlock()
+		return nil
+	}
+
+	var write bool
+	var seqno int
+	if len(s.pend) > 0 {
+		write = true
+		seqno = s.nextBatch()
+	}
+
+	s.persist = true
+	s.mx.Unlock()
+
+	if write {
+		// all writes sync once persist is true
+		return s.write(seqno)
+	}
+
+	// wait for any pending writes and sync
+	s.mx.Lock()
+	for s.writers > 0 {
+		s.cond.Wait()
+	}
+	s.mx.Unlock()
+
+	return s.db.Sync()
+}
+
+func (s *BadgerMarkSet) EndCriticalSection() {
+	s.mx.Lock()
+	defer s.mx.Unlock()
+
+	s.persist = false
 }
 
 func (s *BadgerMarkSet) Mark(c cid.Cid) error {
@@ -99,6 +153,23 @@ func (s *BadgerMarkSet) Mark(c cid.Cid) error {
 	return nil
 }
 
+func (s *BadgerMarkSet) MarkMany(batch []cid.Cid) error {
+	s.mx.Lock()
+	if s.pend == nil {
+		s.mx.Unlock()
+		return errMarkSetClosed
+	}
+
+	write, seqno := s.putMany(batch)
+	s.mx.Unlock()
+
+	if write {
+		return s.write(seqno)
+	}
+
+	return nil
+}
+
 func (s *BadgerMarkSet) Has(c cid.Cid) (bool, error) {
 	s.mx.RLock()
 	defer s.mx.RUnlock()
@@ -204,16 +275,34 @@ func (s *BadgerMarkSet) tryDB(key []byte) (has bool, err error) {
 // writer holds the exclusive lock
 func (s *BadgerMarkSet) put(key string) (write bool, seqno int) {
 	s.pend[key] = struct{}{}
-	if len(s.pend) < badgerMarkSetBatchSize {
+	if !s.persist && len(s.pend) < badgerMarkSetBatchSize {
 		return false, 0
 	}
 
-	seqno = s.seqno
+	seqno = s.nextBatch()
+	return true, seqno
+}
+
+func (s *BadgerMarkSet) putMany(batch []cid.Cid) (write bool, seqno int) {
+	for _, c := range batch {
+		key := string(c.Hash())
+		s.pend[key] = struct{}{}
+	}
+
+	if !s.persist && len(s.pend) < badgerMarkSetBatchSize {
+		return false, 0
+	}
+
+	seqno = s.nextBatch()
+	return true, seqno
+}
+
+func (s *BadgerMarkSet) nextBatch() int {
+	seqno := s.seqno
 	s.seqno++
 	s.writing[seqno] = s.pend
 	s.pend = make(map[string]struct{})
-
-	return true, seqno
+	return seqno
 }
 
 func (s *BadgerMarkSet) write(seqno int) (err error) {
@@ -258,6 +347,14 @@ func (s *BadgerMarkSet) write(seqno int) (err error) {
 		return xerrors.Errorf("error flushing batch to badger markset: %w", err)
 	}
 
+	s.mx.RLock()
+	persist := s.persist
+	s.mx.RUnlock()
+
+	if persist {
+		return s.db.Sync()
+	}
+
 	return nil
 }
@@ -277,26 +374,29 @@ func (s *BadgerMarkSet) Close() error {
 	db := s.db
 	s.db = nil
 
-	return closeTransientBadgerDB(db, s.path)
+	return closeBadgerDB(db, s.path, s.persist)
 }
 
-func (s *BadgerMarkSet) SetConcurrent() {}
+func openBadgerDB(path string, recover bool) (*badger.DB, error) {
+	// if it is not a recovery, clean up first
+	if !recover {
+		err := os.RemoveAll(path)
+		if err != nil {
+			return nil, xerrors.Errorf("error clearing markset directory: %w", err)
+		}
 
-func openTransientBadgerDB(path string) (*badger.DB, error) {
-	// clean up first
-	err := os.RemoveAll(path)
-	if err != nil {
-		return nil, xerrors.Errorf("error clearing markset directory: %w", err)
-	}
-
-	err = os.MkdirAll(path, 0755) //nolint:gosec
-	if err != nil {
-		return nil, xerrors.Errorf("error creating markset directory: %w", err)
+		err = os.MkdirAll(path, 0755) //nolint:gosec
+		if err != nil {
+			return nil, xerrors.Errorf("error creating markset directory: %w", err)
+		}
 	}
 
 	opts := badger.DefaultOptions(path)
+	// we manually sync when we are in critical section
 	opts.SyncWrites = false
+	// no need to compact L0 on close
 	opts.CompactL0OnClose = false
+	// we store hashes, not much to gain by compression
 	opts.Compression = options.None
 	// Note: We use FileIO for loading modes to avoid memory thrashing and interference
 	// between the system blockstore and the markset.
@@ -305,6 +405,15 @@ func openTransientBadgerDB(path string) (*badger.DB, error) {
 	// exceeded 1GB in size.
 	opts.TableLoadingMode = options.FileIO
 	opts.ValueLogLoadingMode = options.FileIO
+	// We increase the number of L0 tables before compaction so that compaction
+	// is unlikely to be necessary.
+	opts.NumLevelZeroTables = 20      // default is 5
+	opts.NumLevelZeroTablesStall = 30 // default is 10
+	// increase the number of compactors from default 2 so that if we ever have to
+	// compact, it is fast
+	if runtime.NumCPU()/2 > opts.NumCompactors {
+		opts.NumCompactors = runtime.NumCPU() / 2
+	}
 	opts.Logger = &badgerLogger{
 		SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(),
 		skip2:         log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(),
@@ -313,12 +422,16 @@ func openTransientBadgerDB(path string) (*badger.DB, error) {
 	return badger.Open(opts)
 }
 
-func closeTransientBadgerDB(db *badger.DB, path string) error {
+func closeBadgerDB(db *badger.DB, path string, persist bool) error {
 	err := db.Close()
 	if err != nil {
 		return xerrors.Errorf("error closing badger markset: %w", err)
 	}
 
+	if persist {
+		return nil
+	}
+
 	err = os.RemoveAll(path)
 	if err != nil {
 		return xerrors.Errorf("error deleting badger markset: %w", err)
diff --git a/blockstore/splitstore/markset_map.go b/blockstore/splitstore/markset_map.go
index 07a7ae70d..8216bcd81 100644
--- a/blockstore/splitstore/markset_map.go
+++ b/blockstore/splitstore/markset_map.go
@@ -1,12 +1,20 @@
 package splitstore
 
 import (
+	"bufio"
+	"io"
+	"os"
+	"path/filepath"
 	"sync"
 
+	"golang.org/x/xerrors"
+
 	cid "github.com/ipfs/go-cid"
 )
 
-type MapMarkSetEnv struct{}
+type MapMarkSetEnv struct {
+	path string
+}
 
 var _ MarkSetEnv = (*MapMarkSetEnv)(nil)
 
@@ -14,55 +22,194 @@ type MapMarkSet struct {
 	mx  sync.RWMutex
 	set map[string]struct{}
 
-	ts bool
+	persist bool
+	file    *os.File
+	buf     *bufio.Writer
+
+	path string
 }
 
 var _ MarkSet = (*MapMarkSet)(nil)
-var _ MarkSetVisitor = (*MapMarkSet)(nil)
 
-func NewMapMarkSetEnv() (*MapMarkSetEnv, error) {
-	return &MapMarkSetEnv{}, nil
+func NewMapMarkSetEnv(path string) (*MapMarkSetEnv, error) {
+	msPath := filepath.Join(path, "markset.map")
+	err := os.MkdirAll(msPath, 0755) //nolint:gosec
+	if err != nil {
+		return nil, xerrors.Errorf("error creating markset directory: %w", err)
+	}
+
+	return &MapMarkSetEnv{path: msPath}, nil
 }
 
-func (e *MapMarkSetEnv) create(name string, sizeHint int64) (*MapMarkSet, error) {
+func (e *MapMarkSetEnv) New(name string, sizeHint int64) (MarkSet, error) {
+	path := filepath.Join(e.path, name)
 	return &MapMarkSet{
-		set: make(map[string]struct{}, sizeHint),
+		set:  make(map[string]struct{}, sizeHint),
+		path: path,
 	}, nil
 }
 
-func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
-	return e.create(name, sizeHint)
-}
+func (e *MapMarkSetEnv) Recover(name string) (MarkSet, error) {
+	path := filepath.Join(e.path, name)
+	s := &MapMarkSet{
+		set:  make(map[string]struct{}),
+		path: path,
+	}
 
-func (e *MapMarkSetEnv) CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error) {
-	return e.create(name, sizeHint)
-}
+	in, err := os.Open(path)
+	if err != nil {
+		return nil, xerrors.Errorf("error opening markset file for read: %w", err)
+	}
+	defer in.Close() //nolint:errcheck
 
-func (e *MapMarkSetEnv) SupportsVisitor() bool { return true }
+	// wrap a buffered reader to make this faster
+	buf := bufio.NewReader(in)
+	for {
+		var sz byte
+		if sz, err = buf.ReadByte(); err != nil {
+			break
+		}
+
+		key := make([]byte, int(sz))
+		if _, err = io.ReadFull(buf, key); err != nil {
+			break
+		}
+
+		s.set[string(key)] = struct{}{}
+	}
+
+	if err != io.EOF {
+		return nil, xerrors.Errorf("error reading markset file: %w", err)
+	}
+
+	file, err := os.OpenFile(s.path, os.O_WRONLY|os.O_APPEND, 0)
+	if err != nil {
+		return nil, xerrors.Errorf("error opening markset file for write: %w", err)
+	}
+
+	s.persist = true
+	s.file = file
+	s.buf = bufio.NewWriter(file)
+
+	return s, nil
+}
 
 func (e *MapMarkSetEnv) Close() error {
 	return nil
 }
 
-func (s *MapMarkSet) Mark(cid cid.Cid) error {
-	if s.ts {
-		s.mx.Lock()
-		defer s.mx.Unlock()
-	}
+func (s *MapMarkSet) BeginCriticalSection() error {
+	s.mx.Lock()
+	defer s.mx.Unlock()
 
 	if s.set == nil {
 		return errMarkSetClosed
 	}
 
-	s.set[string(cid.Hash())] = struct{}{}
+	if s.persist {
+		return nil
+	}
+
+	file, err := os.OpenFile(s.path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
+	if err != nil {
+		return xerrors.Errorf("error opening markset file: %w", err)
+	}
+
+	// wrap a buffered writer to make this faster
+	s.buf = bufio.NewWriter(file)
+	for key := range s.set {
+		if err := s.writeKey([]byte(key), false); err != nil {
+			_ = file.Close()
+			s.buf = nil
+			return err
+		}
+	}
+	if err := s.buf.Flush(); err != nil {
+		_ = file.Close()
+		s.buf = nil
+		return xerrors.Errorf("error flushing markset file buffer: %w", err)
+	}
+
+	s.file = file
+	s.persist = true
+
+	return nil
+}
+
+func (s *MapMarkSet) EndCriticalSection() {
+	s.mx.Lock()
+	defer s.mx.Unlock()
+
+	if !s.persist {
+		return
+	}
+
+	_ = s.file.Close()
+	_ = os.Remove(s.path)
+	s.file = nil
+	s.buf = nil
+	s.persist = false
+}
+
+func (s *MapMarkSet) Mark(c cid.Cid) error {
+	s.mx.Lock()
+	defer s.mx.Unlock()
+
+	if s.set == nil {
+		return errMarkSetClosed
+	}
+
+	hash := c.Hash()
+	s.set[string(hash)] = struct{}{}
+
+	if s.persist {
+		if err := s.writeKey(hash, true); err != nil {
+			return err
+		}
+
+		if err := s.file.Sync(); err != nil {
+			return xerrors.Errorf("error syncing markset: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (s *MapMarkSet) MarkMany(batch []cid.Cid) error {
+	s.mx.Lock()
+	defer s.mx.Unlock()
+
+	if s.set == nil {
+		return errMarkSetClosed
+	}
+
+	for _, c := range batch {
+		hash := c.Hash()
+		s.set[string(hash)] = struct{}{}
+
+		if s.persist {
+			if err := s.writeKey(hash, false); err != nil {
+				return err
+			}
+		}
+	}
+
+	if s.persist {
+		if err := s.buf.Flush(); err != nil {
+			return xerrors.Errorf("error flushing markset buffer to disk: %w", err)
+		}
+
+		if err := s.file.Sync(); err != nil {
+			return xerrors.Errorf("error syncing markset: %w", err)
+		}
+	}
+
 	return nil
 }
 
 func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) {
-	if s.ts {
-		s.mx.RLock()
-		defer s.mx.RUnlock()
-	}
+	s.mx.RLock()
+	defer s.mx.RUnlock()
 
 	if s.set == nil {
 		return false, errMarkSetClosed
@@ -73,33 +220,70 @@ func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) {
 }
 
 func (s *MapMarkSet) Visit(c cid.Cid) (bool, error) {
-	if s.ts {
-		s.mx.Lock()
-		defer s.mx.Unlock()
-	}
+	s.mx.Lock()
+	defer s.mx.Unlock()
 
 	if s.set == nil {
 		return false, errMarkSetClosed
 	}
 
-	key := string(c.Hash())
+	hash := c.Hash()
+	key := string(hash)
 	if _, ok := s.set[key]; ok {
 		return false, nil
 	}
 
 	s.set[key] = struct{}{}
+
+	if s.persist {
+		if err := s.writeKey(hash, true); err != nil {
+			return false, err
+		}
+		if err := s.file.Sync(); err != nil {
+			return false, xerrors.Errorf("error syncing markset: %w", err)
+		}
+	}
+
 	return true, nil
 }
 
 func (s *MapMarkSet) Close() error {
-	if s.ts {
-		s.mx.Lock()
-		defer s.mx.Unlock()
+	s.mx.Lock()
+	defer s.mx.Unlock()
+
+	if s.set == nil {
+		return nil
 	}
 
 	s.set = nil
+
+	if s.file != nil {
+		if err := s.file.Close(); err != nil {
+			log.Warnf("error closing markset file: %s", err)
+		}
+
+		if !s.persist {
+			if err := os.Remove(s.path); err != nil {
+				log.Warnf("error removing markset file: %s", err)
+			}
+		}
+	}
+
 	return nil
 }
 
-func (s *MapMarkSet) SetConcurrent() {
-	s.ts = true
+func (s *MapMarkSet) writeKey(k []byte, flush bool) error {
+	if err := s.buf.WriteByte(byte(len(k))); err != nil {
+		return xerrors.Errorf("error writing markset key length to disk: %w", err)
+	}
+	if _, err := s.buf.Write(k); err != nil {
+		return xerrors.Errorf("error writing markset key to disk: %w", err)
+	}
+	if flush {
+		if err := s.buf.Flush(); err != nil {
+			return xerrors.Errorf("error flushing markset buffer to disk: %w", err)
+		}
+	}
+
+	return nil
 }
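For the map markset, the persisted file is just a flat sequence of length-prefixed multihash keys — the same records `writeKey` emits and `Recover` reads back. Below is a small standalone decoder, useful only as an illustration of the format; `readMapMarkSetFile` is hypothetical, and rebuilding a `CidV1`/`Raw` cid here mirrors what `readRawCid` in checkpoint.go does, whereas the markset itself only compares raw key bytes.

```go
package main

import (
	"bufio"
	"io"
	"os"

	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

// readMapMarkSetFile decodes a persisted map markset: repeated
// [1-byte length][multihash bytes] records until EOF.
func readMapMarkSetFile(path string) ([]cid.Cid, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close() //nolint:errcheck

	buf := bufio.NewReader(f)
	var out []cid.Cid
	for {
		sz, err := buf.ReadByte()
		if err == io.EOF {
			return out, nil
		}
		if err != nil {
			return nil, err
		}

		key := make([]byte, int(sz))
		if _, err := io.ReadFull(buf, key); err != nil {
			return nil, err
		}

		h, err := mh.Cast(key)
		if err != nil {
			return nil, err
		}
		out = append(out, cid.NewCidV1(cid.Raw, h))
	}
}
```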
env.New("test", 0) + if err != nil { + t.Fatal(err) + } + defer visitor.Close() //nolint:errcheck + + makeCid := func(key string) cid.Cid { + h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + return cid.NewCidV1(cid.Raw, h) + } + + mustVisit := func(v ObjectVisitor, cid cid.Cid) { + visit, err := v.Visit(cid) + if err != nil { + t.Fatal(err) + } + + if !visit { + t.Fatal("object should be visited") + } + } + + mustNotVisit := func(v ObjectVisitor, cid cid.Cid) { + visit, err := v.Visit(cid) + if err != nil { + t.Fatal(err) + } + + if visit { + t.Fatal("unexpected visit") + } + } + + k1 := makeCid("a") + k2 := makeCid("b") + k3 := makeCid("c") + k4 := makeCid("d") + + mustVisit(visitor, k1) + mustVisit(visitor, k2) + + if err := visitor.BeginCriticalSection(); err != nil { + t.Fatal(err) + } + + mustVisit(visitor, k3) + mustVisit(visitor, k4) + + mustNotVisit(visitor, k1) + mustNotVisit(visitor, k2) + mustNotVisit(visitor, k3) + mustNotVisit(visitor, k4) + + if err := visitor.Close(); err != nil { + t.Fatal(err) + } + + visitor, err = env.Recover("test") + if err != nil { + t.Fatal(err) + } + + mustNotVisit(visitor, k1) + mustNotVisit(visitor, k2) + mustNotVisit(visitor, k3) + mustNotVisit(visitor, k4) + + visitor.EndCriticalSection() + + if err := visitor.Close(); err != nil { + t.Fatal(err) + } + + _, err = env.Recover("test") + if err == nil { + t.Fatal("expected recovery to fail") + } +} + +func testMarkSetRecovery(t *testing.T, lsType string) { + path, err := ioutil.TempDir("", "markset.*") + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + _ = os.RemoveAll(path) + }) + + env, err := OpenMarkSetEnv(path, lsType) + if err != nil { + t.Fatal(err) + } + defer env.Close() //nolint:errcheck + + markSet, err := env.New("test", 0) + if err != nil { + t.Fatal(err) + } + + makeCid := func(key string) cid.Cid { + h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + return cid.NewCidV1(cid.Raw, h) + } + + mustHave := func(s MarkSet, cid cid.Cid) { + t.Helper() + has, err := s.Has(cid) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("mark not found") + } + } + + mustNotHave := func(s MarkSet, cid cid.Cid) { + t.Helper() + has, err := s.Has(cid) + if err != nil { + t.Fatal(err) + } + + if has { + t.Fatal("unexpected mark") + } + } + + k1 := makeCid("a") + k2 := makeCid("b") + k3 := makeCid("c") + k4 := makeCid("d") + + if err := markSet.Mark(k1); err != nil { + t.Fatal(err) + } + if err := markSet.Mark(k2); err != nil { + t.Fatal(err) + } + + mustHave(markSet, k1) + mustHave(markSet, k2) + mustNotHave(markSet, k3) + mustNotHave(markSet, k4) + + if err := markSet.BeginCriticalSection(); err != nil { + t.Fatal(err) + } + + if err := markSet.Mark(k3); err != nil { + t.Fatal(err) + } + if err := markSet.Mark(k4); err != nil { + t.Fatal(err) + } + + mustHave(markSet, k1) + mustHave(markSet, k2) + mustHave(markSet, k3) + mustHave(markSet, k4) + + if err := markSet.Close(); err != nil { + t.Fatal(err) + } + + markSet, err = env.Recover("test") + if err != nil { + t.Fatal(err) + } + + mustHave(markSet, k1) + mustHave(markSet, k2) + mustHave(markSet, k3) + mustHave(markSet, k4) + + markSet.EndCriticalSection() + + if err := markSet.Close(); err != nil { + t.Fatal(err) + } + + _, err = env.Recover("test") + if err == nil { + t.Fatal("expected recovery to fail") + } +} + +func testMarkSetMarkMany(t *testing.T, lsType string) { + path, err := ioutil.TempDir("", "markset.*") + if err != 
+func testMarkSetMarkMany(t *testing.T, lsType string) {
+	path, err := ioutil.TempDir("", "markset.*")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Cleanup(func() {
+		_ = os.RemoveAll(path)
+	})
+
+	env, err := OpenMarkSetEnv(path, lsType)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer env.Close() //nolint:errcheck
+
+	markSet, err := env.New("test", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	makeCid := func(key string) cid.Cid {
+		h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		return cid.NewCidV1(cid.Raw, h)
+	}
+
+	mustHave := func(s MarkSet, cid cid.Cid) {
+		t.Helper()
+		has, err := s.Has(cid)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if !has {
+			t.Fatal("mark not found")
+		}
+	}
+
+	mustNotHave := func(s MarkSet, cid cid.Cid) {
+		t.Helper()
+		has, err := s.Has(cid)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if has {
+			t.Fatal("unexpected mark")
+		}
+	}
+
+	k1 := makeCid("a")
+	k2 := makeCid("b")
+	k3 := makeCid("c")
+	k4 := makeCid("d")
+
+	if err := markSet.MarkMany([]cid.Cid{k1, k2}); err != nil {
+		t.Fatal(err)
+	}
+
+	mustHave(markSet, k1)
+	mustHave(markSet, k2)
+	mustNotHave(markSet, k3)
+	mustNotHave(markSet, k4)
+
+	if err := markSet.BeginCriticalSection(); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := markSet.MarkMany([]cid.Cid{k3, k4}); err != nil {
+		t.Fatal(err)
+	}
+
+	mustHave(markSet, k1)
+	mustHave(markSet, k2)
+	mustHave(markSet, k3)
+	mustHave(markSet, k4)
+
+	if err := markSet.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	markSet, err = env.Recover("test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	mustHave(markSet, k1)
+	mustHave(markSet, k2)
+	mustHave(markSet, k3)
+	mustHave(markSet, k4)
+
+	markSet.EndCriticalSection()
+
+	if err := markSet.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = env.Recover("test")
+	if err == nil {
+		t.Fatal("expected recovery to fail")
+	}
+}
diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go
index f6715ea33..6a65e01df 100644
--- a/blockstore/splitstore/splitstore.go
+++ b/blockstore/splitstore/splitstore.go
@@ -129,8 +129,6 @@ type SplitStore struct {
 
 	headChangeMx sync.Mutex
 
-	coldPurgeSize int
-
 	chain ChainAccessor
 	ds    dstore.Datastore
 	cold  bstore.Blockstore
@@ -158,6 +156,10 @@ type SplitStore struct {
 	txnRefsMx  sync.Mutex
 	txnRefs    map[cid.Cid]struct{}
 	txnMissing map[cid.Cid]struct{}
+	txnMarkSet MarkSet
+	txnSyncMx   sync.Mutex
+	txnSyncCond sync.Cond
+	txnSync     bool
 
 	// registered protectors
 	protectors []func(func(cid.Cid) error) error
@@ -186,10 +188,6 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) {
 		return nil, err
 	}
 
-	if !markSetEnv.SupportsVisitor() {
-		return nil, xerrors.Errorf("markset type does not support atomic visitors")
-	}
-
 	// and now we can make a SplitStore
 	ss := &SplitStore{
 		cfg:        cfg,
@@ -198,11 +196,10 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) {
 		cold:       cold,
 		hot:        hots,
 		markSetEnv: markSetEnv,
-
-		coldPurgeSize: defaultColdPurgeSize,
 	}
 
 	ss.txnViewsCond.L = &ss.txnViewsMx
+	ss.txnSyncCond.L = &ss.txnSyncMx
 	ss.ctx, ss.cancel = context.WithCancel(context.Background())
 
 	if enableDebugLog {
@@ -212,6 +209,14 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) {
 		}
 	}
 
+	if ss.checkpointExists() {
+		log.Info("found compaction checkpoint; resuming compaction")
+		if err := ss.completeCompaction(); err != nil {
+			markSetEnv.Close() //nolint:errcheck
+			return nil, xerrors.Errorf("error resuming compaction: %w", err)
+		}
+	}
+
 	return ss, nil
 }
@@ -234,6 +239,20 @@ func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
 	s.txnLk.RLock()
 	defer s.txnLk.RUnlock()
 
+	// critical section
+	if s.txnMarkSet != nil {
+		has, err := s.txnMarkSet.Has(cid)
+		if err != nil {
+			return false, err
+		}
+
+		if has {
+			return s.has(cid)
+		}
+
+		return s.cold.Has(ctx, cid)
+	}
+
 	has, err := s.hot.Has(ctx, cid)
 
 	if err != nil {
@@ -261,6 +280,20 @@ func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
 	s.txnLk.RLock()
 	defer s.txnLk.RUnlock()
 
+	// critical section
+	if s.txnMarkSet != nil {
+		has, err := s.txnMarkSet.Has(cid)
+		if err != nil {
+			return nil, err
+		}
+
+		if has {
+			return s.get(cid)
+		}
+
+		return s.cold.Get(ctx, cid)
+	}
+
 	blk, err := s.hot.Get(ctx, cid)
 
 	switch err {
@@ -298,6 +331,20 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
 	s.txnLk.RLock()
 	defer s.txnLk.RUnlock()
 
+	// critical section
+	if s.txnMarkSet != nil {
+		has, err := s.txnMarkSet.Has(cid)
+		if err != nil {
+			return 0, err
+		}
+
+		if has {
+			return s.getSize(cid)
+		}
+
+		return s.cold.GetSize(ctx, cid)
+	}
+
 	size, err := s.hot.GetSize(ctx, cid)
 
 	switch err {
@@ -336,6 +383,12 @@ func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error {
 
 	s.debug.LogWrite(blk)
 
+	// critical section
+	if s.txnMarkSet != nil {
+		s.markLiveRefs([]cid.Cid{blk.Cid()})
+		return nil
+	}
+
 	s.trackTxnRef(blk.Cid())
 	return nil
 }
@@ -381,6 +434,12 @@ func (s *SplitStore) PutMany(ctx context.Context, blks []blocks.Block) error {
 
 	s.debug.LogWriteMany(blks)
 
+	// critical section
+	if s.txnMarkSet != nil {
+		s.markLiveRefs(batch)
+		return nil
+	}
+
 	s.trackTxnRefMany(batch)
 	return nil
 }
@@ -440,6 +499,23 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) error) error {
 		return cb(data)
 	}
 
+	// critical section
+	s.txnLk.RLock() // the lock is released in protectView if we are not in critical section
+	if s.txnMarkSet != nil {
+		has, err := s.txnMarkSet.Has(cid)
+		s.txnLk.RUnlock()
+
+		if err != nil {
+			return err
+		}
+
+		if has {
+			return s.view(cid, cb)
+		}
+
+		return s.cold.View(ctx, cid, cb)
+	}
+
 	// views are (optimistically) protected two-fold:
 	// - if there is an active transaction, then the reference is protected.
 	// - if there is no active transaction, active views are tracked in a
@@ -589,6 +665,11 @@ func (s *SplitStore) Close() error {
 	}
 
 	if atomic.LoadInt32(&s.compacting) == 1 {
+		s.txnSyncMx.Lock()
+		s.txnSync = true
+		s.txnSyncCond.Broadcast()
+		s.txnSyncMx.Unlock()
+
 		log.Warn("close with ongoing compaction in progress; waiting for it to finish...")
 		for atomic.LoadInt32(&s.compacting) == 1 {
 			time.Sleep(time.Second)
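The same markset-gated dispatch now appears in `Has`, `Get`, `GetSize` and `View`: while `txnMarkSet` is non-nil, the markset is the source of truth — a hit means the object is still in the hotstore, a miss means it is treated as already cold. Condensed into one helper for clarity; `readThrough`, `hot` and `cold` are hypothetical and do not exist in the PR.

```go
package main

import (
	cid "github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/blockstore/splitstore"
)

// readThrough restates the critical-section read pattern shared by
// Has/Get/GetSize/View: consult the markset first, then pick a store.
func readThrough(markSet splitstore.MarkSet, c cid.Cid,
	hot, cold func(cid.Cid) error) error {

	if markSet == nil {
		// no critical section in progress: normal hot-first read path
		return hot(c)
	}

	live, err := markSet.Has(c)
	if err != nil {
		return err
	}
	if live {
		// marked live: guaranteed to still be in the hotstore
		return hot(c)
	}
	// unmarked: considered already purged; read from the coldstore
	return cold(c)
}
```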
diff --git a/blockstore/splitstore/splitstore_check.go b/blockstore/splitstore/splitstore_check.go
index c83ed7b28..d7c9b2ef9 100644
--- a/blockstore/splitstore/splitstore_check.go
+++ b/blockstore/splitstore/splitstore_check.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"sync"
 	"sync/atomic"
 	"time"
 
@@ -67,7 +68,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 	}
 	defer output.Close() //nolint:errcheck
 
+	var mx sync.Mutex
 	write := func(format string, args ...interface{}) {
+		mx.Lock()
+		defer mx.Unlock()
 		_, err := fmt.Fprintf(output, format+"\n", args...)
 		if err != nil {
 			log.Warnf("error writing check output: %s", err)
@@ -82,9 +86,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 	write("compaction index: %d", s.compactionIndex)
 	write("--")
 
-	var coldCnt, missingCnt int64
+	coldCnt := new(int64)
+	missingCnt := new(int64)
 
-	visitor, err := s.markSetEnv.CreateVisitor("check", 0)
+	visitor, err := s.markSetEnv.New("check", 0)
 	if err != nil {
 		return xerrors.Errorf("error creating visitor: %w", err)
 	}
@@ -111,10 +116,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 			}
 
 			if has {
-				coldCnt++
+				atomic.AddInt64(coldCnt, 1)
 				write("cold object reference: %s", c)
 			} else {
-				missingCnt++
+				atomic.AddInt64(missingCnt, 1)
 				write("missing object reference: %s", c)
 				return errStopWalk
 			}
@@ -128,9 +133,9 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 		return err
 	}
 
-	log.Infow("check done", "cold", coldCnt, "missing", missingCnt)
+	log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt)
 	write("--")
-	write("cold: %d missing: %d", coldCnt, missingCnt)
+	write("cold: %d missing: %d", *coldCnt, *missingCnt)
 	write("DONE")
 
 	return nil
diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go
index 13ab90ac0..ae123abc9 100644
--- a/blockstore/splitstore/splitstore_compact.go
+++ b/blockstore/splitstore/splitstore_compact.go
@@ -3,8 +3,10 @@ package splitstore
 import (
 	"bytes"
 	"errors"
+	"os"
+	"path/filepath"
 	"runtime"
-	"sort"
+	"sync"
 	"sync/atomic"
 	"time"
 
@@ -47,6 +49,10 @@ var (
 	// SyncGapTime is the time delay from a tipset's min timestamp before we decide
 	// there is a sync gap
 	SyncGapTime = time.Minute
+
+	// SyncWaitTime is the time delay from a tipset's min timestamp before we decide
+	// we have synced.
+	SyncWaitTime = 30 * time.Second
 )
 
 var (
@@ -56,8 +62,6 @@ var (
 
 const (
 	batchSize = 16384
-
-	defaultColdPurgeSize = 7_000_000
 )
 
 func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
@@ -140,9 +144,9 @@ func (s *SplitStore) isNearUpgrade(epoch abi.ChainEpoch) bool {
 // transactionally protect incoming tipsets
 func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
 	s.txnLk.RLock()
-	defer s.txnLk.RUnlock()
 
 	if !s.txnActive {
+		s.txnLk.RUnlock()
 		return
 	}
 
@@ -151,12 +155,115 @@ func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
 		cids = append(cids, ts.Cids()...)
 	}
 
+	if len(cids) == 0 {
+		s.txnLk.RUnlock()
+		return
+	}
+
+	// critical section
+	if s.txnMarkSet != nil {
+		curTs := apply[len(apply)-1]
+		timestamp := time.Unix(int64(curTs.MinTimestamp()), 0)
+		doSync := time.Since(timestamp) < SyncWaitTime
+		go func() {
+			if doSync {
+				defer func() {
+					s.txnSyncMx.Lock()
+					defer s.txnSyncMx.Unlock()
+					s.txnSync = true
+					s.txnSyncCond.Broadcast()
+				}()
+			}
+
+			defer s.txnLk.RUnlock()
+			s.markLiveRefs(cids)
+		}()
+
+		return
+	}
+
 	s.trackTxnRefMany(cids)
+	s.txnLk.RUnlock()
+}
+
+func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
+	log.Debugf("marking %d live refs", len(cids))
+	startMark := time.Now()
+
+	count := new(int32)
+	visitor := newConcurrentVisitor()
+	walkObject := func(c cid.Cid) error {
+		return s.walkObjectIncomplete(c, visitor,
+			func(c cid.Cid) error {
+				if isUnitaryObject(c) {
+					return errStopWalk
+				}
+
+				visit, err := s.txnMarkSet.Visit(c)
+				if err != nil {
+					return xerrors.Errorf("error visiting object: %w", err)
+				}
+
+				if !visit {
+					return errStopWalk
+				}
+
+				atomic.AddInt32(count, 1)
+				return nil
+			},
+			func(missing cid.Cid) error {
+				log.Warnf("missing object reference %s in %s", missing, c)
+				return errStopWalk
+			})
+	}
+
+	// optimize the common case of single put
+	if len(cids) == 1 {
+		if err := walkObject(cids[0]); err != nil {
+			log.Errorf("error marking tipset refs: %s", err)
+		}
+		log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
+		return
+	}
+
+	workch := make(chan cid.Cid, len(cids))
+	for _, c := range cids {
+		workch <- c
+	}
+	close(workch)
+
+	worker := func() error {
+		for c := range workch {
+			if err := walkObject(c); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	}
+
+	workers := runtime.NumCPU() / 2
+	if workers < 2 {
+		workers = 2
+	}
+	if workers > len(cids) {
+		workers = len(cids)
+	}
+
+	g := new(errgroup.Group)
+	for i := 0; i < workers; i++ {
+		g.Go(worker)
+	}
+
+	if err := g.Wait(); err != nil {
+		log.Errorf("error marking tipset refs: %s", err)
+	}
+
+	log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
 }
 
 // transactionally protect a view
 func (s *SplitStore) protectView(c cid.Cid) {
-	s.txnLk.RLock()
+	// the txnLk is held for read
 	defer s.txnLk.RUnlock()
 
 	if s.txnActive {
@@ -227,7 +334,7 @@ func (s *SplitStore) trackTxnRefMany(cids []cid.Cid) {
 }
 
 // protect all pending transactional references
-func (s *SplitStore) protectTxnRefs(markSet MarkSetVisitor) error {
+func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
 	for {
 		var txnRefs map[cid.Cid]struct{}
 
@@ -299,14 +406,14 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSetVisitor) error {
 
 // transactionally protect a reference by walking the object and marking.
 // concurrent markings are short circuited by checking the markset.
-func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSetVisitor) error {
+func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
 	if err := s.checkClosing(); err != nil {
 		return err
 	}
 
 	// Note: cold objects are deleted heaviest first, so the consituents of an object
 	// cannot be deleted before the object itself.
-	return s.walkObjectIncomplete(root, tmpVisitor(),
+	return s.walkObjectIncomplete(root, newTmpVisitor(),
 		func(c cid.Cid) error {
 			if isUnitaryObject(c) {
 				return errStopWalk
@@ -386,6 +493,12 @@ func (s *SplitStore) compact(curTs *types.TipSet) {
 }
 
 func (s *SplitStore) doCompact(curTs *types.TipSet) error {
+	if s.checkpointExists() {
+		// this really shouldn't happen, but if it somehow does, it means that the hotstore
+		// might be potentially inconsistent; abort compaction and notify the user to intervene.
+		return xerrors.Errorf("checkpoint exists; aborting compaction")
+	}
+
 	currentEpoch := curTs.Height()
 	boundaryEpoch := currentEpoch - CompactionBoundary
 
@@ -397,7 +510,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 
 	log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex)
 
-	markSet, err := s.markSetEnv.CreateVisitor("live", s.markSetSize)
+	markSet, err := s.markSetEnv.New("live", s.markSetSize)
 	if err != nil {
 		return xerrors.Errorf("error creating mark set: %w", err)
 	}
@@ -408,9 +521,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 		return err
 	}
 
-	// we are ready for concurrent marking
-	s.beginTxnMarking(markSet)
-
 	// 0. track all protected references at beginning of compaction; anything added later should
 	// be transactionally protected by the write
 	log.Info("protecting references with registered protectors")
@@ -424,7 +534,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 	log.Info("marking reachable objects")
 	startMark := time.Now()
 
-	var count int64
+	count := new(int64)
 	err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{},
 		func(c cid.Cid) error {
 			if isUnitaryObject(c) {
@@ -440,7 +550,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 				return errStopWalk
 			}
 
-			count++
+			atomic.AddInt64(count, 1)
 			return nil
 		})
 
@@ -448,9 +558,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 		return xerrors.Errorf("error marking: %w", err)
 	}
 
-	s.markSetSize = count + count>>2 // overestimate a bit
+	s.markSetSize = *count + *count>>2 // overestimate a bit
 
-	log.Infow("marking done", "took", time.Since(startMark), "marked", count)
+	log.Infow("marking done", "took", time.Since(startMark), "marked", *count)
 
 	if err := s.checkClosing(); err != nil {
 		return err
@@ -470,10 +580,15 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 	log.Info("collecting cold objects")
 	startCollect := time.Now()
 
+	coldw, err := NewColdSetWriter(s.coldSetPath())
+	if err != nil {
+		return xerrors.Errorf("error creating coldset: %w", err)
+	}
+	defer coldw.Close() //nolint:errcheck
+
 	// some stats for logging
 	var hotCnt, coldCnt int
 
-	cold := make([]cid.Cid, 0, s.coldPurgeSize)
 	err = s.hot.ForEachKey(func(c cid.Cid) error {
 		// was it marked?
@@ -470,10 +580,15 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 	log.Info("collecting cold objects")
 	startCollect := time.Now()

+	coldw, err := NewColdSetWriter(s.coldSetPath())
+	if err != nil {
+		return xerrors.Errorf("error creating coldset: %w", err)
+	}
+	defer coldw.Close() //nolint:errcheck
+
 	// some stats for logging
 	var hotCnt, coldCnt int

-	cold := make([]cid.Cid, 0, s.coldPurgeSize)
 	err = s.hot.ForEachKey(func(c cid.Cid) error {
 		// was it marked?
 		mark, err := markSet.Has(c)
@@ -487,7 +602,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 		}

 		// it's cold, mark it as candidate for move
-		cold = append(cold, c)
+		if err := coldw.Write(c); err != nil {
+			return xerrors.Errorf("error writing cid to coldstore: %w", err)
+		}
 		coldCnt++

 		return nil
@@ -497,12 +614,12 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 		return xerrors.Errorf("error collecting cold objects: %w", err)
 	}

-	log.Infow("cold collection done", "took", time.Since(startCollect))
-
-	if coldCnt > 0 {
-		s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit
+	if err := coldw.Close(); err != nil {
+		return xerrors.Errorf("error closing coldset: %w", err)
 	}

+	log.Infow("cold collection done", "took", time.Since(startCollect))
+
 	log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt)
 	stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt)))
 	stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt)))
@@ -520,11 +637,17 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 		return err
 	}

+	coldr, err := NewColdSetReader(s.coldSetPath())
+	if err != nil {
+		return xerrors.Errorf("error opening coldset: %w", err)
+	}
+	defer coldr.Close() //nolint:errcheck
+
 	// 3. copy the cold objects to the coldstore -- if we have one
 	if !s.cfg.DiscardColdBlocks {
 		log.Info("moving cold objects to the coldstore")
 		startMove := time.Now()
-		err = s.moveColdBlocks(cold)
+		err = s.moveColdBlocks(coldr)
 		if err != nil {
 			return xerrors.Errorf("error moving cold objects: %w", err)
 		}
@@ -533,41 +656,64 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 		if err := s.checkClosing(); err != nil {
 			return err
 		}
+
+		if err := coldr.Reset(); err != nil {
+			return xerrors.Errorf("error resetting coldset: %w", err)
+		}
 	}

-	// 4. sort cold objects so that the dags with most references are deleted first
-	//    this ensures that we can't refer to a dag with its consituents already deleted, ie
-	//    we lave no dangling references.
-	log.Info("sorting cold objects")
-	startSort := time.Now()
-	err = s.sortObjects(cold)
-	if err != nil {
-		return xerrors.Errorf("error sorting objects: %w", err)
-	}
-	log.Infow("sorting done", "took", time.Since(startSort))
-
-	// 4.1 protect transactional refs once more
-	//     strictly speaking, this is not necessary as purge will do it before deleting each
-	//     batch. however, there is likely a largish number of references accumulated during
-	//     ths sort and this protects before entering pruge context.
-	err = s.protectTxnRefs(markSet)
-	if err != nil {
-		return xerrors.Errorf("error protecting transactional refs: %w", err)
+	// 4. Purge cold objects with checkpointing for recovery.
+	// This is the critical section of compaction, whereby any cold object not in the markSet is
+	// considered already deleted.
+	// We delete cold objects in batches, holding the transaction lock, where we check the markSet
+	// again for new references created by the VM.
+	// After each batch, we write a checkpoint to disk; if the process is interrupted before completion,
+	// the process will continue from the checkpoint in the next recovery.
+	if err := s.beginCriticalSection(markSet); err != nil {
+		return xerrors.Errorf("error beginning critical section: %w", err)
 	}

 	if err := s.checkClosing(); err != nil {
 		return err
 	}

+	// wait for the head to catch up so that the current tipset is marked
+	s.waitForSync()
+
+	if err := s.checkClosing(); err != nil {
+		return err
+	}
+
+	checkpoint, err := NewCheckpoint(s.checkpointPath())
+	if err != nil {
+		return xerrors.Errorf("error creating checkpoint: %w", err)
+	}
+	defer checkpoint.Close() //nolint:errcheck
+
 	// 5. purge cold objects from the hotstore, taking protected references into account
 	log.Info("purging cold objects from the hotstore")
 	startPurge := time.Now()
-	err = s.purge(cold, markSet)
+	err = s.purge(coldr, checkpoint, markSet)
 	if err != nil {
-		return xerrors.Errorf("error purging cold blocks: %w", err)
+		return xerrors.Errorf("error purging cold objects: %w", err)
 	}
 	log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))

+	s.endCriticalSection()
+
+	if err := checkpoint.Close(); err != nil {
+		log.Warnf("error closing checkpoint: %s", err)
+	}
+	if err := os.Remove(s.checkpointPath()); err != nil {
+		log.Warnf("error removing checkpoint: %s", err)
+	}
+	if err := coldr.Close(); err != nil {
+		log.Warnf("error closing coldset: %s", err)
+	}
+	if err := os.Remove(s.coldSetPath()); err != nil {
+		log.Warnf("error removing coldset: %s", err)
+	}
+
 	// we are done; do some housekeeping
 	s.endTxnProtect()
 	s.gcHotstore()
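The step-4 comment above describes the new purge discipline: delete in small batches and persist a checkpoint after each one, so a crash can resume instead of restarting. A minimal sketch of that recovery pattern, with plain strings standing in for CIDs and a hypothetical deleteBatch callback rather than the actual Checkpoint/ColdSet types:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
)

// purgeWithCheckpoint deletes keys in batches, recording the last key of each
// completed batch; an interrupted run can later skip everything up to the
// last recorded key.
func purgeWithCheckpoint(keys []string, checkpointPath string, deleteBatch func([]string) error) error {
	f, err := os.OpenFile(checkpointPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	w := bufio.NewWriter(f)

	const batchSize = 2
	for i := 0; i < len(keys); i += batchSize {
		end := i + batchSize
		if end > len(keys) {
			end = len(keys)
		}
		if err := deleteBatch(keys[i:end]); err != nil {
			return err
		}
		// checkpoint after the batch: on restart, resume past this key
		fmt.Fprintln(w, keys[end-1])
		if err := w.Flush(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = purgeWithCheckpoint([]string{"a", "b", "c", "d", "e"}, "/tmp/checkpoint",
		func(batch []string) error {
			fmt.Println("deleting", batch)
			return nil
		})
}
```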
@@ -598,12 +744,51 @@ func (s *SplitStore) beginTxnProtect() {
 	defer s.txnLk.Unlock()

 	s.txnActive = true
+	s.txnSync = false
 	s.txnRefs = make(map[cid.Cid]struct{})
 	s.txnMissing = make(map[cid.Cid]struct{})
 }

-func (s *SplitStore) beginTxnMarking(markSet MarkSetVisitor) {
-	markSet.SetConcurrent()
+func (s *SplitStore) beginCriticalSection(markSet MarkSet) error {
+	log.Info("beginning critical section")
+
+	// protect the bulk of the references up front, before the markset enters its critical section
+	if err := s.protectTxnRefs(markSet); err != nil {
+		return xerrors.Errorf("error protecting transactional references: %w", err)
+	}
+
+	if err := markSet.BeginCriticalSection(); err != nil {
+		return xerrors.Errorf("error beginning critical section for markset: %w", err)
+	}
+
+	s.txnLk.Lock()
+	defer s.txnLk.Unlock()
+
+	s.txnMarkSet = markSet
+
+	// and do it again while holding the lock, to mark references created in the meantime and
+	// avoid races of the form Has->txnRef->enterCS->Get failing because the object is not yet
+	// in the markset
+	if err := s.protectTxnRefs(markSet); err != nil {
+		return xerrors.Errorf("error protecting transactional references: %w", err)
+	}
+
+	return nil
+}
+
+func (s *SplitStore) waitForSync() {
+	log.Info("waiting for sync")
+	startWait := time.Now()
+	defer func() {
+		log.Infow("waiting for sync done", "took", time.Since(startWait))
+	}()
+
+	s.txnSyncMx.Lock()
+	defer s.txnSyncMx.Unlock()
+
+	for !s.txnSync {
+		s.txnSyncCond.Wait()
+	}
 }

 func (s *SplitStore) endTxnProtect() {
@@ -615,32 +800,51 @@ func (s *SplitStore) endTxnProtect() {
 	}

 	s.txnActive = false
+	s.txnSync = false
 	s.txnRefs = nil
 	s.txnMissing = nil
+	s.txnMarkSet = nil
+}
+
+func (s *SplitStore) endCriticalSection() {
+	log.Info("ending critical section")
+
+	s.txnLk.Lock()
+	defer s.txnLk.Unlock()
+
+	s.txnMarkSet.EndCriticalSection()
+	s.txnMarkSet = nil
 }
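waitForSync and the HeadChange path shown earlier coordinate through a condition variable: compaction blocks until a recent-enough head has been marked, and the marking goroutine broadcasts when that happens. The same handshake in a minimal standalone form (names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type syncFlag struct {
	mx   sync.Mutex
	cond *sync.Cond
	done bool
}

func newSyncFlag() *syncFlag {
	f := &syncFlag{}
	f.cond = sync.NewCond(&f.mx)
	return f
}

// wait blocks until set() has run, re-checking the predicate after every
// wakeup, exactly as waitForSync does with txnSync.
func (f *syncFlag) wait() {
	f.mx.Lock()
	defer f.mx.Unlock()
	for !f.done {
		f.cond.Wait()
	}
}

func (f *syncFlag) set() {
	f.mx.Lock()
	defer f.mx.Unlock()
	f.done = true
	f.cond.Broadcast()
}

func main() {
	f := newSyncFlag()
	go func() {
		time.Sleep(10 * time.Millisecond) // pretend to mark the current tipset
		f.set()
	}()
	f.wait()
	fmt.Println("head is marked; safe to purge")
}
```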
 func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch, visitor ObjectVisitor, f func(cid.Cid) error) error {
-	var walked *cid.Set
-	toWalk := ts.Cids()
-	walkCnt := 0
-	scanCnt := 0
+	var walked ObjectVisitor
+	var mx sync.Mutex
+	// we copy the tipset first into a new slice, which allows us to reuse it in every epoch.
+	toWalk := make([]cid.Cid, len(ts.Cids()))
+	copy(toWalk, ts.Cids())
+	walkCnt := new(int64)
+	scanCnt := new(int64)

 	stopWalk := func(_ cid.Cid) error { return errStopWalk }

 	walkBlock := func(c cid.Cid) error {
-		if !walked.Visit(c) {
+		visit, err := walked.Visit(c)
+		if err != nil {
+			return err
+		}
+		if !visit {
 			return nil
 		}

-		walkCnt++
+		atomic.AddInt64(walkCnt, 1)

 		if err := f(c); err != nil {
 			return err
 		}

 		var hdr types.BlockHeader
-		err := s.view(c, func(data []byte) error {
+		err = s.view(c, func(data []byte) error {
 			return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
 		})

@@ -676,11 +880,13 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch, visitor ObjectVisitor, f func(cid.Cid) error) error {
 			if err := s.walkObject(hdr.ParentStateRoot, visitor, f); err != nil {
 				return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
 			}
-			scanCnt++
+			atomic.AddInt64(scanCnt, 1)
 		}

 		if hdr.Height > 0 {
+			mx.Lock()
 			toWalk = append(toWalk, hdr.Parents...)
+			mx.Unlock()
 		}

 		return nil
@@ -692,20 +898,43 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch, visitor ObjectVisitor, f func(cid.Cid) error) error {
 			return err
 		}

+		workers := len(toWalk)
+		if workers > runtime.NumCPU()/2 {
+			workers = runtime.NumCPU() / 2
+		}
+		if workers < 2 {
+			workers = 2
+		}
+
 		// the walk is BFS, so we can reset the walked set in every iteration and avoid building up
 		// a set that contains all blocks (1M epochs -> 5M blocks -> 200MB worth of memory and growing
 		// over time)
-		walked = cid.NewSet()
-		walking := toWalk
-		toWalk = nil
-		for _, c := range walking {
-			if err := walkBlock(c); err != nil {
-				return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
-			}
+		walked = newConcurrentVisitor()
+		workch := make(chan cid.Cid, len(toWalk))
+		for _, c := range toWalk {
+			workch <- c
+		}
+		close(workch)
+		toWalk = toWalk[:0]
+
+		g := new(errgroup.Group)
+		for i := 0; i < workers; i++ {
+			g.Go(func() error {
+				for c := range workch {
+					if err := walkBlock(c); err != nil {
+						return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
+					}
+				}
+				return nil
+			})
+		}
+
+		if err := g.Wait(); err != nil {
+			return err
 		}
 	}

-	log.Infow("chain walk done", "walked", walkCnt, "scanned", scanCnt)
+	log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt)

 	return nil
 }

@@ -824,7 +1053,7 @@ func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) error {
 	return nil
 }

-// internal version used by walk
+// internal version used during compaction and related operations
 func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error {
 	if isIdentiyCid(c) {
 		data, err := decodeIdentityCid(c)
@@ -859,10 +1088,34 @@ func (s *SplitStore) has(c cid.Cid) (bool, error) {
 	return s.cold.Has(s.ctx, c)
 }

-func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
+func (s *SplitStore) get(c cid.Cid) (blocks.Block, error) {
+	blk, err := s.hot.Get(s.ctx, c)
+	switch err {
+	case nil:
+		return blk, nil
+	case bstore.ErrNotFound:
+		return s.cold.Get(s.ctx, c)
+	default:
+		return nil, err
+	}
+}
+
+func (s *SplitStore) getSize(c cid.Cid) (int, error) {
+	sz, err := s.hot.GetSize(s.ctx, c)
+	switch err {
+	case nil:
+		return sz, nil
+	case bstore.ErrNotFound:
+		return s.cold.GetSize(s.ctx, c)
+	default:
+		return 0, err
+	}
+}
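The new get/getSize helpers above capture the splitstore's read path: try the hotstore, fall back to the coldstore only on not-found, and propagate any other error untouched. The same shape in a standalone sketch (toy store type; a local sentinel stands in for bstore.ErrNotFound):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type store map[string]string

func (s store) get(k string) (string, error) {
	v, ok := s[k]
	if !ok {
		return "", errNotFound
	}
	return v, nil
}

// get tries the hot store first and falls back to cold only when the key is
// missing; any other error is returned as-is, as in SplitStore.get.
func get(hot, cold store, k string) (string, error) {
	v, err := hot.get(k)
	switch {
	case err == nil:
		return v, nil
	case errors.Is(err, errNotFound):
		return cold.get(k)
	default:
		return "", err
	}
}

func main() {
	hot := store{"a": "1"}
	cold := store{"b": "2"}
	fmt.Println(get(hot, cold, "a")) // 1 <nil>
	fmt.Println(get(hot, cold, "b")) // 2 <nil>
	fmt.Println(get(hot, cold, "c")) // not found
}
```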
+func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error {
 	batch := make([]blocks.Block, 0, batchSize)

-	for _, c := range cold {
+	err := coldr.ForEach(func(c cid.Cid) error {
 		if err := s.checkClosing(); err != nil {
 			return err
 		}
@@ -871,7 +1124,7 @@ func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error {
 		if err != nil {
 			if err == bstore.ErrNotFound {
 				log.Warnf("hotstore missing block %s", c)
-				continue
+				return nil
 			}

 			return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err)
@@ -885,6 +1138,12 @@ func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error {
 			}
 			batch = batch[:0]
 		}
+
+		return nil
+	})
+
+	if err != nil {
+		return xerrors.Errorf("error iterating coldset: %w", err)
 	}

 	if len(batch) > 0 {
@@ -897,177 +1156,202 @@ func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error {
 	return nil
 }

-// sorts a slice of objects heaviest first -- it's a little expensive but worth the
-// guarantee that we don't leave dangling references behind, e.g. if we die in the middle
-// of a purge.
-func (s *SplitStore) sortObjects(cids []cid.Cid) error {
-	// we cache the keys to avoid making a gazillion of strings
-	keys := make(map[cid.Cid]string)
-	key := func(c cid.Cid) string {
-		s, ok := keys[c]
-		if !ok {
-			s = string(c.Hash())
-			keys[c] = s
-		}
-		return s
-	}
-
-	// compute sorting weights as the cumulative number of DAG links
-	weights := make(map[string]int)
-	for _, c := range cids {
-		// this can take quite a while, so check for shutdown with every opportunity
-		if err := s.checkClosing(); err != nil {
-			return err
-		}
-
-		w := s.getObjectWeight(c, weights, key)
-		weights[key(c)] = w
-	}
-
-	// sort!
-	sort.Slice(cids, func(i, j int) bool {
-		wi := weights[key(cids[i])]
-		wj := weights[key(cids[j])]
-		if wi == wj {
-			return bytes.Compare(cids[i].Hash(), cids[j].Hash()) > 0
-		}
-
-		return wi > wj
-	})
-
-	return nil
-}
-
-func (s *SplitStore) getObjectWeight(c cid.Cid, weights map[string]int, key func(cid.Cid) string) int {
-	w, ok := weights[key(c)]
-	if ok {
-		return w
-	}
-
-	// we treat block headers specially to avoid walking the entire chain
-	var hdr types.BlockHeader
-	err := s.view(c, func(data []byte) error {
-		return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
-	})
-	if err == nil {
-		w1 := s.getObjectWeight(hdr.ParentStateRoot, weights, key)
-		weights[key(hdr.ParentStateRoot)] = w1
-
-		w2 := s.getObjectWeight(hdr.Messages, weights, key)
-		weights[key(hdr.Messages)] = w2
-
-		return 1 + w1 + w2
-	}
-
-	var links []cid.Cid
-	err = s.view(c, func(data []byte) error {
-		return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
-			links = append(links, c)
-		})
-	})
-	if err != nil {
-		return 1
-	}
-
-	w = 1
-	for _, c := range links {
-		// these are internal refs, so dags will be dags
-		if c.Prefix().Codec != cid.DagCBOR {
-			w++
-			continue
-		}
-
-		wc := s.getObjectWeight(c, weights, key)
-		weights[key(c)] = wc
-
-		w += wc
-	}
-
-	return w
-}
-
-func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error {
-	if len(cids) == 0 {
-		return nil
-	}
-
-	// we don't delete one giant batch of millions of objects, but rather do smaller batches
-	// so that we don't stop the world for an extended period of time
-	done := false
-	for i := 0; !done; i++ {
-		start := i * batchSize
-		end := start + batchSize
-		if end >= len(cids) {
-			end = len(cids)
-			done = true
-		}
-
-		err := deleteBatch(cids[start:end])
-		if err != nil {
-			return xerrors.Errorf("error deleting batch: %w", err)
-		}
-	}
-
-	return nil
-}
-
-func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error {
-	deadCids := make([]cid.Cid, 0, batchSize)
-	var purgeCnt, liveCnt int
-	defer func() {
-		log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt)
-	}()
-
-	return s.purgeBatch(cids,
-		func(cids []cid.Cid) error {
-			deadCids := deadCids[:0]
-
-			for {
-				if err := s.checkClosing(); err != nil {
-					return err
-				}
-
-				s.txnLk.Lock()
-				if len(s.txnRefs) == 0 {
-					// keep the lock!
-					break
-				}
-
-				// unlock and protect
-				s.txnLk.Unlock()
-
-				err := s.protectTxnRefs(markSet)
-				if err != nil {
-					return xerrors.Errorf("error protecting transactional refs: %w", err)
-				}
-			}
-			defer s.txnLk.Unlock()
-
-			for _, c := range cids {
-				live, err := markSet.Has(c)
-				if err != nil {
-					return xerrors.Errorf("error checking for liveness: %w", err)
-				}
-
-				if live {
-					liveCnt++
-					continue
-				}
-
-				deadCids = append(deadCids, c)
-			}
-
-			err := s.hot.DeleteMany(s.ctx, deadCids)
-			if err != nil {
-				return xerrors.Errorf("error purging cold objects: %w", err)
-			}
-
-			s.debug.LogDelete(deadCids)
-
-			purgeCnt += len(deadCids)
-			return nil
-		})
-}
+func (s *SplitStore) purge(coldr *ColdSetReader, checkpoint *Checkpoint, markSet MarkSet) error {
+	batch := make([]cid.Cid, 0, batchSize)
+	deadCids := make([]cid.Cid, 0, batchSize)
+
+	var purgeCnt, liveCnt int
+	defer func() {
+		log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt)
+	}()
+
+	deleteBatch := func() error {
+		pc, lc, err := s.purgeBatch(batch, deadCids, checkpoint, markSet)
+
+		purgeCnt += pc
+		liveCnt += lc
+		batch = batch[:0]
+
+		return err
+	}
+
+	err := coldr.ForEach(func(c cid.Cid) error {
+		batch = append(batch, c)
+		if len(batch) == batchSize {
+			return deleteBatch()
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	if len(batch) > 0 {
+		return deleteBatch()
+	}
+
+	return nil
+}
+
+func (s *SplitStore) purgeBatch(batch, deadCids []cid.Cid, checkpoint *Checkpoint, markSet MarkSet) (purgeCnt int, liveCnt int, err error) {
+	if err := s.checkClosing(); err != nil {
+		return 0, 0, err
+	}
+
+	s.txnLk.Lock()
+	defer s.txnLk.Unlock()
+
+	for _, c := range batch {
+		has, err := markSet.Has(c)
+		if err != nil {
+			return 0, 0, xerrors.Errorf("error checking markset for liveness: %w", err)
+		}
+
+		if has {
+			liveCnt++
+			continue
+		}
+
+		deadCids = append(deadCids, c)
+	}
+
+	if len(deadCids) == 0 {
+		if err := checkpoint.Set(batch[len(batch)-1]); err != nil {
+			return 0, 0, xerrors.Errorf("error setting checkpoint: %w", err)
+		}
+
+		return 0, liveCnt, nil
+	}
+
+	if err := s.hot.DeleteMany(s.ctx, deadCids); err != nil {
+		return 0, liveCnt, xerrors.Errorf("error purging cold objects: %w", err)
+	}
+
+	s.debug.LogDelete(deadCids)
+	purgeCnt = len(deadCids)
+
+	if err := checkpoint.Set(batch[len(batch)-1]); err != nil {
+		return purgeCnt, liveCnt, xerrors.Errorf("error setting checkpoint: %w", err)
+	}
+
+	return purgeCnt, liveCnt, nil
+}
+
+func (s *SplitStore) coldSetPath() string {
+	return filepath.Join(s.path, "coldset")
+}
+
+func (s *SplitStore) checkpointPath() string {
+	return filepath.Join(s.path, "checkpoint")
+}
+
+func (s *SplitStore) checkpointExists() bool {
+	_, err := os.Stat(s.checkpointPath())
+	return err == nil
+}
+
+func (s *SplitStore) completeCompaction() error {
+	checkpoint, last, err := OpenCheckpoint(s.checkpointPath())
+	if err != nil {
+		return xerrors.Errorf("error opening checkpoint: %w", err)
+	}
+	defer checkpoint.Close() //nolint:errcheck
+
+	coldr, err := NewColdSetReader(s.coldSetPath())
+	if err != nil {
+		return xerrors.Errorf("error opening coldset: %w", err)
+	}
+	defer coldr.Close() //nolint:errcheck
+
+	markSet, err := s.markSetEnv.Recover("live")
+	if err != nil {
+		return xerrors.Errorf("error recovering markset: %w", err)
+	}
+	defer markSet.Close() //nolint:errcheck
+
+	// PURGE
+	log.Info("purging cold objects from the hotstore")
+	startPurge := time.Now()
+	err = s.completePurge(coldr, checkpoint, last, markSet)
+	if err != nil {
+		return xerrors.Errorf("error purging cold objects: %w", err)
+	}
+	log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))
+
+	markSet.EndCriticalSection()
+
+	if err := checkpoint.Close(); err != nil {
+		log.Warnf("error closing checkpoint: %s", err)
+	}
+	if err := os.Remove(s.checkpointPath()); err != nil {
+		log.Warnf("error removing checkpoint: %s", err)
+	}
+	if err := coldr.Close(); err != nil {
+		log.Warnf("error closing coldset: %s", err)
+	}
+	if err := os.Remove(s.coldSetPath()); err != nil {
+		log.Warnf("error removing coldset: %s", err)
+	}
+
+	// Note: at this point we can start the splitstore; a compaction should run on
+	//       the first head change, which will trigger gc on the hotstore.
+	//       We don't mind the second (back-to-back) compaction as the head will
+	//       have advanced during marking and coldset accumulation.
+	return nil
+}
+
+func (s *SplitStore) completePurge(coldr *ColdSetReader, checkpoint *Checkpoint, start cid.Cid, markSet MarkSet) error {
+	if !start.Defined() {
+		return s.purge(coldr, checkpoint, markSet)
+	}
+
+	seeking := true
+	batch := make([]cid.Cid, 0, batchSize)
+	deadCids := make([]cid.Cid, 0, batchSize)
+
+	var purgeCnt, liveCnt int
+	defer func() {
+		log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt)
+	}()
+
+	deleteBatch := func() error {
+		pc, lc, err := s.purgeBatch(batch, deadCids, checkpoint, markSet)
+
+		purgeCnt += pc
+		liveCnt += lc
+		batch = batch[:0]
+
+		return err
+	}
+
+	err := coldr.ForEach(func(c cid.Cid) error {
+		if seeking {
+			if start.Equals(c) {
+				seeking = false
+			}
+
+			return nil
+		}
+
+		batch = append(batch, c)
+		if len(batch) == batchSize {
+			return deleteBatch()
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	if len(batch) > 0 {
+		return deleteBatch()
+	}
+
+	return nil
+}
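completePurge above resumes an interrupted purge by replaying the coldset and skipping entries until it passes the checkpointed CID. Reduced to its essentials, the seek-then-process idiom looks like this (toy types, not the actual ColdSetReader):

```go
package main

import "fmt"

// resumeAfter processes items, skipping everything up to and including the
// checkpointed item, mirroring completePurge's seeking loop.
func resumeAfter(items []string, checkpoint string, process func(string)) {
	seeking := checkpoint != ""
	for _, it := range items {
		if seeking {
			if it == checkpoint {
				seeking = false // everything before this was already handled
			}
			continue
		}
		process(it)
	}
}

func main() {
	items := []string{"a", "b", "c", "d"}
	// a previous run checkpointed after "b", so only "c" and "d" remain
	resumeAfter(items, "b", func(it string) { fmt.Println("purging", it) })
}
```

The waitForMissingRefs hunk that follows handles a related edge case: references that were observed during the transaction but whose objects had not yet been written.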
 // I really don't like having this code, but we seem to have some occasional DAG references with
@@ -1077,7 +1361,7 @@ func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error {
 // have this gem[TM].
 // My best guess is that they are parent message receipts or yet to be computed state roots; magik
 // thinks the cause may be block validation.
-func (s *SplitStore) waitForMissingRefs(markSet MarkSetVisitor) {
+func (s *SplitStore) waitForMissingRefs(markSet MarkSet) {
 	s.txnLk.Lock()
 	missing := s.txnMissing
 	s.txnMissing = nil
@@ -1106,7 +1390,7 @@ func (s *SplitStore) waitForMissingRefs(markSet MarkSet) {
 	}

 	towalk := missing
-	visitor := tmpVisitor()
+	visitor := newTmpVisitor()
 	missing = make(map[cid.Cid]struct{})

 	for c := range towalk {
diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go
index 7d84e0a4c..27d58bf10 100644
--- a/blockstore/splitstore/splitstore_test.go
+++ b/blockstore/splitstore/splitstore_test.go
@@ -4,6 +4,8 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"io/ioutil"
+	"os"
 	"sync"
 	"sync/atomic"
 	"testing"
@@ -20,12 +22,14 @@ import (
 	datastore "github.com/ipfs/go-datastore"
 	dssync "github.com/ipfs/go-datastore/sync"
 	logging "github.com/ipfs/go-log/v2"
+	mh "github.com/multiformats/go-multihash"
 )

 func init() {
 	CompactionThreshold = 5
 	CompactionBoundary = 2
 	WarmupBoundary = 0
+	SyncWaitTime = time.Millisecond
 	logging.SetLogLevel("splitstore", "DEBUG")
 }

@@ -80,8 +84,17 @@ func testSplitStore(t *testing.T, cfg *Config) {
 		t.Fatal(err)
 	}

+	path, err := ioutil.TempDir("", "splitstore.*")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Cleanup(func() {
+		_ = os.RemoveAll(path)
+	})
+
 	// open the splitstore
-	ss, err := Open("", ds, hot, cold, cfg)
+	ss, err := Open(path, ds, hot, cold, cfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -125,6 +138,10 @@ func testSplitStore(t *testing.T, cfg *Config) {
 	}

 	waitForCompaction := func() {
+		ss.txnSyncMx.Lock()
+		ss.txnSync = true
+		ss.txnSyncCond.Broadcast()
+		ss.txnSyncMx.Unlock()
 		for atomic.LoadInt32(&ss.compacting) == 1 {
 			time.Sleep(100 * time.Millisecond)
 		}
@@ -259,8 +276,17 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
 		t.Fatal(err)
 	}

+	path, err := ioutil.TempDir("", "splitstore.*")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Cleanup(func() {
+		_ = os.RemoveAll(path)
+	})
+
 	// open the splitstore
-	ss, err := Open("", ds, hot, cold, &Config{MarkSetType: "map"})
+	ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -305,6 +331,10 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
 	}

 	waitForCompaction := func() {
+		ss.txnSyncMx.Lock()
+		ss.txnSync = true
+		ss.txnSyncCond.Broadcast()
+		ss.txnSyncMx.Unlock()
 		for atomic.LoadInt32(&ss.compacting) == 1 {
 			time.Sleep(100 * time.Millisecond)
 		}
@@ -426,17 +456,25 @@ func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) {

 type mockStore struct {
 	mx  sync.Mutex
-	set map[cid.Cid]blocks.Block
+	set map[string]blocks.Block
 }

 func newMockStore() *mockStore {
-	return &mockStore{set: make(map[cid.Cid]blocks.Block)}
+	return &mockStore{set: make(map[string]blocks.Block)}
+}
+
+func (b *mockStore) keyOf(c cid.Cid) string {
+	return string(c.Hash())
+}
+
+func (b *mockStore) cidOf(k string) cid.Cid {
+	return cid.NewCidV1(cid.Raw, mh.Multihash([]byte(k)))
 }

 func (b *mockStore) Has(_ context.Context, cid cid.Cid) (bool, error) {
 	b.mx.Lock()
 	defer b.mx.Unlock()
-	_, ok := b.set[cid]
+	_, ok := b.set[b.keyOf(cid)]
 	return ok, nil
 }

@@ -446,7 +484,7 @@ func (b *mockStore) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) {
 	b.mx.Lock()
 	defer b.mx.Unlock()

-	blk, ok := b.set[cid]
+	blk, ok := b.set[b.keyOf(cid)]
 	if !ok {
 		return nil, blockstore.ErrNotFound
 	}
@@ -474,7 +512,7 @@ func (b *mockStore) Put(_ context.Context, blk blocks.Block) error {
 	b.mx.Lock()
 	defer b.mx.Unlock()

-	b.set[blk.Cid()] = blk
+	b.set[b.keyOf(blk.Cid())] = blk
 	return nil
 }

@@ -483,7 +521,7 @@ func (b *mockStore) PutMany(_ context.Context, blks []blocks.Block) error {
 	defer b.mx.Unlock()

 	for _, blk := range blks {
-		b.set[blk.Cid()] = blk
+		b.set[b.keyOf(blk.Cid())] = blk
 	}
 	return nil
 }

@@ -492,7 +530,7 @@ func (b *mockStore) DeleteBlock(_ context.Context, cid cid.Cid) error {
 	b.mx.Lock()
 	defer b.mx.Unlock()

-	delete(b.set, cid)
+	delete(b.set, b.keyOf(cid))
 	return nil
 }

@@ -501,7 +539,7 @@ func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error {
 	defer b.mx.Unlock()

 	for _, c := range cids {
-		delete(b.set, c)
+		delete(b.set, b.keyOf(c))
 	}
 	return nil
 }

@@ -515,7 +553,7 @@ func (b *mockStore) ForEachKey(f func(cid.Cid) error) error {
 	defer b.mx.Unlock()

 	for c := range b.set {
-		err := f(b.cidOf(c))
+		err := f(b.cidOf(c))
 		if err != nil {
 			return err
 		}
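mockStore now keys its map by string(c.Hash()) instead of by cid.Cid, so that logically identical blocks addressed through different CID versions or codecs hit the same entry. A compact illustration of the idea, using only go-cid and go-multihash (both already imported by the test file):

```go
package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	data := []byte("hello")
	h, _ := mh.Sum(data, mh.SHA2_256, -1)

	// same multihash, two different CIDs (v0 dag-pb vs v1 raw)
	c0 := cid.NewCidV0(h)
	c1 := cid.NewCidV1(cid.Raw, h)
	fmt.Println(c0.Equals(c1)) // false: distinct keys in a map[cid.Cid]...

	// keying by the underlying hash makes them collide, as mockStore wants
	fmt.Println(string(c0.Hash()) == string(c1.Hash())) // true
}
```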
diff --git a/blockstore/splitstore/splitstore_warmup.go b/blockstore/splitstore/splitstore_warmup.go
index 2a39f6c9d..b564f03c7 100644
--- a/blockstore/splitstore/splitstore_warmup.go
+++ b/blockstore/splitstore/splitstore_warmup.go
@@ -1,6 +1,7 @@
 package splitstore

 import (
+	"sync"
 	"sync/atomic"
 	"time"

@@ -55,12 +56,13 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 	if WarmupBoundary < epoch {
 		boundaryEpoch = epoch - WarmupBoundary
 	}
+	var mx sync.Mutex
 	batchHot := make([]blocks.Block, 0, batchSize)
-	count := int64(0)
-	xcount := int64(0)
-	missing := int64(0)
+	count := new(int64)
+	xcount := new(int64)
+	missing := new(int64)

-	visitor, err := s.markSetEnv.CreateVisitor("warmup", 0)
+	visitor, err := s.markSetEnv.New("warmup", 0)
 	if err != nil {
 		return xerrors.Errorf("error creating visitor: %w", err)
 	}
@@ -73,7 +75,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 			return errStopWalk
 		}

-		count++
+		atomic.AddInt64(count, 1)

 		has, err := s.hot.Has(s.ctx, c)
 		if err != nil {
@@ -87,22 +89,25 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 		blk, err := s.cold.Get(s.ctx, c)
 		if err != nil {
 			if err == bstore.ErrNotFound {
-				missing++
+				atomic.AddInt64(missing, 1)
 				return errStopWalk
 			}
 			return err
 		}

-		xcount++
+		atomic.AddInt64(xcount, 1)

+		mx.Lock()
 		batchHot = append(batchHot, blk)
 		if len(batchHot) == batchSize {
 			err = s.hot.PutMany(s.ctx, batchHot)
 			if err != nil {
+				mx.Unlock()
 				return err
 			}
 			batchHot = batchHot[:0]
 		}
+		mx.Unlock()

 		return nil
 	})
@@ -118,9 +123,9 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 		}
 	}

-	log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing)
+	log.Infow("warmup stats", "visited", *count, "warm", *xcount, "missing", *missing)

-	s.markSetSize = count + count>>2 // overestimate a bit
+	s.markSetSize = *count + *count>>2 // overestimate a bit
 	err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize))
 	if err != nil {
 		log.Warnf("error saving mark set size: %s", err)
diff --git a/blockstore/splitstore/visitor.go b/blockstore/splitstore/visitor.go
index f89c8f389..9dfbb78e7 100644
--- a/blockstore/splitstore/visitor.go
+++ b/blockstore/splitstore/visitor.go
@@ -1,6 +1,8 @@
 package splitstore

 import (
+	"sync"
+
 	cid "github.com/ipfs/go-cid"
 )

@@ -17,16 +19,34 @@ func (v *noopVisitor) Visit(_ cid.Cid) (bool, error) {
 	return true, nil
 }

-type cidSetVisitor struct {
+type tmpVisitor struct {
 	set *cid.Set
 }

-var _ ObjectVisitor = (*cidSetVisitor)(nil)
+var _ ObjectVisitor = (*tmpVisitor)(nil)

-func (v *cidSetVisitor) Visit(c cid.Cid) (bool, error) {
+func (v *tmpVisitor) Visit(c cid.Cid) (bool, error) {
 	return v.set.Visit(c), nil
 }

-func tmpVisitor() ObjectVisitor {
-	return &cidSetVisitor{set: cid.NewSet()}
+func newTmpVisitor() ObjectVisitor {
+	return &tmpVisitor{set: cid.NewSet()}
+}
+
+type concurrentVisitor struct {
+	mx  sync.Mutex
+	set *cid.Set
+}
+
+var _ ObjectVisitor = (*concurrentVisitor)(nil)
+
+func newConcurrentVisitor() *concurrentVisitor {
+	return &concurrentVisitor{set: cid.NewSet()}
+}
+
+func (v *concurrentVisitor) Visit(c cid.Cid) (bool, error) {
+	v.mx.Lock()
+	defer v.mx.Unlock()
+
+	return v.set.Visit(c), nil
+}
diff --git a/build/bootstrap/butterflynet.pi b/build/bootstrap/butterflynet.pi
index 1972adc5a..68bd4f74f 100644
--- a/build/bootstrap/butterflynet.pi
+++ b/build/bootstrap/butterflynet.pi
@@ -1,2 +1,2 @@
-/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBdRCBLUeKvoy22u5DcXs61adFn31v8WWCZgmBjDCjbsC
-/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWDUQJBA18njjXnG9RtLxoN3muvdU7PEy55QorUEsdAqdy
+/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWFHDtFx7CVTy4xoCDutVo1cScvSnQjDeaM8UzwVS1qwkh
+/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWKt8cwpkiumkT8x32c3YFxsPRwhV5J8hCYPn9mhUmcAXt
diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz
index 55e96a3aa..b486a5123 100644
Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ
diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz
index 2cb9ab35a..5e9c824f5 100644
Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ
diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz
index d6c6806ca..6eb21407b 100644
Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ
diff --git a/build/params_2k.go b/build/params_2k.go
index 0c31ce5ce..aa1beed36 100644
--- a/build/params_2k.go
+++ b/build/params_2k.go
@@ -90,6 +90,7 @@ func init() {
 	UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight)
 	UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight)
 	UpgradeChocolateHeight = getUpgradeHeight("LOTUS_CHOCOLATE_HEIGHT", UpgradeChocolateHeight)
+	UpgradeOhSnapHeight = getUpgradeHeight("LOTUS_OHSNAP_HEIGHT", UpgradeOhSnapHeight)

 	BuildType |= Build2k
diff --git a/build/params_butterfly.go b/build/params_butterfly.go
index 776a31714..804bdde93 100644
--- a/build/params_butterfly.go
+++ b/build/params_butterfly.go
@@ -42,8 +42,7 @@ const UpgradeTurboHeight = -15
 const UpgradeHyperdriveHeight = -16
 const UpgradeChocolateHeight = -17

-// 2022-01-17T19:00:00Z
-const UpgradeOhSnapHeight = 30262
+const UpgradeOhSnapHeight = 240

 func init() {
 	policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))
diff --git a/build/params_calibnet.go b/build/params_calibnet.go
index 4da2269ee..a8f5b4720 100644
--- a/build/params_calibnet.go
+++ b/build/params_calibnet.go
@@ -54,7 +54,8 @@ const UpgradeHyperdriveHeight = 420

 const UpgradeChocolateHeight = 312746

-const UpgradeOhSnapHeight = 99999999
+// 2022-02-10T19:23:00Z
+const UpgradeOhSnapHeight = 682006

 func init() {
 	policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))
diff --git a/build/proof-params/parameters.json b/build/proof-params/parameters.json
index 1d4584454..88bb0bfa3 100644
--- a/build/proof-params/parameters.json
+++ b/build/proof-params/parameters.json
@@ -1,4 +1,54 @@
 {
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.params": {
+    "cid": "Qma5WL6abSqYg9uUQAZ3EHS286bsNsha7oAGsJBD48Bq2q",
+    "digest": "c3ad7bb549470b82ad52ed070aebb4f4",
+    "sector_size": 536870912
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.vk": {
+    "cid": "QmUa7f9JtJMsqJJ3s3ZXk6WyF4xJLE8FiqYskZGgk8GCDv",
+    "digest": "994c5b7d450ca9da348c910689f2dc7f",
+    "sector_size": 536870912
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.params": {
+    "cid": "QmQiT4qBGodrVNEgVTDXxBNDdPbaD8Ag7Sx3ZTq1zHX79S",
+    "digest": "5aedd2cf3e5c0a15623d56a1b43110ad",
+    "sector_size": 8388608
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.vk": {
+    "cid": "QmdcpKUQvHM8RFRVKbk1yHfEqMcBzhtFWKRp9SNEmWq37i",
+    "digest": "abd80269054d391a734febdac0d2e687",
+    "sector_size": 8388608
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.params": {
+    "cid": "QmYM6Hg7mjmvA3ZHTsqkss1fkdyDju5dDmLiBZGJ5pz9y9",
+    "digest": "311f92a3e75036ced01b1c0025f1fa0c",
+    "sector_size": 2048
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.vk": {
+    "cid": "QmaQsTLL3nc5dw6wAvaioJSBfd1jhQrA2o6ucFf7XeV74P",
+    "digest": "eadad9784969890d30f2749708c79771",
+    "sector_size": 2048
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.params": {
+    "cid": "QmNPc75iEfcahCwNKdqnWLtxnjspUGGR4iscjiz3wP3RtS",
+    "digest": "1b3cfd761a961543f9eb273e435a06a2",
+    "sector_size": 34359738368
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.vk": {
+    "cid": "QmdFFUe1gcz9MMHc6YW8aoV48w4ckvcERjt7PkydQAMfCN",
+    "digest": "3a6941983754737fde880d29c7094905",
+    "sector_size": 34359738368
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.params": {
+    "cid": "QmUB6xTVjzBQGuDNeyJMrrJ1byk58vhPm8eY2Lv9pgwanp",
+    "digest": "1a392e7b759fb18e036c7559b5ece816",
+    "sector_size": 68719476736
+  },
+  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.vk": {
+    "cid": "Qmd794Jty7k26XJ8Eg4NDEks65Qk8G4GVfGkwqvymv8HAg",
+    "digest": "80e366df2f1011953c2d01c7b7c9ee8e",
+    "sector_size": 68719476736
+  },
   "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": {
     "cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR",
     "digest": "7610b9f82bfc88405b7a832b651ce2f6",
diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go
index 7dfd02233..118ab36e6 100644
--- a/chain/beacon/drand/drand.go
+++ b/chain/beacon/drand/drand.go
@@ -142,7 +142,7 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Response {
 	go func() {
 		start := build.Clock.Now()
-		log.Infow("start fetching randomness", "round", round)
+		log.Debugw("start fetching randomness", "round", round)
 		resp, err := db.client.Get(ctx, round)

 		var br beacon.Response
@@ -152,7 +152,7 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Response {
 			br.Entry.Round = resp.Round()
 			br.Entry.Data = resp.Signature()
 		}
-		log.Infow("done fetching randomness", "round", round, "took", build.Clock.Since(start))
+		log.Debugw("done fetching randomness", "round", round, "took", build.Clock.Since(start))
 		out <- br
 		close(out)
 	}()
diff --git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go
index be7628b4f..0adb79191 100644
--- a/chain/consensus/filcns/filecoin.go
+++ b/chain/consensus/filcns/filecoin.go
@@ -182,7 +182,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
 			}
 		}

-		return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
+		return xerrors.Errorf("parent state root did not match computed state (%s != %s)", h.ParentStateRoot, stateroot)
 	}

 	if precp != h.ParentMessageReceipts {
diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go
index 5d1857bf2..76647e331 100644
--- a/chain/messagepool/messagepool.go
+++ b/chain/messagepool/messagepool.go
@@ -173,10 +173,17 @@ type MessagePool struct {

 	sigValCache *lru.TwoQueueCache

+	nonceCache *lru.Cache
+
 	evtTypes [3]journal.EventType
 	journal  journal.Journal
 }

+type nonceCacheKey struct {
+	tsk  types.TipSetKey
+	addr address.Address
+}
+
 type msgSet struct {
 	msgs      map[uint64]*types.SignedMessage
 	nextNonce uint64
@@ -361,6 +368,7 @@ func (ms *msgSet) toSlice() []*types.SignedMessage {
 func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
 	cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
 	verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
+	noncecache, _ := lru.New(256)

 	cfg, err := loadConfig(ctx, ds)
 	if err != nil {
@@ -386,6 +394,7 @@ func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
 		pruneCooldown: make(chan struct{}, 1),
 		blsSigCache:   cache,
 		sigValCache:   verifcache,
+		nonceCache:    noncecache,
 		changes:       lps.New(50),
 		localMsgs:     namespace.Wrap(ds, datastore.NewKey(localMsgsDs)),
 		api:           api,
@@ -1016,11 +1025,23 @@ func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, ts *types.TipSet) (uint64, error) {
 	done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration)
 	defer done()

+	nk := nonceCacheKey{
+		tsk:  ts.Key(),
+		addr: addr,
+	}
+
+	n, ok := mp.nonceCache.Get(nk)
+	if ok {
+		return n.(uint64), nil
+	}
+
 	act, err := mp.api.GetActorAfter(addr, ts)
 	if err != nil {
 		return 0, err
 	}

+	mp.nonceCache.Add(nk, act.Nonce)
+
 	return act.Nonce, nil
 }
diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go
index 47cf56eb3..6bd60da34 100644
--- a/chain/messagepool/messagepool_test.go
+++ b/chain/messagepool/messagepool_test.go
@@ -1,3 +1,4 @@
+//stm: #unit
 package messagepool

 import (
@@ -206,6 +207,7 @@ func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) {

 func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) {
 	t.Helper()
+	//stm: @CHAIN_MEMPOOL_GET_NONCE_001
 	n, err := mp.GetNonce(context.TODO(), addr, types.EmptyTSK)
 	if err != nil {
 		t.Fatal(err)
@@ -366,8 +368,10 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) {
 	tma.applyBlock(t, a)
 	tsa := mock.TipSet(a)

+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	_, _ = mp.Pending(context.TODO())

+	//stm: @CHAIN_MEMPOOL_SELECT_001
 	selm, _ := mp.SelectMessages(context.Background(), tsa, 1)
 	if len(selm) == 0 {
 		t.Fatal("should have returned the rest of the messages")
@@ -428,6 +432,7 @@ func TestRevertMessages(t *testing.T) {

 	assertNonce(t, mp, sender, 4)

+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	p, _ := mp.Pending(context.TODO())
 	fmt.Printf("%+v\n", p)
 	if len(p) != 3 {
@@ -486,6 +491,7 @@ func TestPruningSimple(t *testing.T) {

 	mp.Prune()

+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	msgs, _ := mp.Pending(context.TODO())
 	if len(msgs) != 5 {
 		t.Fatal("expected only 5 messages in pool, got: ", len(msgs))
@@ -528,6 +534,7 @@ func TestLoadLocal(t *testing.T) {
 	msgs := make(map[cid.Cid]struct{})
 	for i := 0; i < 10; i++ {
 		m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
+		//stm: @CHAIN_MEMPOOL_PUSH_001
 		cid, err := mp.Push(context.TODO(), m)
 		if err != nil {
 			t.Fatal(err)
@@ -544,6 +551,7 @@ func TestLoadLocal(t *testing.T) {
 		t.Fatal(err)
 	}

+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	pmsgs, _ := mp.Pending(context.TODO())
 	if len(msgs) != len(pmsgs) {
 		t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs))
@@ -599,6 +607,7 @@ func TestClearAll(t *testing.T) {
 	gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
 	for i := 0; i < 10; i++ {
 		m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
+		//stm: @CHAIN_MEMPOOL_PUSH_001
 		_, err := mp.Push(context.TODO(), m)
 		if err != nil {
 			t.Fatal(err)
@@ -610,8 +619,10 @@ func TestClearAll(t *testing.T) {
 		mustAdd(t, mp, m)
 	}

+	//stm: @CHAIN_MEMPOOL_CLEAR_001
 	mp.Clear(context.Background(), true)

+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	pending, _ := mp.Pending(context.TODO())
 	if len(pending) > 0 {
 		t.Fatalf("cleared the mpool, but got %d pending messages", len(pending))
@@ -654,6 +665,7 @@ func TestClearNonLocal(t *testing.T) {
 	gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
 	for i := 0; i < 10; i++ {
 		m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
+		//stm: @CHAIN_MEMPOOL_PUSH_001
 		_, err := mp.Push(context.TODO(), m)
 		if err != nil {
 			t.Fatal(err)
@@ -665,8 +677,10 @@ func TestClearNonLocal(t *testing.T) {
 		mustAdd(t, mp, m)
 	}

+	//stm: @CHAIN_MEMPOOL_CLEAR_001
 	mp.Clear(context.Background(), false)

+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	pending, _ := mp.Pending(context.TODO())
 	if len(pending) != 10 {
 		t.Fatalf("expected 10 pending messages, but got %d instead", len(pending))
@@ -724,6 +738,7 @@ func TestUpdates(t *testing.T) {

 	for i := 0; i < 10; i++ {
 		m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
+		//stm: @CHAIN_MEMPOOL_PUSH_001
 		_, err := mp.Push(context.TODO(), m)
 		if err != nil {
 			t.Fatal(err)
diff --git a/chain/messagesigner/messagesigner_test.go b/chain/messagesigner/messagesigner_test.go
index 20d9af38b..00a09fc95 100644
--- a/chain/messagesigner/messagesigner_test.go
+++ b/chain/messagesigner/messagesigner_test.go
@@ -1,3 +1,4 @@
+//stm: #unit
 package messagesigner

 import (
@@ -60,6 +61,7 @@ func TestMessageSignerSignMessage(t *testing.T) {
 	to2, err := w.WalletNew(ctx, types.KTSecp256k1)
 	require.NoError(t, err)

+	//stm: @CHAIN_MESSAGE_SIGNER_NEW_SIGNER_001, @CHAIN_MESSAGE_SIGNER_SIGN_MESSAGE_001, @CHAIN_MESSAGE_SIGNER_SIGN_MESSAGE_005
 	type msgSpec struct {
 		msg        *types.Message
 		mpoolNonce [1]uint64
diff --git a/chain/rand/rand_test.go b/chain/rand/rand_test.go
index 5e5dae3f1..b5e2482b7 100644
--- a/chain/rand/rand_test.go
+++ b/chain/rand/rand_test.go
@@ -1,3 +1,4 @@
+//stm: #unit
 package rand_test

 import (
@@ -55,11 +56,13 @@ func TestNullRandomnessV1(t *testing.T) {

 	randEpoch := ts.TipSet.TipSet().Height() - 2

+	//stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V1_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_02
 	rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
 	if err != nil {
 		t.Fatal(err)
 	}

+	//stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
 	bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(beforeNullHeight)+offset)

 	select {
@@ -68,6 +71,7 @@ func TestNullRandomnessV1(t *testing.T) {
 			t.Fatal(resp.Err)
 		}

+		//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
 		rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
 		if err != nil {
 			t.Fatal(err)
@@ -131,11 +135,13 @@ func TestNullRandomnessV2(t *testing.T) {

 	randEpoch := ts.TipSet.TipSet().Height() - 2

+	//stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V2_01
 	rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
 	if err != nil {
 		t.Fatal(err)
 	}

+	//stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
 	bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(ts.TipSet.TipSet().Height())+offset)

 	select {
@@ -144,6 +150,7 @@ func TestNullRandomnessV2(t *testing.T) {
 			t.Fatal(resp.Err)
 		}

+		//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_03
 		// note that the randEpoch passed to DrawRandomness is still randEpoch (not the latest ts height)
 		rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
 		if err != nil {
@@ -212,11 +219,13 @@ func TestNullRandomnessV3(t *testing.T) {

 	randEpoch := ts.TipSet.TipSet().Height() - 2

+	//stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V3_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01
 	rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
 	if err != nil {
 		t.Fatal(err)
 	}

+	//stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
 	bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(randEpoch)+offset)

 	select {
@@ -225,6 +234,7 @@ func TestNullRandomnessV3(t *testing.T) {
 			t.Fatal(resp.Err)
 		}

+		//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
 		rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
 		if err != nil {
 			t.Fatal(err)
diff --git a/chain/sub/incoming_test.go b/chain/sub/incoming_test.go
index 215439209..1a3ab2785 100644
--- a/chain/sub/incoming_test.go
+++ b/chain/sub/incoming_test.go
@@ -1,3 +1,4 @@
+//stm: #unit
 package sub

 import (
@@ -49,6 +50,7 @@ func TestFetchCidsWithDedup(t *testing.T) {
 	}
 	g := &getter{msgs}

+	//stm: @CHAIN_INCOMING_FETCH_MESSAGES_BY_CID_001
 	// the cids have a duplicate
 	res, err := FetchMessagesByCids(context.TODO(), g, append(cids, cids[0]))
diff --git a/chain/sync_test.go b/chain/sync_test.go
index ae22b251c..35566169f 100644
--- a/chain/sync_test.go
+++ b/chain/sync_test.go
@@ -1,3 +1,4 @@
+//stm: #unit
 package chain_test

 import (
@@ -462,6 +463,8 @@ func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) {
 }

 func TestSyncSimple(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 50
 	tu := prepSyncTest(t, H)

@@ -478,6 +481,8 @@ func TestSyncSimple(t *testing.T) {
 }

 func TestSyncMining(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 50
 	tu := prepSyncTest(t, H)

@@ -500,6 +505,8 @@ func TestSyncMining(t *testing.T) {
 }

 func TestSyncBadTimestamp(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 50
 	tu := prepSyncTest(t, H)

@@ -554,6 +561,8 @@ func (wpp badWpp) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof7.PoStProof, error) {
 }

 func TestSyncBadWinningPoSt(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 15
 	tu := prepSyncTest(t, H)

@@ -583,6 +592,9 @@ func (tu *syncTestUtil) loadChainToNode(to int) {
 }

 func TestSyncFork(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)

@@ -650,6 +662,9 @@ func TestSyncFork(t *testing.T) {
 // A and B both include _different_ messages from sender X with nonce N (where N is the correct nonce for X).
 // We can confirm that the state can be correctly computed, and that `MessagesForTipset` behaves as expected.
 func TestDuplicateNonce(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)

@@ -704,6 +719,7 @@ func TestDuplicateNonce(t *testing.T) {
 	var includedMsg cid.Cid
 	var skippedMsg cid.Cid
+	//stm: @CHAIN_STATE_SEARCH_MSG_001
 	r0, err0 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[0][0].Cid(), api.LookbackNoLimit, true)
 	r1, err1 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[1][0].Cid(), api.LookbackNoLimit, true)

@@ -745,6 +761,9 @@ func TestDuplicateNonce(t *testing.T) {
 // This test asserts that a block that includes a message with bad nonce can't be synced. A nonce is "bad" if it can't
 // be applied on the parent state.
 func TestBadNonce(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)

@@ -792,6 +811,9 @@ func TestBadNonce(t *testing.T) {
 // One of the messages uses the sender's robust address, the other uses the ID address.
 // Such a block is invalid and should not sync.
 func TestMismatchedNoncesRobustID(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
 	v5h := abi.ChainEpoch(4)
 	tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)

@@ -804,6 +826,7 @@ func TestMismatchedNoncesRobustID(t *testing.T) {
 	require.NoError(t, err)

 	// Produce a message from the banker
+	//stm: @CHAIN_STATE_LOOKUP_ID_001
 	makeMsg := func(id bool) *types.SignedMessage {
 		sender := tu.g.Banker()
 		if id {
@@ -846,6 +869,9 @@ func TestMismatchedNoncesRobustID(t *testing.T) {
 // One of the messages uses the sender's robust address, the other uses the ID address.
 // Such a block is valid and should sync.
 func TestMatchedNoncesRobustID(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
 	v5h := abi.ChainEpoch(4)
 	tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)

@@ -858,6 +884,7 @@ func TestMatchedNoncesRobustID(t *testing.T) {
 	require.NoError(t, err)

 	// Produce a message from the banker with specified nonce
+	//stm: @CHAIN_STATE_LOOKUP_ID_001
 	makeMsg := func(n uint64, id bool) *types.SignedMessage {
 		sender := tu.g.Banker()
 		if id {
@@ -917,6 +944,8 @@ func runSyncBenchLength(b *testing.B, l int) {
 }

 func TestSyncInputs(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_VALIDATE_BLOCK_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)

@@ -944,6 +973,9 @@ func TestSyncInputs(t *testing.T) {
 }

 func TestSyncCheckpointHead(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)

@@ -963,6 +995,7 @@ func TestSyncCheckpointHead(t *testing.T) {
 	a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)

 	tu.waitUntilSyncTarget(p1, a.TipSet())
+	//stm: @CHAIN_SYNCER_CHECKPOINT_001
 	tu.checkpointTs(p1, a.TipSet().Key())

 	require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
@@ -982,15 +1015,20 @@ func TestSyncCheckpointHead(t *testing.T) {
 	tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
 	p1Head := tu.getHead(p1)
 	require.True(tu.t, p1Head.Equals(a.TipSet()))
+	//stm: @CHAIN_SYNCER_CHECK_BAD_001
 	tu.assertBad(p1, b.TipSet())

 	// Should be able to switch forks.
+	//stm: @CHAIN_SYNCER_CHECKPOINT_001
 	tu.checkpointTs(p1, b.TipSet().Key())
 	p1Head = tu.getHead(p1)
 	require.True(tu.t, p1Head.Equals(b.TipSet()))
 }

 func TestSyncCheckpointEarlierThanHead(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)

@@ -1010,6 +1048,7 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
 	a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)

 	tu.waitUntilSyncTarget(p1, a.TipSet())
+	//stm: @CHAIN_SYNCER_CHECKPOINT_001
 	tu.checkpointTs(p1, a1.TipSet().Key())

 	require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
@@ -1029,15 +1068,19 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
 	tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
 	p1Head := tu.getHead(p1)
 	require.True(tu.t, p1Head.Equals(a.TipSet()))
+	//stm: @CHAIN_SYNCER_CHECK_BAD_001
 	tu.assertBad(p1, b.TipSet())

 	// Should be able to switch forks.
+	//stm: @CHAIN_SYNCER_CHECKPOINT_001
 	tu.checkpointTs(p1, b.TipSet().Key())
 	p1Head = tu.getHead(p1)
 	require.True(tu.t, p1Head.Equals(b.TipSet()))
 }

 func TestInvalidHeight(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 50
 	tu := prepSyncTest(t, H)
diff --git a/cmd/lotus-miner/info.go b/cmd/lotus-miner/info.go
index 39de942aa..1133908ca 100644
--- a/cmd/lotus-miner/info.go
+++ b/cmd/lotus-miner/info.go
@@ -466,6 +466,7 @@ var stateOrder = map[sealing.SectorState]stateMeta{}
 var stateList = []stateMeta{
 	{col: 39, state: "Total"},
 	{col: color.FgGreen, state: sealing.Proving},
+	{col: color.FgGreen, state: sealing.UpdateActivating},

 	{col: color.FgBlue, state: sealing.Empty},
 	{col: color.FgBlue, state: sealing.WaitDeals},
@@ -496,6 +497,7 @@ var stateList = []stateMeta{
 	{col: color.FgYellow, state: sealing.SubmitReplicaUpdate},
 	{col: color.FgYellow, state: sealing.ReplicaUpdateWait},
 	{col: color.FgYellow, state: sealing.FinalizeReplicaUpdate},
+	{col: color.FgYellow, state: sealing.ReleaseSectorKey},

 	{col: color.FgCyan, state: sealing.Terminating},
 	{col: color.FgCyan, state: sealing.TerminateWait},
@@ -524,6 +526,7 @@ var stateList = []stateMeta{
 	{col: color.FgRed, state: sealing.SnapDealsAddPieceFailed},
 	{col: color.FgRed, state: sealing.SnapDealsDealsExpired},
 	{col: color.FgRed, state: sealing.ReplicaUpdateFailed},
+	{col: color.FgRed, state: sealing.ReleaseSectorKeyFailed},
 }

 func init() {
diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go
index cc5668334..c779f5a8b 100644
--- a/cmd/lotus-miner/sectors.go
+++ b/cmd/lotus-miner/sectors.go
@@ -55,6 +55,7 @@ var sectorsCmd = &cli.Command{
 		sectorsTerminateCmd,
 		sectorsRemoveCmd,
 		sectorsSnapUpCmd,
+		sectorsSnapAbortCmd,
 		sectorsMarkForUpgradeCmd,
 		sectorsStartSealCmd,
 		sectorsSealDelayCmd,
@@ -1520,6 +1521,31 @@ var sectorsSnapUpCmd = &cli.Command{
 	},
 }
+var sectorsSnapAbortCmd = &cli.Command{
+	Name:      "abort-upgrade",
+	Usage:     "Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to its previous state",
+	ArgsUsage: "",
+	Action: func(cctx *cli.Context) error {
+		if cctx.Args().Len() != 1 {
+			return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number"))
+		}
+
+		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+		ctx := lcli.ReqContext(cctx)
+
+		id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
+		if err != nil {
+			return xerrors.Errorf("could not parse sector number: %w", err)
+		}
+
+		return nodeApi.SectorAbortUpgrade(ctx, abi.SectorNumber(id))
+	},
+}
+
 var sectorsMarkForUpgradeCmd = &cli.Command{
 	Name:  "mark-for-upgrade",
 	Usage: "Mark a committed capacity sector for replacement by a sector with deals",
diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go
index 2e326e9c7..84ff1ccdd 100644
--- a/cmd/lotus-seal-worker/main.go
+++ b/cmd/lotus-seal-worker/main.go
@@ -261,7 +261,7 @@ var runCmd = &cli.Command{

 			var taskTypes []sealtasks.TaskType

-			taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize)
+			taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFinalizeReplicaUpdate)

 			if cctx.Bool("addpiece") {
 				taskTypes = append(taskTypes, sealtasks.TTAddPiece)
diff --git a/cmd/lotus-shed/storage-stats.go b/cmd/lotus-shed/storage-stats.go
index a9a5744a6..b4e5991fd 100644
--- a/cmd/lotus-shed/storage-stats.go
+++ b/cmd/lotus-shed/storage-stats.go
@@ -4,10 +4,12 @@ import (
 	"encoding/json"
 	corebig "math/big"
 	"os"
+	"strconv"

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
 	filbig "github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
 	"github.com/ipfs/go-cid"
 	"github.com/urfave/cli/v2"
@@ -22,29 +24,50 @@ type networkTotalsOutput struct {
 	Payload networkTotals `json:"payload"`
 }

-type networkTotals struct {
-	QaNetworkPower       filbig.Int `json:"total_qa_power"`
-	RawNetworkPower      filbig.Int `json:"total_raw_capacity"`
-	CapacityCarryingData float64    `json:"capacity_fraction_carrying_data"`
-	UniqueCids           int        `json:"total_unique_cids"`
-	UniqueProviders      int        `json:"total_unique_providers"`
-	UniqueClients        int        `json:"total_unique_clients"`
+type providerMeta struct {
+	nonidentifiable bool
+}
+
+// force formatting as decimal to aid human readers
+type humanFloat float64
+
+func (f humanFloat) MarshalJSON() ([]byte, error) {
+	// 'f' uses decimal digits without exponents.
+	// The bit size of 32 ensures we don't use too many decimal places.
+	return []byte(strconv.FormatFloat(float64(f), 'f', -1, 32)), nil
+}
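The humanFloat type above overrides MarshalJSON so ratios render as plain decimals rather than scientific notation. A runnable sketch of the same trick:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

type humanFloat float64

func (f humanFloat) MarshalJSON() ([]byte, error) {
	// 'f' format never uses exponents; bit size 32 keeps the decimals short
	return []byte(strconv.FormatFloat(float64(f), 'f', -1, 32)), nil
}

func main() {
	v := 0.00000012
	a, _ := json.Marshal(v)             // default float64 marshaling
	b, _ := json.Marshal(humanFloat(v)) // custom marshaler
	fmt.Println(string(a))              // 1.2e-07
	fmt.Println(string(b))              // 0.00000012
}
```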
+type Totals struct {
 	TotalDeals int   `json:"total_num_deals"`
 	TotalBytes int64 `json:"total_stored_data_size"`
-	FilplusTotalDeals int   `json:"filplus_total_num_deals"`
-	FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"`
+	PrivateTotalDeals    int        `json:"private_total_num_deals"`
+	PrivateTotalBytes    int64      `json:"private_total_stored_data_size"`
+	CapacityCarryingData humanFloat `json:"capacity_fraction_carrying_data"`
+}

-	seenClient   map[address.Address]bool
-	seenProvider map[address.Address]bool
-	seenPieceCid map[cid.Cid]bool
+type networkTotals struct {
+	QaNetworkPower         filbig.Int `json:"total_qa_power"`
+	RawNetworkPower        filbig.Int `json:"total_raw_capacity"`
+	UniqueCids             int        `json:"total_unique_cids"`
+	UniqueBytes            int64      `json:"total_unique_data_size"`
+	UniqueClients          int        `json:"total_unique_clients"`
+	UniqueProviders        int        `json:"total_unique_providers"`
+	UniquePrivateProviders int        `json:"total_unique_private_providers"`
+	Totals
+	FilPlus Totals `json:"filecoin_plus_subset"`
+
+	pieces    map[cid.Cid]struct{}
+	clients   map[address.Address]struct{}
+	providers map[address.Address]providerMeta
 }

 var storageStatsCmd = &cli.Command{
 	Name:  "storage-stats",
 	Usage: "Translates current lotus state into a json summary suitable for driving https://storage.filecoin.io/",
 	Flags: []cli.Flag{
-		&cli.Int64Flag{
-			Name: "height",
+		&cli.StringFlag{
+			Name:  "tipset",
+			Usage: "Comma separated array of cids, or @height",
 		},
 	},
 	Action: func(cctx *cli.Context) error {
@@ -56,22 +79,24 @@ var storageStatsCmd = &cli.Command{
 		}
 		defer apiCloser()

-		head, err := api.ChainHead(ctx)
-		if err != nil {
-			return err
-		}
-
-		requestedHeight := cctx.Int64("height")
-		if requestedHeight > 0 {
-			head, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(requestedHeight), head.Key())
+		var ts *types.TipSet
+		if cctx.String("tipset") == "" {
+			ts, err = api.ChainHead(ctx)
+			if err != nil {
+				return err
+			}
+			ts, err = api.ChainGetTipSetByHeight(ctx, ts.Height()-defaultEpochLookback, ts.Key())
+			if err != nil {
+				return err
+			}
 		} else {
-			head, err = api.ChainGetTipSetByHeight(ctx, head.Height()-defaultEpochLookback, head.Key())
-		}
-		if err != nil {
-			return err
+			ts, err = lcli.ParseTipSetRef(ctx, api, cctx.String("tipset"))
+			if err != nil {
+				return err
+			}
 		}

-		power, err := api.StateMinerPower(ctx, address.Address{}, head.Key())
+		power, err := api.StateMinerPower(ctx, address.Address{}, ts.Key())
 		if err != nil {
 			return err
 		}
@@ -79,12 +104,12 @@ var storageStatsCmd = &cli.Command{
 		netTotals := networkTotals{
 			QaNetworkPower:  power.TotalPower.QualityAdjPower,
 			RawNetworkPower: power.TotalPower.RawBytePower,
-			seenClient:      make(map[address.Address]bool),
-			seenProvider:    make(map[address.Address]bool),
-			seenPieceCid:    make(map[cid.Cid]bool),
+			pieces:          make(map[cid.Cid]struct{}),
+			clients:         make(map[address.Address]struct{}),
+			providers:       make(map[address.Address]providerMeta),
 		}

-		deals, err := api.StateMarketDeals(ctx, head.Key())
+		deals, err := api.StateMarketDeals(ctx, ts.Key())
 		if err != nil {
 			return err
 		}
@@ -94,35 +119,76 @@ var storageStatsCmd = &cli.Command{
 			// Only count deals that have properly started, not past/future ones
 			// https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85
 			// Bail on 0 as well in case SectorStartEpoch is uninitialized due to some bug
+			//
+			// Additionally if the SlashEpoch is set this means the underlying sector is
+			// terminated for whatever reason (not just slashed), and the deal record
+			// will soon be removed from the state entirely
 			if dealInfo.State.SectorStartEpoch <= 0 ||
-				dealInfo.State.SectorStartEpoch > head.Height() {
+				dealInfo.State.SectorStartEpoch > ts.Height() ||
+				dealInfo.State.SlashEpoch > -1 {
 				continue
 			}

-			netTotals.seenClient[dealInfo.Proposal.Client] = true
+			netTotals.clients[dealInfo.Proposal.Client] = struct{}{}
+
+			if _, seen := netTotals.providers[dealInfo.Proposal.Provider]; !seen {
+				pm := providerMeta{}
+
+				mi, err := api.StateMinerInfo(ctx, dealInfo.Proposal.Provider, ts.Key())
+				if err != nil {
+					return err
+				}
+
+				if mi.PeerId == nil || *mi.PeerId == "" {
+					log.Infof("private provider %s", dealInfo.Proposal.Provider)
+					pm.nonidentifiable = true
+					netTotals.UniquePrivateProviders++
+				}
+
+				netTotals.providers[dealInfo.Proposal.Provider] = pm
+				netTotals.UniqueProviders++
+			}
+
+			if _, seen := netTotals.pieces[dealInfo.Proposal.PieceCID]; !seen {
+				netTotals.pieces[dealInfo.Proposal.PieceCID] = struct{}{}
+				netTotals.UniqueBytes += int64(dealInfo.Proposal.PieceSize)
+				netTotals.UniqueCids++
+			}
+
 			netTotals.TotalBytes += int64(dealInfo.Proposal.PieceSize)
-			netTotals.seenProvider[dealInfo.Proposal.Provider] = true
-			netTotals.seenPieceCid[dealInfo.Proposal.PieceCID] = true
 			netTotals.TotalDeals++
+			if netTotals.providers[dealInfo.Proposal.Provider].nonidentifiable {
+				netTotals.PrivateTotalBytes += int64(dealInfo.Proposal.PieceSize)
+				netTotals.PrivateTotalDeals++
+			}

 			if dealInfo.Proposal.VerifiedDeal {
-				netTotals.FilplusTotalDeals++
-				netTotals.FilplusTotalBytes += int64(dealInfo.Proposal.PieceSize)
+				netTotals.FilPlus.TotalBytes += int64(dealInfo.Proposal.PieceSize)
+				netTotals.FilPlus.TotalDeals++
+				if netTotals.providers[dealInfo.Proposal.Provider].nonidentifiable {
+					netTotals.FilPlus.PrivateTotalBytes += int64(dealInfo.Proposal.PieceSize)
+					netTotals.FilPlus.PrivateTotalDeals++
+				}
 			}
 		}

-		netTotals.UniqueCids = len(netTotals.seenPieceCid)
-		netTotals.UniqueClients = len(netTotals.seenClient)
-		netTotals.UniqueProviders = len(netTotals.seenProvider)
+		netTotals.UniqueClients = len(netTotals.clients)

-		netTotals.CapacityCarryingData, _ = new(corebig.Rat).SetFrac(
+		ccd, _ := new(corebig.Rat).SetFrac(
 			corebig.NewInt(netTotals.TotalBytes),
 			netTotals.RawNetworkPower.Int,
 		).Float64()
+		netTotals.CapacityCarryingData = humanFloat(ccd)
+
+		ccdfp, _ := new(corebig.Rat).SetFrac(
+			corebig.NewInt(netTotals.FilPlus.TotalBytes),
+			netTotals.RawNetworkPower.Int,
+		).Float64()
+		netTotals.FilPlus.CapacityCarryingData = humanFloat(ccdfp)

 		return json.NewEncoder(os.Stdout).Encode(
 			networkTotalsOutput{
-				Epoch:    int64(head.Height()),
+				Epoch:    int64(ts.Height()),
 				Endpoint: "NETWORK_WIDE_TOTALS",
 				Payload:  netTotals,
 			},
diff --git a/cmd/tvx/codenames.go b/cmd/tvx/codenames.go
index f8da07e8d..81143c85c 100644
--- a/cmd/tvx/codenames.go
+++ b/cmd/tvx/codenames.go
@@ -24,6 +24,15 @@ var ProtocolCodenames = []struct {
 	{build.UpgradeTapeHeight + 1, "tape"},
 	{build.UpgradeLiftoffHeight + 1, "liftoff"},
 	{build.UpgradeKumquatHeight + 1, "postliftoff"},
+	{build.UpgradeCalicoHeight + 1, "calico"},
+	{build.UpgradePersianHeight + 1, "persian"},
+	{build.UpgradeOrangeHeight + 1, "orange"},
+	{build.UpgradeTrustHeight + 1, "trust"},
+	{build.UpgradeNorwegianHeight + 1, "norwegian"},
+	{build.UpgradeTurboHeight + 1, "turbo"},
+	{build.UpgradeHyperdriveHeight + 1, "hyperdrive"},
+	{build.UpgradeChocolateHeight + 1, "chocolate"},
+	{build.UpgradeOhSnapHeight + 1, "ohsnap"},
 }

 // GetProtocolCodename gets the protocol codename associated with a
height. diff --git a/cmd/tvx/extract_message.go b/cmd/tvx/extract_message.go index 71035867f..68376654a 100644 --- a/cmd/tvx/extract_message.go +++ b/cmd/tvx/extract_message.go @@ -8,12 +8,11 @@ import ( "io" "log" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/fatih/color" "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/actors/builtin" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" @@ -43,6 +42,15 @@ func doExtractMessage(opts extractOpts) error { return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err) } + // Assumes that the desired message isn't at the boundary of network versions. + // Otherwise this will be inaccurate. But it's such a tiny edge case that + // it's not worth spending the time to support boundary messages unless + // actually needed. + nv, err := FullAPI.StateNetworkVersion(ctx, incTs.Key()) + if err != nil { + return fmt.Errorf("failed to resolve network version from inclusion height: %w", err) + } + // get the circulating supply before the message was executed. circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key()) if err != nil { @@ -53,6 +61,7 @@ func doExtractMessage(opts extractOpts) error { log.Printf("message was executed in tipset: %s", execTs.Key()) log.Printf("message was included in tipset: %s", incTs.Key()) + log.Printf("network version at inclusion: %d", nv) log.Printf("circulating supply at inclusion tipset: %d", circSupply) log.Printf("finding precursor messages using mode: %s", opts.precursor) @@ -110,7 +119,8 @@ func doExtractMessage(opts extractOpts) error { CircSupply: circSupplyDetail.FilCirculating, BaseFee: basefee, // recorded randomness will be discarded. - Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI), + Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI), + NetworkVersion: nv, }) if err != nil { return fmt.Errorf("failed to execute precursor message: %w", err) @@ -140,12 +150,13 @@ func doExtractMessage(opts extractOpts) error { preroot = root applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ - Preroot: preroot, - Epoch: execTs.Height(), - Message: msg, - CircSupply: circSupplyDetail.FilCirculating, - BaseFee: basefee, - Rand: recordingRand, + Preroot: preroot, + Epoch: execTs.Height(), + Message: msg, + CircSupply: circSupplyDetail.FilCirculating, + BaseFee: basefee, + Rand: recordingRand, + NetworkVersion: nv, }) if err != nil { return fmt.Errorf("failed to execute message: %w", err) @@ -263,11 +274,6 @@ func doExtractMessage(opts extractOpts) error { return err } - nv, err := FullAPI.StateNetworkVersion(ctx, execTs.Key()) - if err != nil { - return err - } - codename := GetProtocolCodename(execTs.Height()) // Write out the test vector. 
diff --git a/cmd/tvx/simulate.go b/cmd/tvx/simulate.go index da9a034e9..5428e16ee 100644 --- a/cmd/tvx/simulate.go +++ b/cmd/tvx/simulate.go @@ -129,6 +129,7 @@ func runSimulateCmd(_ *cli.Context) error { CircSupply: circSupply.FilCirculating, BaseFee: baseFee, Rand: rand, + // TODO NetworkVersion }) if err != nil { return fmt.Errorf("failed to apply message: %w", err) diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 63891c3f2..7bacd0e5d 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -97,6 +97,7 @@ * [Return](#Return) * [ReturnAddPiece](#ReturnAddPiece) * [ReturnFetch](#ReturnFetch) + * [ReturnFinalizeReplicaUpdate](#ReturnFinalizeReplicaUpdate) * [ReturnFinalizeSector](#ReturnFinalizeSector) * [ReturnGenerateSectorKeyFromData](#ReturnGenerateSectorKeyFromData) * [ReturnMoveStorage](#ReturnMoveStorage) @@ -116,6 +117,7 @@ * [SealingAbort](#SealingAbort) * [SealingSchedDiag](#SealingSchedDiag) * [Sector](#Sector) + * [SectorAbortUpgrade](#SectorAbortUpgrade) * [SectorAddPieceToAny](#SectorAddPieceToAny) * [SectorCommitFlush](#SectorCommitFlush) * [SectorCommitPending](#SectorCommitPending) @@ -2054,6 +2056,30 @@ Response: `{}` ### ReturnFetch +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnFinalizeReplicaUpdate + + Perms: admin Inputs: @@ -2474,6 +2500,21 @@ Response: `{}` ## Sector +### SectorAbortUpgrade +SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort it + + +Perms: admin + +Inputs: +```json +[ + 9 +] +``` + +Response: `{}` + ### SectorAddPieceToAny Add piece to an open sector. 
If no sectors with enough space are open, either a new sector will be created, or this call will block until more diff --git a/documentation/en/api-v0-methods-worker.md b/documentation/en/api-v0-methods-worker.md index 959265a4d..382d43b37 100644 --- a/documentation/en/api-v0-methods-worker.md +++ b/documentation/en/api-v0-methods-worker.md @@ -10,6 +10,7 @@ * [Add](#Add) * [AddPiece](#AddPiece) * [Finalize](#Finalize) + * [FinalizeReplicaUpdate](#FinalizeReplicaUpdate) * [FinalizeSector](#FinalizeSector) * [Generate](#Generate) * [GenerateSectorKeyFromData](#GenerateSectorKeyFromData) @@ -1112,6 +1113,41 @@ Response: ## Finalize +### FinalizeReplicaUpdate + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + [ + { + "Offset": 1024, + "Size": 1024 + } + ] +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + ### FinalizeSector diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 6543beab7..25eed6c84 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -1580,6 +1580,7 @@ COMMANDS: terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector) remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty)) snap-up Mark a committed capacity sector to be filled with deals + abort-upgrade Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to as before mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals seal Manually start sealing a sector (filling any unused space with junk) set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts @@ -1815,6 +1816,19 @@ OPTIONS: ``` +### lotus-miner sectors abort-upgrade +``` +NAME: + lotus-miner sectors abort-upgrade - Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to as before + +USAGE: + lotus-miner sectors abort-upgrade [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + ### lotus-miner sectors mark-for-upgrade ``` NAME: diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 016e1290c..7ef8f6309 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -171,11 +171,11 @@ #HotStoreType = "badger" # MarkSetType specifies the type of the markset. - # It can be "map" (default) for in memory marking or "badger" for on-disk marking. + # It can be "map" for in memory marking or "badger" (default) for on-disk marking. # # type: string # env var: LOTUS_CHAINSTORE_SPLITSTORE_MARKSETTYPE - #MarkSetType = "map" + #MarkSetType = "badger" # HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond # the compaction boundary; default is 0. 
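Editorial note: to make the new `abort-upgrade` command documented above concrete, here is a hedged sketch of how such a command can drive the admin-perm `SectorAbortUpgrade` endpoint. The PR's actual CLI code is not shown in this diff, so everything beyond the command name, usage string, and API method is an assumption.

```go
package main

import (
	"log"
	"os"
	"strconv"

	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"

	lcli "github.com/filecoin-project/lotus/cli"
)

// sectorsAbortUpgradeCmd sketches the wiring for the command documented
// above: parse a sector number, dial the miner API, call the admin-perm
// SectorAbortUpgrade endpoint. Details are assumptions, not the PR's code.
var sectorsAbortUpgradeCmd = &cli.Command{
	Name:      "abort-upgrade",
	Usage:     "Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to as before",
	ArgsUsage: "<sectorNum>",
	Action: func(cctx *cli.Context) error {
		if cctx.NArg() != 1 {
			return xerrors.New("expected 1 argument: sector number")
		}

		id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
		if err != nil {
			return xerrors.Errorf("could not parse sector number: %w", err)
		}

		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

		// Sends SectorAbortUpgrade{} into the sector's state machine,
		// which the fsm changes below route to the AbortUpgrade state.
		return nodeApi.SectorAbortUpgrade(lcli.ReqContext(cctx), abi.SectorNumber(id))
	},
}

func main() {
	app := &cli.App{Name: "sketch", Commands: []*cli.Command{sectorsAbortUpgradeCmd}}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```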
diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml index 47ac9f1e7..d8c774c75 100644 --- a/documentation/en/default-lotus-miner-config.toml +++ b/documentation/en/default-lotus-miner-config.toml @@ -167,6 +167,14 @@ # env var: LOTUS_DEALMAKING_EXPECTEDSEALDURATION #ExpectedSealDuration = "24h0m0s" + # Whether new sectors are created to pack incoming deals + # When this is set to false no new sectors will be created for sealing incoming deals + # This is useful for forcing all deals to be assigned as snap deals to sectors marked for upgrade + # + # type: bool + # env var: LOTUS_DEALMAKING_MAKENEWSECTORFORDEALS + #MakeNewSectorForDeals = true + # Maximum amount of time proposed deal StartEpoch can be in future # # type: Duration diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index e660df561..5ec5d805c 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit e660df5616e397b2d8ac316f45ddfa7a44637971 +Subproject commit 5ec5d805c01ea85224f6448dd6c6fa0a2a73c028 diff --git a/extern/sector-storage/faults.go b/extern/sector-storage/faults.go index 7fdf5c337..f7a764e50 100644 --- a/extern/sector-storage/faults.go +++ b/extern/sector-storage/faults.go @@ -55,6 +55,25 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, return nil } + // temporary hack to make the check work with snapdeals + // will go away in https://github.com/filecoin-project/lotus/pull/7971 + if lp.Sealed == "" || lp.Cache == "" { + // maybe it's update + lockedUpdate, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone) + if err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + if lockedUpdate { + lp, _, err = m.localStore.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err) + bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err) + return nil + } + lp.Sealed, lp.Cache = lp.Update, lp.UpdateCache + } + } + if lp.Sealed == "" || lp.Cache == "" { log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache) bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", lp.Cache, lp.Sealed) diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index 88ab50f05..5b7f2acc5 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -669,7 +669,7 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, pha func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) { empty := storage.ReplicaUpdateOut{} - paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing) if err != nil { return empty, xerrors.Errorf("failed to acquire sector paths: %w", err) } @@ -718,7 +718,7 @@ func (sb *Sealer) ReplicaUpdate(ctx 
context.Context, sector storage.SectorRef, p } func (sb *Sealer) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) { - paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdateCache|storiface.FTUpdate, storiface.FTNone, storiface.PathSealing) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathSealing) if err != nil { return nil, xerrors.Errorf("failed to acquire sector paths: %w", err) } @@ -769,7 +769,7 @@ func (sb *Sealer) ReleaseSealed(ctx context.Context, sector storage.SectorRef) e return xerrors.Errorf("not supported at this layer") } -func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { +func (sb *Sealer) freeUnsealed(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { ssize, err := sector.ProofType.SectorSize() if err != nil { return err @@ -834,6 +834,19 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, } + return nil +} + +func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return err + } + + if err := sb.freeUnsealed(ctx, sector, keepUnsealed); err != nil { + return err + } + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache, 0, storiface.PathStorage) if err != nil { return xerrors.Errorf("acquiring sector cache path: %w", err) @@ -843,6 +856,43 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, return ffi.ClearCache(uint64(ssize), paths.Cache) } +func (sb *Sealer) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return err + } + + if err := sb.freeUnsealed(ctx, sector, keepUnsealed); err != nil { + return err + } + + { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache, 0, storiface.PathStorage) + if err != nil { + return xerrors.Errorf("acquiring sector cache path: %w", err) + } + defer done() + + if err := ffi.ClearCache(uint64(ssize), paths.Cache); err != nil { + return xerrors.Errorf("clear cache: %w", err) + } + } + + { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUpdateCache, 0, storiface.PathStorage) + if err != nil { + return xerrors.Errorf("acquiring sector cache path: %w", err) + } + defer done() + + if err := ffi.ClearCache(uint64(ssize), paths.UpdateCache); err != nil { + return xerrors.Errorf("clear cache: %w", err) + } + } + + return nil +} + func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { // This call is meant to mark storage as 'freeable'. 
Given that unsealing is // very expensive, we don't remove data as soon as we can - instead we only diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index ecabf0398..fcbfa2e69 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -146,7 +146,7 @@ func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls store go m.sched.runSched() localTasks := []sealtasks.TaskType{ - sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFetch, + sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFetch, sealtasks.TTFinalizeReplicaUpdate, } if sc.AllowAddPiece { localTasks = append(localTasks, sealtasks.TTAddPiece) @@ -577,6 +577,74 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef, return nil } +func (m *Manager) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + fts := storiface.FTUnsealed + { + unsealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false) + if err != nil { + return xerrors.Errorf("finding unsealed sector: %w", err) + } + + if len(unsealedStores) == 0 { // In some edge-cases the unsealed sector may not exist already, that's fine + fts = storiface.FTNone + } + } + + pathType := storiface.PathStorage + { + sealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUpdate, 0, false) + if err != nil { + return xerrors.Errorf("finding sealed sector: %w", err) + } + + for _, store := range sealedStores { + if store.CanSeal { + pathType = storiface.PathSealing + break + } + } + } + + selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache, false) + + err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalizeReplicaUpdate, selector, + m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache|fts, pathType, storiface.AcquireMove), + func(ctx context.Context, w Worker) error { + _, err := m.waitSimpleCall(ctx)(w.FinalizeReplicaUpdate(ctx, sector, keepUnsealed)) + return err + }) + if err != nil { + return err + } + + fetchSel := newAllocSelector(m.index, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathStorage) + moveUnsealed := fts + { + if len(keepUnsealed) == 0 { + moveUnsealed = storiface.FTNone + } + } + + err = m.sched.Schedule(ctx, sector, sealtasks.TTFetch, fetchSel, + m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache|moveUnsealed, storiface.PathStorage, storiface.AcquireMove), + func(ctx context.Context, w Worker) error { + _, err := m.waitSimpleCall(ctx)(w.MoveStorage(ctx, sector, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache|moveUnsealed)) + return err + }) + if err != nil { + return xerrors.Errorf("moving sector to storage: %w", err) + } + + return nil +} + func (m *Manager) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { return nil } @@ -715,14 +783,13 @@ func (m *Manager) ReplicaUpdate(ctx 
context.Context, sector storage.SectorRef, p return out, waitErr } - if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache); err != nil { return storage.ReplicaUpdateOut{}, xerrors.Errorf("acquiring sector lock: %w", err) } selector := newAllocSelector(m.index, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing) - err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { - log.Errorf("scheduled work for replica update") + err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { err := m.startWork(ctx, w, wk)(w.ReplicaUpdate(ctx, sector, pieces)) if err != nil { return xerrors.Errorf("startWork: %w", err) @@ -768,9 +835,12 @@ func (m *Manager) ProveReplicaUpdate1(ctx context.Context, sector storage.Sector return nil, xerrors.Errorf("acquiring sector lock: %w", err) } - selector := newExistingSelector(m.index, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTSealed|storiface.FTCache, true) + // NOTE: We set allowFetch to false so that we always execute on a worker + // with direct access to the data. We want to do that because this step is + // generally very cheap / fast, and transferring data is not worth the effort + selector := newExistingSelector(m.index, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTSealed|storiface.FTCache, false) - err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate1, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate1, selector, m.schedFetch(sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed)) if err != nil { @@ -873,6 +943,10 @@ func (m *Manager) ReturnProveReplicaUpdate2(ctx context.Context, callID storifac return m.returnResult(ctx, callID, proof, err) } +func (m *Manager) ReturnFinalizeReplicaUpdate(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { + return m.returnResult(ctx, callID, nil, err) +} + func (m *Manager) ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return m.returnResult(ctx, callID, nil, err) } diff --git a/extern/sector-storage/manager_test.go b/extern/sector-storage/manager_test.go index 04ab0c5ce..cc1f02a9a 100644 --- a/extern/sector-storage/manager_test.go +++ b/extern/sector-storage/manager_test.go @@ -1,3 +1,4 @@ +//stm: #unit package sectorstorage import ( @@ -363,6 +364,7 @@ func TestRedoPC1(t *testing.T) { // Manager restarts in the middle of a task, restarts it, it completes func TestRestartManager(t *testing.T) { + //stm: @WORKER_JOBS_001 test := func(returnBeforeCall bool) func(*testing.T) { return func(t *testing.T) { 
logging.SetAllLoggers(logging.LevelDebug) @@ -507,6 +509,7 @@ func TestRestartWorker(t *testing.T) { <-arch require.NoError(t, w.Close()) + //stm: @WORKER_STATS_001 for { if len(m.WorkerStats()) == 0 { break @@ -569,6 +572,7 @@ func TestReenableWorker(t *testing.T) { // disable atomic.StoreInt64(&w.testDisable, 1) + //stm: @WORKER_STATS_001 for i := 0; i < 100; i++ { if !m.WorkerStats()[w.session].Enabled { break diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index c99af89e7..771265176 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -477,6 +477,10 @@ func (mgr *SectorMgr) FinalizeSector(context.Context, storage.SectorRef, []stora return nil } +func (mgr *SectorMgr) FinalizeReplicaUpdate(context.Context, storage.SectorRef, []storage.Range) error { + return nil +} + func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { return nil } @@ -577,6 +581,10 @@ func (mgr *SectorMgr) ReturnGenerateSectorKeyFromData(ctx context.Context, callI panic("not supported") } +func (mgr *SectorMgr) ReturnFinalizeReplicaUpdate(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { + panic("not supported") +} + func (m mockVerifProver) VerifySeal(svi proof.SealVerifyInfo) (bool, error) { plen, err := svi.SealProof.ProofSize() if err != nil { diff --git a/extern/sector-storage/sched_test.go b/extern/sector-storage/sched_test.go index 29e791ba3..10d182973 100644 --- a/extern/sector-storage/sched_test.go +++ b/extern/sector-storage/sched_test.go @@ -1,3 +1,4 @@ +//stm: #unit package sectorstorage import ( @@ -118,6 +119,10 @@ func (s *schedTestWorker) GenerateSectorKeyFromData(ctx context.Context, sector panic("implement me") } +func (s *schedTestWorker) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { + panic("implement me") +} + func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) { panic("implement me") } @@ -206,6 +211,7 @@ func TestSchedStartStop(t *testing.T) { } func TestSched(t *testing.T) { + //stm: @WORKER_JOBS_001 storiface.ParallelNum = 1 storiface.ParallelDenom = 1 diff --git a/extern/sector-storage/sealtasks/task.go b/extern/sector-storage/sealtasks/task.go index f6104878b..654ad25b1 100644 --- a/extern/sector-storage/sealtasks/task.go +++ b/extern/sector-storage/sealtasks/task.go @@ -6,7 +6,7 @@ const ( TTAddPiece TaskType = "seal/v0/addpiece" TTPreCommit1 TaskType = "seal/v0/precommit/1" TTPreCommit2 TaskType = "seal/v0/precommit/2" - TTCommit1 TaskType = "seal/v0/commit/1" // NOTE: We use this to transfer the sector into miner-local storage for now; Don't use on workers! 
+ TTCommit1 TaskType = "seal/v0/commit/1" TTCommit2 TaskType = "seal/v0/commit/2" TTFinalize TaskType = "seal/v0/finalize" @@ -14,10 +14,11 @@ const ( TTFetch TaskType = "seal/v0/fetch" TTUnseal TaskType = "seal/v0/unseal" - TTReplicaUpdate TaskType = "seal/v0/replicaupdate" - TTProveReplicaUpdate1 TaskType = "seal/v0/provereplicaupdate/1" - TTProveReplicaUpdate2 TaskType = "seal/v0/provereplicaupdate/2" - TTRegenSectorKey TaskType = "seal/v0/regensectorkey" + TTReplicaUpdate TaskType = "seal/v0/replicaupdate" + TTProveReplicaUpdate1 TaskType = "seal/v0/provereplicaupdate/1" + TTProveReplicaUpdate2 TaskType = "seal/v0/provereplicaupdate/2" + TTRegenSectorKey TaskType = "seal/v0/regensectorkey" + TTFinalizeReplicaUpdate TaskType = "seal/v0/finalize/replicaupdate" ) var order = map[TaskType]int{ @@ -48,10 +49,11 @@ var shortNames = map[TaskType]string{ TTFetch: "GET", TTUnseal: "UNS", - TTReplicaUpdate: "RU", - TTProveReplicaUpdate1: "PR1", - TTProveReplicaUpdate2: "PR2", - TTRegenSectorKey: "GSK", + TTReplicaUpdate: "RU", + TTProveReplicaUpdate1: "PR1", + TTProveReplicaUpdate2: "PR2", + TTRegenSectorKey: "GSK", + TTFinalizeReplicaUpdate: "FRU", } func (a TaskType) MuchLess(b TaskType) (bool, bool) { diff --git a/extern/sector-storage/stores/remote_test.go b/extern/sector-storage/stores/remote_test.go index 0bc439dee..a7a82a728 100644 --- a/extern/sector-storage/stores/remote_test.go +++ b/extern/sector-storage/stores/remote_test.go @@ -1,3 +1,4 @@ +//stm: #unit package stores_test import ( @@ -154,6 +155,7 @@ func TestMoveShared(t *testing.T) { } func TestReader(t *testing.T) { + //stm: @STORAGE_INFO_001 logging.SetAllLoggers(logging.LevelDebug) bz := []byte("Hello World") diff --git a/extern/sector-storage/storiface/worker.go b/extern/sector-storage/storiface/worker.go index 8bb6a256a..eedbc8207 100644 --- a/extern/sector-storage/storiface/worker.go +++ b/extern/sector-storage/storiface/worker.go @@ -120,6 +120,7 @@ type WorkerCalls interface { SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (CallID, error) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error) + FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (CallID, error) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (CallID, error) @@ -182,6 +183,7 @@ type WorkerReturn interface { ReturnProveReplicaUpdate1(ctx context.Context, callID CallID, proofs storage.ReplicaVanillaProofs, err *CallError) error ReturnProveReplicaUpdate2(ctx context.Context, callID CallID, proof storage.ReplicaUpdateProof, err *CallError) error ReturnGenerateSectorKeyFromData(ctx context.Context, callID CallID, err *CallError) error + ReturnFinalizeReplicaUpdate(ctx context.Context, callID CallID, err *CallError) error ReturnMoveStorage(ctx context.Context, callID CallID, err *CallError) error ReturnUnsealPiece(ctx context.Context, callID CallID, err *CallError) error ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err *CallError) error diff --git 
a/extern/sector-storage/teststorage_test.go b/extern/sector-storage/teststorage_test.go index cb15184be..6c6eef0a6 100644 --- a/extern/sector-storage/teststorage_test.go +++ b/extern/sector-storage/teststorage_test.go @@ -87,6 +87,10 @@ func (t *testExec) GenerateSectorKeyFromData(ctx context.Context, sector storage panic("implement me") } +func (t *testExec) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { + panic("implement me") +} + func (t *testExec) NewSector(ctx context.Context, sector storage.SectorRef) error { panic("implement me") } diff --git a/extern/sector-storage/worker_local.go b/extern/sector-storage/worker_local.go index a5f5a0b9d..572d482ed 100644 --- a/extern/sector-storage/worker_local.go +++ b/extern/sector-storage/worker_local.go @@ -162,20 +162,21 @@ func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) { type ReturnType string const ( - AddPiece ReturnType = "AddPiece" - SealPreCommit1 ReturnType = "SealPreCommit1" - SealPreCommit2 ReturnType = "SealPreCommit2" - SealCommit1 ReturnType = "SealCommit1" - SealCommit2 ReturnType = "SealCommit2" - FinalizeSector ReturnType = "FinalizeSector" - ReplicaUpdate ReturnType = "ReplicaUpdate" - ProveReplicaUpdate1 ReturnType = "ProveReplicaUpdate1" - ProveReplicaUpdate2 ReturnType = "ProveReplicaUpdate2" - GenerateSectorKey ReturnType = "GenerateSectorKey" - ReleaseUnsealed ReturnType = "ReleaseUnsealed" - MoveStorage ReturnType = "MoveStorage" - UnsealPiece ReturnType = "UnsealPiece" - Fetch ReturnType = "Fetch" + AddPiece ReturnType = "AddPiece" + SealPreCommit1 ReturnType = "SealPreCommit1" + SealPreCommit2 ReturnType = "SealPreCommit2" + SealCommit1 ReturnType = "SealCommit1" + SealCommit2 ReturnType = "SealCommit2" + FinalizeSector ReturnType = "FinalizeSector" + FinalizeReplicaUpdate ReturnType = "FinalizeReplicaUpdate" + ReplicaUpdate ReturnType = "ReplicaUpdate" + ProveReplicaUpdate1 ReturnType = "ProveReplicaUpdate1" + ProveReplicaUpdate2 ReturnType = "ProveReplicaUpdate2" + GenerateSectorKey ReturnType = "GenerateSectorKey" + ReleaseUnsealed ReturnType = "ReleaseUnsealed" + MoveStorage ReturnType = "MoveStorage" + UnsealPiece ReturnType = "UnsealPiece" + Fetch ReturnType = "Fetch" ) // in: func(WorkerReturn, context.Context, CallID, err string) @@ -213,20 +214,21 @@ func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.Wor } var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error{ - AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece), - SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), - SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), - SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1), - SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2), - FinalizeSector: rfunc(storiface.WorkerReturn.ReturnFinalizeSector), - ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), - ReplicaUpdate: rfunc(storiface.WorkerReturn.ReturnReplicaUpdate), - ProveReplicaUpdate1: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate1), - ProveReplicaUpdate2: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate2), - GenerateSectorKey: rfunc(storiface.WorkerReturn.ReturnGenerateSectorKeyFromData), - MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), - UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), - Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), + AddPiece: 
rfunc(storiface.WorkerReturn.ReturnAddPiece), + SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), + SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), + SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1), + SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2), + FinalizeSector: rfunc(storiface.WorkerReturn.ReturnFinalizeSector), + ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), + ReplicaUpdate: rfunc(storiface.WorkerReturn.ReturnReplicaUpdate), + ProveReplicaUpdate1: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate1), + ProveReplicaUpdate2: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate2), + GenerateSectorKey: rfunc(storiface.WorkerReturn.ReturnGenerateSectorKeyFromData), + FinalizeReplicaUpdate: rfunc(storiface.WorkerReturn.ReturnFinalizeReplicaUpdate), + MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), + UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), + Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), } func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) { @@ -456,6 +458,27 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector storage.SectorR }) } +func (l *LocalWorker) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, FinalizeReplicaUpdate, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + if err := sb.FinalizeReplicaUpdate(ctx, sector, keepUnsealed); err != nil { + return nil, xerrors.Errorf("finalizing sector: %w", err) + } + + if len(keepUnsealed) == 0 { + if err := l.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true, nil); err != nil { + return nil, xerrors.Errorf("removing unsealed data: %w", err) + } + } + + return nil, err + }) +} + func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) { return storiface.UndefCall, xerrors.Errorf("implement me") } diff --git a/extern/sector-storage/worker_tracked.go b/extern/sector-storage/worker_tracked.go index a1c647422..91da0fee5 100644 --- a/extern/sector-storage/worker_tracked.go +++ b/extern/sector-storage/worker_tracked.go @@ -215,4 +215,8 @@ func (t *trackedWorker) ProveReplicaUpdate2(ctx context.Context, sector storage. 
}) } +func (t *trackedWorker) FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTFinalizeReplicaUpdate, func() (storiface.CallID, error) { return t.Worker.FinalizeReplicaUpdate(ctx, sector, keepUnsealed) }) +} + var _ Worker = &trackedWorker{} diff --git a/extern/storage-sealing/checks.go b/extern/storage-sealing/checks.go index 3525c84a7..56b0677c4 100644 --- a/extern/storage-sealing/checks.go +++ b/extern/storage-sealing/checks.go @@ -20,6 +20,7 @@ import ( // We should implement some wait-for-api logic type ErrApi struct{ error } +type ErrNoDeals struct{ error } type ErrInvalidDeals struct{ error } type ErrInvalidPiece struct{ error } type ErrExpiredDeals struct{ error } @@ -38,12 +39,14 @@ type ErrCommitWaitFailed struct{ error } type ErrBadRU struct{ error } type ErrBadPR struct{ error } -func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) error { +func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI, mustHaveDeals bool) error { tok, height, err := api.ChainHead(ctx) if err != nil { return &ErrApi{xerrors.Errorf("getting chain head: %w", err)} } + dealCount := 0 + for i, p := range si.Pieces { // if no deal is associated with the piece, ensure that we added it as // filler (i.e. ensure that it has a zero PieceCID) @@ -55,6 +58,8 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api continue } + dealCount++ + proposal, err := api.StateMarketStorageDealProposal(ctx, p.DealInfo.DealID, tok) if err != nil { return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)} @@ -77,13 +82,17 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api } } + if mustHaveDeals && dealCount <= 0 { + return &ErrNoDeals{(xerrors.Errorf("sector %d must have deals, but does not", si.SectorNumber))} + } + return nil } // checkPrecommit checks that data commitment generated in the sealing process // matches pieces, and that the seal ticket isn't expired func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, height abi.ChainEpoch, api SealingAPI) (err error) { - if err := checkPieces(ctx, maddr, si, api); err != nil { + if err := checkPieces(ctx, maddr, si, api, false); err != nil { return err } @@ -184,7 +193,7 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte, return &ErrInvalidProof{xerrors.New("invalid proof (compute error?)")} } - if err := checkPieces(ctx, m.maddr, si, m.Api); err != nil { + if err := checkPieces(ctx, m.maddr, si, m.Api, false); err != nil { return err } @@ -194,7 +203,7 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte, // check that sector info is good after running a replica update func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, api SealingAPI) error { - if err := checkPieces(ctx, maddr, si, api); err != nil { + if err := checkPieces(ctx, maddr, si, api, true); err != nil { return err } if !si.CCUpdate { diff --git a/extern/storage-sealing/commit_batch_test.go b/extern/storage-sealing/commit_batch_test.go index e03c34693..3bda6d3fd 100644 --- a/extern/storage-sealing/commit_batch_test.go +++ b/extern/storage-sealing/commit_batch_test.go @@ -1,3 +1,4 @@ +//stm: #unit package sealing_test import ( @@ -28,6 
+29,7 @@ import ( ) func TestCommitBatcher(t *testing.T) { + //stm: @CHAIN_STATE_MINER_PRE_COM_INFO_001, @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 t0123, err := address.NewFromString("t0123") require.NoError(t, err) @@ -147,6 +149,7 @@ func TestCommitBatcher(t *testing.T) { } } + //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001, @CHAIN_STATE_MINER_GET_COLLATERAL_001 expectSend := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil) diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index 83874e907..251d3a74a 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -137,27 +137,32 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto SnapDealsWaitDeals: planOne( on(SectorAddPiece{}, SnapDealsAddPiece), on(SectorStartPacking{}, SnapDealsPacking), + on(SectorAbortUpgrade{}, AbortUpgrade), ), SnapDealsAddPiece: planOne( on(SectorPieceAdded{}, SnapDealsWaitDeals), apply(SectorStartPacking{}), apply(SectorAddPiece{}), on(SectorAddPieceFailed{}, SnapDealsAddPieceFailed), + on(SectorAbortUpgrade{}, AbortUpgrade), ), SnapDealsPacking: planOne( on(SectorPacked{}, UpdateReplica), + on(SectorAbortUpgrade{}, AbortUpgrade), ), UpdateReplica: planOne( on(SectorReplicaUpdate{}, ProveReplicaUpdate), on(SectorUpdateReplicaFailed{}, ReplicaUpdateFailed), on(SectorDealsExpired{}, SnapDealsDealsExpired), on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), + on(SectorAbortUpgrade{}, AbortUpgrade), ), ProveReplicaUpdate: planOne( on(SectorProveReplicaUpdate{}, SubmitReplicaUpdate), on(SectorProveReplicaUpdateFailed{}, ReplicaUpdateFailed), on(SectorDealsExpired{}, SnapDealsDealsExpired), on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), + on(SectorAbortUpgrade{}, AbortUpgrade), ), SubmitReplicaUpdate: planOne( on(SectorReplicaUpdateSubmitted{}, ReplicaUpdateWait), @@ -169,7 +174,14 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorAbortUpgrade{}, AbortUpgrade), ), FinalizeReplicaUpdate: planOne( - on(SectorFinalized{}, Proving), + on(SectorFinalized{}, UpdateActivating), + ), + UpdateActivating: planOne( + on(SectorUpdateActive{}, ReleaseSectorKey), + ), + ReleaseSectorKey: planOne( + on(SectorKeyReleased{}, Proving), + on(SectorReleaseKeyFailed{}, ReleaseSectorKeyFailed), ), // Sealing errors @@ -231,6 +243,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorRetryWaitDeals{}, SnapDealsWaitDeals), apply(SectorStartPacking{}), apply(SectorAddPiece{}), + on(SectorAbortUpgrade{}, AbortUpgrade), ), SnapDealsDealsExpired: planOne( on(SectorAbortUpgrade{}, AbortUpgrade), @@ -249,6 +262,10 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorRetryProveReplicaUpdate{}, ProveReplicaUpdate), on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), on(SectorDealsExpired{}, SnapDealsDealsExpired), + on(SectorAbortUpgrade{}, AbortUpgrade), + ), + ReleaseSectorKeyFailed: planOne( + on(SectorUpdateActive{}, ReleaseSectorKey), ), // Post-seal @@ -477,6 +494,10 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return m.handleReplicaUpdateWait, processed, nil case FinalizeReplicaUpdate: return 
m.handleFinalizeReplicaUpdate, processed, nil + case UpdateActivating: + return m.handleUpdateActivating, processed, nil + case ReleaseSectorKey: + return m.handleReleaseSectorKey, processed, nil // Handled failure modes case AddPieceFailed: @@ -513,6 +534,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return m.handleSnapDealsRecoverDealIDs, processed, nil case ReplicaUpdateFailed: return m.handleSubmitReplicaUpdateFailed, processed, nil + case ReleaseSectorKeyFailed: + return m.handleReleaseSectorKeyFailed, 0, err case AbortUpgrade: return m.handleAbortUpgrade, processed, nil diff --git a/extern/storage-sealing/fsm_events.go b/extern/storage-sealing/fsm_events.go index 395c4b94a..fc3b774f9 100644 --- a/extern/storage-sealing/fsm_events.go +++ b/extern/storage-sealing/fsm_events.go @@ -335,6 +335,14 @@ type SectorReplicaUpdateLanded struct{} func (evt SectorReplicaUpdateLanded) apply(state *SectorInfo) {} +type SectorUpdateActive struct{} + +func (evt SectorUpdateActive) apply(state *SectorInfo) {} + +type SectorKeyReleased struct{} + +func (evt SectorKeyReleased) apply(state *SectorInfo) {} + // Failed state recovery type SectorRetrySealPreCommit1 struct{} @@ -445,6 +453,13 @@ type SectorSubmitReplicaUpdateFailed struct{} func (evt SectorSubmitReplicaUpdateFailed) apply(state *SectorInfo) {} +type SectorReleaseKeyFailed struct{ error } + +func (evt SectorReleaseKeyFailed) FormatError(xerrors.Printer) (next error) { + return evt.error +} +func (evt SectorReleaseKeyFailed) apply(state *SectorInfo) {} + // Faults type SectorFaulty struct{} diff --git a/extern/storage-sealing/input.go b/extern/storage-sealing/input.go index f3259f0cc..13dc40f0f 100644 --- a/extern/storage-sealing/input.go +++ b/extern/storage-sealing/input.go @@ -16,6 +16,7 @@ import ( "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" @@ -117,9 +118,25 @@ func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, return false, xerrors.Errorf("getting storage config: %w", err) } - // todo check deal age, start sealing if any deal has less than X (configurable) to start deadline sealTime := time.Unix(sector.CreationTime, 0).Add(cfg.WaitDealsDelay) + // check deal age, start sealing when the deal closest to starting is within slack time + _, current, err := m.Api.ChainHead(ctx.Context()) + blockTime := time.Second * time.Duration(build.BlockDelaySecs) + if err != nil { + return false, xerrors.Errorf("API error getting head: %w", err) + } + for _, piece := range sector.Pieces { + if piece.DealInfo == nil { + continue + } + dealSafeSealEpoch := piece.DealInfo.DealProposal.StartEpoch - cfg.StartEpochSealingBuffer + dealSafeSealTime := time.Now().Add(time.Duration(dealSafeSealEpoch-current) * blockTime) + if dealSafeSealTime.Before(sealTime) { + sealTime = dealSafeSealTime + } + } + if now.After(sealTime) { log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timeout") return true, ctx.Send(SectorStartPacking{}) @@ -475,6 +492,10 @@ func (m *Sealing) tryCreateDealSector(ctx context.Context, sp abi.RegisteredSeal return xerrors.Errorf("getting storage config: %w", err) } + if !cfg.MakeNewSectorForDeals { + return nil + } + if 
cfg.MaxSealingSectorsForDeals > 0 && m.stats.curSealing() >= cfg.MaxSealingSectorsForDeals { return nil } @@ -524,6 +545,13 @@ func (m *Sealing) StartPacking(sid abi.SectorNumber) error { return m.sectors.Send(uint64(sid), SectorStartPacking{}) } +func (m *Sealing) AbortUpgrade(sid abi.SectorNumber) error { + m.startupWait.Wait() + + log.Infow("aborting upgrade of sector", "sector", sid, "trigger", "user") + return m.sectors.Send(uint64(sid), SectorAbortUpgrade{xerrors.New("triggered by user")}) +} + func proposalCID(deal api.PieceDealInfo) cid.Cid { pc, err := deal.DealProposal.Cid() if err != nil { diff --git a/extern/storage-sealing/precommit_batch_test.go b/extern/storage-sealing/precommit_batch_test.go index f6440996e..a90645a05 100644 --- a/extern/storage-sealing/precommit_batch_test.go +++ b/extern/storage-sealing/precommit_batch_test.go @@ -1,3 +1,4 @@ +//stm: #unit package sealing_test import ( @@ -38,6 +39,7 @@ var fc = config.MinerFeeConfig{ } func TestPrecommitBatcher(t *testing.T) { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 t0123, err := address.NewFromString("t0123") require.NoError(t, err) @@ -151,6 +153,7 @@ func TestPrecommitBatcher(t *testing.T) { } } + //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 expectSend := func(expect []abi.SectorNumber) action { return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) @@ -171,6 +174,7 @@ func TestPrecommitBatcher(t *testing.T) { } } + //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 expectSendsSingle := func(expect []abi.SectorNumber) action { return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) diff --git a/extern/storage-sealing/sealiface/config.go b/extern/storage-sealing/sealiface/config.go index d8a12283c..852034aa7 100644 --- a/extern/storage-sealing/sealiface/config.go +++ b/extern/storage-sealing/sealiface/config.go @@ -18,6 +18,8 @@ type Config struct { // includes failed, 0 = no limit MaxSealingSectorsForDeals uint64 + MakeNewSectorForDeals bool + WaitDealsDelay time.Duration CommittedCapacitySectorLifetime time.Duration diff --git a/extern/storage-sealing/sector_state.go b/extern/storage-sealing/sector_state.go index ba6df7ff4..5c2c56171 100644 --- a/extern/storage-sealing/sector_state.go +++ b/extern/storage-sealing/sector_state.go @@ -52,11 +52,14 @@ var ExistSectorStateList = map[SectorState]struct{}{ ProveReplicaUpdate: {}, SubmitReplicaUpdate: {}, ReplicaUpdateWait: {}, + UpdateActivating: {}, + ReleaseSectorKey: {}, FinalizeReplicaUpdate: {}, SnapDealsAddPieceFailed: {}, SnapDealsDealsExpired: {}, SnapDealsRecoverDealIDs: {}, ReplicaUpdateFailed: {}, + ReleaseSectorKeyFailed: {}, AbortUpgrade: {}, } @@ -104,6 +107,8 @@ const ( SubmitReplicaUpdate SectorState = "SubmitReplicaUpdate" ReplicaUpdateWait SectorState = "ReplicaUpdateWait" FinalizeReplicaUpdate SectorState = "FinalizeReplicaUpdate" + UpdateActivating SectorState = "UpdateActivating" + ReleaseSectorKey SectorState = "ReleaseSectorKey" // error modes FailedUnrecoverable SectorState = "FailedUnrecoverable" @@ -124,6 +129,7 @@ const ( SnapDealsRecoverDealIDs SectorState = "SnapDealsRecoverDealIDs" AbortUpgrade SectorState = "AbortUpgrade" ReplicaUpdateFailed SectorState = "ReplicaUpdateFailed" + ReleaseSectorKeyFailed SectorState = "ReleaseSectorKeyFailed" Faulty SectorState = 
"Faulty" // sector is corrupted or gone for some reason FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain @@ -153,7 +159,7 @@ func toStatState(st SectorState, finEarly bool) statSectorState { return sstProving } return sstSealing - case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed: + case Proving, UpdateActivating, ReleaseSectorKey, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed: return sstProving } diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index c32ac4c3a..a1c3be460 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -211,7 +211,7 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect tok, _, err := m.Api.ChainHead(ctx.Context()) if err != nil { - log.Errorf("handleCommitting: api error, not proceeding: %+v", err) + log.Errorf("handleSubmitReplicaUpdateFailed: api error, not proceeding: %+v", err) return nil } @@ -237,6 +237,17 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect } } + // Abort upgrade for sectors that went faulty since being marked for upgrade + active, err := sectorActive(ctx.Context(), m.Api, m.maddr, tok, sector.SectorNumber) + if err != nil { + log.Errorf("sector active check: api error, not proceeding: %+v", err) + return nil + } + if !active { + log.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber) + return ctx.Send(SectorAbortUpgrade{}) + } + if err := failedCooldown(ctx, sector); err != nil { return err } @@ -244,6 +255,16 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect return ctx.Send(SectorRetrySubmitReplicaUpdate{}) } +func (m *Sealing) handleReleaseSectorKeyFailed(ctx statemachine.Context, sector SectorInfo) error { + // not much we can do, wait for a bit and try again + + if err := failedCooldown(ctx, sector); err != nil { + return err + } + + return ctx.Send(SectorUpdateActive{}) +} + func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo) error { tok, _, err := m.Api.ChainHead(ctx.Context()) if err != nil { @@ -478,7 +499,7 @@ func (m *Sealing) HandleRecoverDealIDs(ctx statemachine.Context, sector SectorIn } func (m *Sealing) handleSnapDealsRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error { - return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{}) + return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{xerrors.New("failed recovering deal ids")}) } func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, maddr address.Address) ([]int, int, error) { diff --git a/extern/storage-sealing/states_failed_test.go b/extern/storage-sealing/states_failed_test.go index 86f69b11f..9b28f35b1 100644 --- a/extern/storage-sealing/states_failed_test.go +++ b/extern/storage-sealing/states_failed_test.go @@ -1,3 +1,4 @@ +//stm: #unit package sealing_test import ( @@ -49,6 +50,7 @@ func TestStateRecoverDealIDs(t *testing.T) { PieceCID: idCid("newPieceCID"), } + //stm: @CHAIN_STATE_MARKET_STORAGE_DEAL_001, @CHAIN_STATE_NETWORK_VERSION_001 api.EXPECT().StateMarketStorageDealProposal(ctx, dealId, nil).Return(dealProposal, nil) pc := idCid("publishCID") diff --git a/extern/storage-sealing/states_replica_update.go b/extern/storage-sealing/states_replica_update.go index 43d5467ed..8683a11d8 100644 --- 
a/extern/storage-sealing/states_replica_update.go +++ b/extern/storage-sealing/states_replica_update.go @@ -2,17 +2,21 @@ package sealing import ( "bytes" + "context" + "time" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/exitcode" statemachine "github.com/filecoin-project/go-statemachine" api "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" "golang.org/x/xerrors" ) func (m *Sealing) handleReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { - if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state + if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api, true); err != nil { // Sanity check state return handleErrors(ctx, err, sector) } out, err := m.sealer.ReplicaUpdate(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.pieceInfos()) @@ -31,13 +35,28 @@ func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector Sect if sector.CommR == nil { return xerrors.Errorf("invalid sector %d with nil CommR", sector.SectorNumber) } + // Abort upgrade for sectors that went faulty since being marked for upgrade + tok, _, err := m.Api.ChainHead(ctx.Context()) + if err != nil { + log.Errorf("handleProveReplicaUpdate: api error, not proceeding: %+v", err) + return nil + } + active, err := sectorActive(ctx.Context(), m.Api, m.maddr, tok, sector.SectorNumber) + if err != nil { + log.Errorf("sector active check: api error, not proceeding: %+v", err) + return nil + } + if !active { + log.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber) + return ctx.Send(SectorAbortUpgrade{}) + } vanillaProofs, err := m.sealer.ProveReplicaUpdate1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed) if err != nil { return ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (1) failed: %w", err)}) } - if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state + if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api, true); err != nil { // Sanity check state return handleErrors(ctx, err, sector) } @@ -59,10 +78,6 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec return nil } - if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state - return handleErrors(ctx, err, sector) - } - if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil { return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } @@ -196,16 +211,76 @@ func (m *Sealing) handleReplicaUpdateWait(ctx statemachine.Context, sector Secto } if !si.SealedCID.Equals(*sector.UpdateSealed) { - log.Errorf("mismatch of expected onchain sealed cid after replica update, expected %s got %s", sector.UpdateSealed, si.SealedCID) - return ctx.Send(SectorAbortUpgrade{}) + return ctx.Send(SectorAbortUpgrade{xerrors.Errorf("mismatch of expected onchain sealed cid after replica update, expected %s got %s", sector.UpdateSealed, si.SealedCID)}) } return ctx.Send(SectorReplicaUpdateLanded{}) } func (m *Sealing) handleFinalizeReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting 
sealing config: %w", err) + } + + if err := m.sealer.FinalizeReplicaUpdate(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(false, cfg.AlwaysKeepUnsealedCopy)); err != nil { + return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)}) + } + return ctx.Send(SectorFinalized{}) } +func (m *Sealing) handleUpdateActivating(ctx statemachine.Context, sector SectorInfo) error { + try := func() error { + mw, err := m.Api.StateWaitMsg(ctx.Context(), *sector.ReplicaUpdateMessage) + if err != nil { + return err + } + + tok, _, err := m.Api.ChainHead(ctx.Context()) + if err != nil { + return err + } + + nv, err := m.Api.StateNetworkVersion(ctx.Context(), tok) + if err != nil { + return err + } + + lb := policy.GetWinningPoStSectorSetLookback(nv) + + targetHeight := mw.Height + lb + InteractivePoRepConfidence + + return m.events.ChainAt(func(context.Context, TipSetToken, abi.ChainEpoch) error { + return ctx.Send(SectorUpdateActive{}) + }, func(ctx context.Context, ts TipSetToken) error { + log.Warn("revert in handleUpdateActivating") + return nil + }, InteractivePoRepConfidence, targetHeight) + } + + for { + err := try() + if err == nil { + break + } + + log.Errorw("error in handleUpdateActivating", "error", err) + + // likely an API issue, sleep for a bit and retry + time.Sleep(time.Minute) + } + + return nil +} + +func (m *Sealing) handleReleaseSectorKey(ctx statemachine.Context, sector SectorInfo) error { + if err := m.sealer.ReleaseSectorKey(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil { + return ctx.Send(SectorReleaseKeyFailed{err}) + } + + return ctx.Send(SectorKeyReleased{}) +} + func handleErrors(ctx statemachine.Context, err error, sector SectorInfo) error { switch err.(type) { case *ErrApi: diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index 2258250f4..3dba325ee 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -198,7 +198,7 @@ func (m *Sealing) handleGetTicket(ctx statemachine.Context, sector SectorInfo) e } func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error { - if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state + if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api, false); err != nil { // Sanity check state switch err.(type) { case *ErrApi: log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err) diff --git a/extern/storage-sealing/upgrade_queue.go b/extern/storage-sealing/upgrade_queue.go index 1aacc9c08..86083930d 100644 --- a/extern/storage-sealing/upgrade_queue.go +++ b/extern/storage-sealing/upgrade_queue.go @@ -3,6 +3,7 @@ package sealing import ( "context" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" @@ -86,19 +87,11 @@ func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) e return xerrors.Errorf("failed to read sector on chain info: %w", err) } - active, err := m.Api.StateMinerActiveSectors(ctx, m.maddr, tok) + active, err := sectorActive(ctx, m.Api, m.maddr, tok, id) if err != nil { - return xerrors.Errorf("failed to check active sectors: %w", err) + return xerrors.Errorf("failed to check if sector is active") } - // Ensure the upgraded sector is active - var found bool - for _, 
si := range active { - if si.SectorNumber == id { - found = true - break - } - } - if !found { + if !active { return xerrors.Errorf("cannot mark inactive sector for upgrade") } @@ -110,6 +103,22 @@ func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) e return m.sectors.Send(uint64(id), SectorStartCCUpdate{}) } +func sectorActive(ctx context.Context, api SealingAPI, maddr address.Address, tok TipSetToken, sector abi.SectorNumber) (bool, error) { + active, err := api.StateMinerActiveSectors(ctx, maddr, tok) + if err != nil { + return false, xerrors.Errorf("failed to check active sectors: %w", err) + } + // Check if sector is among active sectors + var found bool + for _, si := range active { + if si.SectorNumber == sector { + found = true + break + } + } + return found, nil +} + func (m *Sealing) tryUpgradeSector(ctx context.Context, params *miner.SectorPreCommitInfo) big.Int { if len(params.DealIDs) == 0 { return big.Zero() diff --git a/go.mod b/go.mod index 1761efe14..f22400ded 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/filecoin-project/go-fil-markets v1.19.0 github.com/filecoin-project/go-jsonrpc v0.1.5 github.com/filecoin-project/go-padreader v0.0.1 - github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 + github.com/filecoin-project/go-paramfetch v0.0.4 github.com/filecoin-project/go-state-types v0.1.3 github.com/filecoin-project/go-statemachine v1.0.1 github.com/filecoin-project/go-statestore v0.2.0 @@ -51,7 +51,7 @@ require ( github.com/filecoin-project/specs-actors/v5 v5.0.4 github.com/filecoin-project/specs-actors/v6 v6.0.1 github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1 - github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 + github.com/filecoin-project/specs-storage v0.2.0 github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.1 github.com/gdamore/tcell/v2 v2.2.0 @@ -108,7 +108,7 @@ require ( github.com/kelseyhightower/envconfig v1.4.0 github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-eventbus v0.2.1 - github.com/libp2p/go-libp2p v0.18.0-rc1 + github.com/libp2p/go-libp2p v0.18.0-rc4 github.com/libp2p/go-libp2p-connmgr v0.3.1 // indirect github.com/libp2p/go-libp2p-core v0.14.0 github.com/libp2p/go-libp2p-discovery v0.6.0 @@ -116,13 +116,13 @@ require ( github.com/libp2p/go-libp2p-noise v0.3.0 github.com/libp2p/go-libp2p-peerstore v0.6.0 github.com/libp2p/go-libp2p-pubsub v0.6.1 - github.com/libp2p/go-libp2p-quic-transport v0.16.0 + github.com/libp2p/go-libp2p-quic-transport v0.16.1 github.com/libp2p/go-libp2p-record v0.1.3 - github.com/libp2p/go-libp2p-resource-manager v0.1.2 + github.com/libp2p/go-libp2p-resource-manager v0.1.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 - github.com/libp2p/go-libp2p-swarm v0.10.0 + github.com/libp2p/go-libp2p-swarm v0.10.1 github.com/libp2p/go-libp2p-tls v0.3.1 - github.com/libp2p/go-libp2p-yamux v0.8.0 + github.com/libp2p/go-libp2p-yamux v0.8.2 github.com/libp2p/go-maddr-filter v0.1.0 github.com/mattn/go-isatty v0.0.14 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 diff --git a/go.sum b/go.sum index 5615223b2..6b725b5df 100644 --- a/go.sum +++ b/go.sum @@ -342,8 +342,8 @@ github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.m github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-padreader v0.0.1 
h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= -github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 h1:+nripp+UI/rhl01w9Gs4V0XDGaVPYPMGU/D/gNVLue0= -github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= +github.com/filecoin-project/go-paramfetch v0.0.4 h1:H+Me8EL8T5+79z/KHYQQcT8NVOzYVqXIi7nhb48tdm8= +github.com/filecoin-project/go-paramfetch v0.0.4/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= @@ -382,8 +382,8 @@ github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/g github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1 h1:FuDaXIbcw2hRsFI8SDTmsGGCE+NumpF6aiBoU/2X5W4= github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M= -github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 h1:oUYOvF7EvdXS0Zmk9mNkaB6Bu0l+WXBYPzVodKMiLug= -github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU= +github.com/filecoin-project/specs-storage v0.2.0 h1:Y4UDv0apRQ3zI2GiPPubi8JblpUZZphEdaJUxCutfyg= +github.com/filecoin-project/specs-storage v0.2.0/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU= github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -994,8 +994,9 @@ github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2 github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM= github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= -github.com/libp2p/go-libp2p v0.18.0-rc1 h1:CFHROLGmMwe/p8tR3sHahg/1NSaZa2EGbu7nDmdC+RY= github.com/libp2p/go-libp2p v0.18.0-rc1/go.mod h1:RgYlH7IIWHXREimC92bw5Lg1V2R5XmSzuLHb5fTnr+8= +github.com/libp2p/go-libp2p v0.18.0-rc4 h1:OUsSbeu7q+Ck/bV9wHDxFzb08ORqBupHhpCmRBhWrJ8= +github.com/libp2p/go-libp2p v0.18.0-rc4/go.mod h1:wzmsk1ioOq9FGQys2BN5BIw4nugP6+R+CyW3JbPEbbs= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E= github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= @@ -1147,8 +1148,9 @@ github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= 
github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= -github.com/libp2p/go-libp2p-quic-transport v0.16.0 h1:aVg9/jr+R2esov5sH7wkXrmYmqJiUjtLMLYX3L9KYdY= github.com/libp2p/go-libp2p-quic-transport v0.16.0/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ= +github.com/libp2p/go-libp2p-quic-transport v0.16.1 h1:N/XqYXHurphPLDfXYhll8NyqzdZYQqAF4GIr7+SmLV8= +github.com/libp2p/go-libp2p-quic-transport v0.16.1/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ -1156,8 +1158,8 @@ github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGd github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0= github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= github.com/libp2p/go-libp2p-resource-manager v0.1.0/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= -github.com/libp2p/go-libp2p-resource-manager v0.1.2 h1:t66B/6EF6ivWEUgvO34NKOT3oPtkb+JTBJHdsIMx+mg= -github.com/libp2p/go-libp2p-resource-manager v0.1.2/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= +github.com/libp2p/go-libp2p-resource-manager v0.1.3 h1:Umf0tW6WNXSb6Uoma0YT56azB5iikL/aeGAP7s7+f5o= +github.com/libp2p/go-libp2p-resource-manager v0.1.3/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY= @@ -1178,8 +1180,9 @@ github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8= -github.com/libp2p/go-libp2p-swarm v0.10.0 h1:1yr7UCwxCN92cw9g9Q+fnJSlk7lOB1RetoEewxhGVL0= github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA= +github.com/libp2p/go-libp2p-swarm v0.10.1 h1:lXW3pgGt+BVmkzcFX61erX7l6Lt+WAamNhwa2Kf3eJM= +github.com/libp2p/go-libp2p-swarm v0.10.1/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1209,8 +1212,9 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZb github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo= 
-github.com/libp2p/go-libp2p-transport-upgrader v0.7.0 h1:ADnLrL7fC4Vy7HPjk9oGof7nDeTqGXuof85Ar6kin9Q= github.com/libp2p/go-libp2p-transport-upgrader v0.7.0/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= +github.com/libp2p/go-libp2p-transport-upgrader v0.7.1 h1:MSMe+tUfxpC9GArTz7a4G5zQKQgGh00Vio87d3j3xIg= +github.com/libp2p/go-libp2p-transport-upgrader v0.7.1/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= @@ -1225,8 +1229,10 @@ github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLw github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08= -github.com/libp2p/go-libp2p-yamux v0.8.0 h1:APQYlttIj+Rr5sfa6siojwsi0ZwcIh/exHIUl9hZr6o= github.com/libp2p/go-libp2p-yamux v0.8.0/go.mod h1:yTkPgN2ib8FHyU1ZcVD7aelzyAqXXwEPbyx+aSKm9h8= +github.com/libp2p/go-libp2p-yamux v0.8.1/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE= +github.com/libp2p/go-libp2p-yamux v0.8.2 h1:6GKWntresp0TFxMP/oSoH96nV8XKJRdynXsdp43dn0Y= +github.com/libp2p/go-libp2p-yamux v0.8.2/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= diff --git a/itests/api_test.go b/itests/api_test.go index c380a6ed8..ad39f8879 100644 --- a/itests/api_test.go +++ b/itests/api_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -19,6 +20,12 @@ import ( ) func TestAPI(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_STATE_MINER_INFO_001 t.Run("direct", func(t *testing.T) { runAPITest(t) }) @@ -116,11 +123,13 @@ func (ts *apiSuite) testSearchMsg(t *testing.T) { sm, err := full.MpoolPushMessage(ctx, msg, nil) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) require.NoError(t, err) require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful") + //stm: @CHAIN_STATE_SEARCH_MSG_001 searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true) require.NoError(t, err) require.NotNil(t, searchRes) diff --git a/itests/ccupgrade_test.go b/itests/ccupgrade_test.go index b63e96d24..6e7a5d090 100644 --- a/itests/ccupgrade_test.go +++ b/itests/ccupgrade_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -18,25 +19,25 @@ import ( ) func TestCCUpgrade(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: 
@CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_STATE_MINER_GET_INFO_001 + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + + //stm: @MINER_SECTOR_LIST_001 kit.QuietMiningLogs() - for _, height := range []abi.ChainEpoch{ - -1, // before - 162, // while sealing - 560, // after upgrade deal - } { - height := height // make linters happy by copying - t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) { - runTestCCUpgrade(t, height) - }) - } + runTestCCUpgrade(t) } -func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) *kit.TestFullNode { +func runTestCCUpgrade(t *testing.T) *kit.TestFullNode { ctx := context.Background() blockTime := 1 * time.Millisecond - client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15)) + client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15), kit.ThroughRPC()) ens.InterconnectAll().BeginMiningMustPost(blockTime) maddr, err := miner.ActorAddress(ctx) @@ -80,6 +81,11 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) *kit.TestFullN status, err := miner.SectorsStatus(ctx, CCUpgrade, true) require.NoError(t, err) assert.Equal(t, 1, len(status.Deals)) + + miner.WaitSectorsProving(ctx, map[abi.SectorNumber]struct{}{ + CCUpgrade: {}, + }) + return client } @@ -126,7 +132,7 @@ func TestCCUpgradeAndPoSt(t *testing.T) { kit.QuietMiningLogs() t.Run("upgrade and then post", func(t *testing.T) { ctx := context.Background() - n := runTestCCUpgrade(t, 100) + n := runTestCCUpgrade(t) ts, err := n.ChainHead(ctx) require.NoError(t, err) start := ts.Height() diff --git a/itests/cli_test.go b/itests/cli_test.go index 0bd1ec3b4..ac7e4d488 100644 --- a/itests/cli_test.go +++ b/itests/cli_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -11,6 +12,11 @@ import ( // TestClient does a basic test to exercise the client CLI commands. 
func TestClient(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 _ = os.Setenv("BELLMAN_NO_GPU", "1") kit.QuietMiningLogs() diff --git a/itests/deadlines_test.go b/itests/deadlines_test.go index c698f1154..f0abdb556 100644 --- a/itests/deadlines_test.go +++ b/itests/deadlines_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -52,6 +53,13 @@ import ( // * asserts that miner B loses power // * asserts that miner D loses power, is inactive func TestDeadlineToggling(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @MINER_SECTOR_LIST_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -108,6 +116,7 @@ func TestDeadlineToggling(t *testing.T) { { minerC.PledgeSectors(ctx, sectorsC, 0, nil) + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK) require.NoError(t, err) @@ -127,6 +136,7 @@ func TestDeadlineToggling(t *testing.T) { expectedPower := types.NewInt(uint64(ssz) * sectorsC) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, maddrC, types.EmptyTSK) require.NoError(t, err) @@ -147,12 +157,14 @@ func TestDeadlineToggling(t *testing.T) { } checkMiner := func(ma address.Address, power abi.StoragePower, active, activeIfCron bool, tsk types.TipSetKey) { + //stm: @CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, ma, tsk) require.NoError(t, err) // make sure it has the expected power. 
require.Equal(t, p.MinerPower.RawBytePower, power) + //stm: @CHAIN_STATE_GET_ACTOR_001 mact, err := client.StateGetActor(ctx, ma, tsk) require.NoError(t, err) @@ -187,6 +199,7 @@ func TestDeadlineToggling(t *testing.T) { checkMiner(maddrB, types.NewInt(0), true, true, uts.Key()) } + //stm: @CHAIN_STATE_NETWORK_VERSION_001 nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK) require.NoError(t, err) require.GreaterOrEqual(t, nv, network.Version12) @@ -246,6 +259,7 @@ func TestDeadlineToggling(t *testing.T) { }, nil) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 r, err := client.StateWaitMsg(ctx, m.Cid(), 2, api.LookbackNoLimit, true) require.NoError(t, err) require.Equal(t, exitcode.Ok, r.Receipt.ExitCode) @@ -298,6 +312,7 @@ func TestDeadlineToggling(t *testing.T) { sectorbit := bitfield.New() sectorbit.Set(uint64(sectorNum)) + //stm: @CHAIN_STATE_SECTOR_PARTITION_001 loca, err := client.StateSectorPartition(ctx, maddrD, sectorNum, types.EmptyTSK) require.NoError(t, err) @@ -329,6 +344,7 @@ func TestDeadlineToggling(t *testing.T) { t.Log("sent termination message:", smsg.Cid()) + //stm: @CHAIN_STATE_WAIT_MSG_001 r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2, api.LookbackNoLimit, true) require.NoError(t, err) require.Equal(t, exitcode.Ok, r.Receipt.ExitCode) diff --git a/itests/deals_512mb_test.go b/itests/deals_512mb_test.go index 766d83835..967e33da4 100644 --- a/itests/deals_512mb_test.go +++ b/itests/deals_512mb_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -12,6 +13,13 @@ import ( ) func TestStorageDealMissingBlock(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 ctx := context.Background() // enable 512MiB proofs so we can conduct larger transfers. 
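// Editor's sketch (not part of the patch): the storage-sealing hunks earlier
// in this diff converge on one guard — a snap upgrade is only marked or
// proven while the sector is still active on chain. Condensed here into a
// single hypothetical helper (abortIfInactive is not a name in the patch);
// Sealing, SealingAPI, sectorActive and the SectorAbortUpgrade event are
// assumed from extern/storage-sealing exactly as added above.
func (m *Sealing) abortIfInactive(ctx statemachine.Context, sector SectorInfo) error {
	tok, _, err := m.Api.ChainHead(ctx.Context())
	if err != nil {
		return xerrors.Errorf("getting chain head: %w", err)
	}
	active, err := sectorActive(ctx.Context(), m.Api, m.maddr, tok, sector.SectorNumber)
	if err != nil {
		return xerrors.Errorf("failed to check if sector is active: %w", err)
	}
	if !active {
		// the sector went faulty since being marked for upgrade, so abort
		// the upgrade rather than prove a replica update for lost power
		return ctx.Send(SectorAbortUpgrade{})
	}
	return nil
}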
diff --git a/itests/deals_concurrent_test.go b/itests/deals_concurrent_test.go index 49a8bb008..18d8da02a 100644 --- a/itests/deals_concurrent_test.go +++ b/itests/deals_concurrent_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -71,6 +72,12 @@ func TestDealWithMarketAndMinerNode(t *testing.T) { } func TestDealCyclesConcurrent(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 if testing.Short() { t.Skip("skipping test in short mode") } diff --git a/itests/deals_max_staging_deals_test.go b/itests/deals_max_staging_deals_test.go index 895a07954..6a4234e02 100644 --- a/itests/deals_max_staging_deals_test.go +++ b/itests/deals_max_staging_deals_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -12,6 +13,13 @@ import ( ) func TestMaxStagingDeals(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 ctx := context.Background() // enable 512MiB proofs so we can conduct larger transfers. 
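// Editor's sketch (not part of the patch): the itests changes throughout
// this diff all follow one scenario-matrix annotation scheme — a file-level
// `//stm: #integration` tag, the behavior IDs a test covers listed at the
// top of its body, and further `//stm: @...` markers placed directly above
// the calls that exercise them. A minimal, hypothetical file showing the
// placement (TestAnnotatedExample is not a real test in this patch):

//stm: #integration
package itests

import (
	"testing"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestAnnotatedExample(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001
	kit.QuietMiningLogs()

	// ... ensemble setup and assertions would go here ...

	//stm: @CHAIN_STATE_WAIT_MSG_001
	// a marker sits immediately above the call it covers
}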
diff --git a/itests/deals_offline_test.go b/itests/deals_offline_test.go index 003f12b11..bb2549026 100644 --- a/itests/deals_offline_test.go +++ b/itests/deals_offline_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -16,7 +17,13 @@ import ( ) func TestOfflineDealFlow(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_DATA_CALCULATE_COMMP_001, @CLIENT_DATA_GENERATE_CAR_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001 runTest := func(t *testing.T, fastRet bool, upscale abi.PaddedPieceSize) { ctx := context.Background() client, miner, ens := kit.EnsembleMinimal(t, kit.WithAllSubsystems()) // no mock proofs @@ -60,6 +67,7 @@ func TestOfflineDealFlow(t *testing.T) { proposalCid := dh.StartDeal(ctx, dp) + //stm: @CLIENT_STORAGE_DEALS_GET_001 // Wait for the deal to reach StorageDealCheckForAcceptance on the client cd, err := client.ClientGetDealInfo(ctx, *proposalCid) require.NoError(t, err) diff --git a/itests/deals_padding_test.go b/itests/deals_padding_test.go index cd15d30d7..c79b6a7db 100644 --- a/itests/deals_padding_test.go +++ b/itests/deals_padding_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -14,7 +15,13 @@ import ( ) func TestDealPadding(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_DATA_GET_DEAL_PIECE_CID_001 kit.QuietMiningLogs() var blockTime = 250 * time.Millisecond @@ -58,6 +65,7 @@ func TestDealPadding(t *testing.T) { // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this time.Sleep(time.Second) + //stm: @CLIENT_STORAGE_DEALS_GET_001 di, err := client.ClientGetDealInfo(ctx, *proposalCid) require.NoError(t, err) require.True(t, di.PieceCID.Equals(pcid)) diff --git a/itests/deals_partial_retrieval_test.go b/itests/deals_partial_retrieval_test.go index b164e70d0..abc5cf411 100644 --- a/itests/deals_partial_retrieval_test.go +++ b/itests/deals_partial_retrieval_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -38,7 +39,13 @@ var ( ) func TestPartialRetrieval(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: 
@CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_RETRIEVAL_RETRIEVE_001 ctx := context.Background() policy.SetPreCommitChallengeDelay(2) diff --git a/itests/deals_power_test.go b/itests/deals_power_test.go index 0c29ad060..27b196109 100644 --- a/itests/deals_power_test.go +++ b/itests/deals_power_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -9,6 +10,12 @@ import ( ) func TestFirstDealEnablesMining(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 // test making a deal with a fresh miner, and see if it starts to mine. if testing.Short() { t.Skip("skipping test in short mode") diff --git a/itests/deals_pricing_test.go b/itests/deals_pricing_test.go index eb28af0bd..b1f1d7e5d 100644 --- a/itests/deals_pricing_test.go +++ b/itests/deals_pricing_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -12,6 +13,12 @@ import ( ) func TestQuotePriceForUnsealedRetrieval(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 var ( ctx = context.Background() blocktime = 50 * time.Millisecond @@ -43,10 +50,12 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) { _, res2, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6}) require.Equal(t, res1.Root, res2.Root) + //stm: @CLIENT_STORAGE_DEALS_GET_001 // Retrieval dealInfo, err := client.ClientGetDealInfo(ctx, *deal1) require.NoError(t, err) + //stm: @CLIENT_RETRIEVAL_FIND_001 // fetch quote -> zero for unsealed price since unsealed file already exists. offers, err := client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) require.NoError(t, err) @@ -56,11 +65,13 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) { require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64()) // remove ONLY one unsealed file + //stm: @STORAGE_LIST_001, @MINER_SECTOR_LIST_001 ss, err := miner.StorageList(context.Background()) require.NoError(t, err) _, err = miner.SectorsList(ctx) require.NoError(t, err) + //stm: @STORAGE_DROP_SECTOR_001, @STORAGE_LIST_001 iLoop: for storeID, sd := range ss { for _, sector := range sd { @@ -70,6 +81,7 @@ iLoop: } } + //stm: @CLIENT_RETRIEVAL_FIND_001 // get retrieval quote -> zero for unsealed price as unsealed file exists. offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) require.NoError(t, err) @@ -89,6 +101,7 @@ iLoop: } } + //stm: @CLIENT_RETRIEVAL_FIND_001 // fetch quote -> non-zero for unseal price as we no more unsealed files. 
offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) require.NoError(t, err) @@ -100,6 +113,10 @@ iLoop: } func TestZeroPricePerByteRetrieval(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 if testing.Short() { t.Skip("skipping test in short mode") } diff --git a/itests/deals_publish_test.go b/itests/deals_publish_test.go index 85a358f06..8d707c235 100644 --- a/itests/deals_publish_test.go +++ b/itests/deals_publish_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -23,6 +24,12 @@ import ( ) func TestPublishDealsBatching(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 var ( ctx = context.Background() publishPeriod = 10 * time.Second @@ -103,6 +110,7 @@ func TestPublishDealsBatching(t *testing.T) { } // Expect a single PublishStorageDeals message that includes the first two deals + //stm: @CHAIN_STATE_LIST_MESSAGES_001 msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1) require.NoError(t, err) count := 0 diff --git a/itests/deals_retry_deal_no_funds_test.go b/itests/deals_retry_deal_no_funds_test.go index 202d86b9f..a14a0d085 100644 --- a/itests/deals_retry_deal_no_funds_test.go +++ b/itests/deals_retry_deal_no_funds_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -26,6 +27,13 @@ var ( ) func TestDealsRetryLackOfFunds(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 ctx := context.Background() oldDelay := policy.GetPreCommitChallengeDelay() policy.SetPreCommitChallengeDelay(5) @@ -105,6 +113,11 @@ func TestDealsRetryLackOfFunds(t *testing.T) { } func TestDealsRetryLackOfFunds_blockInPublishDeal(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 ctx := context.Background() oldDelay := 
policy.GetPreCommitChallengeDelay() policy.SetPreCommitChallengeDelay(5) @@ -181,6 +194,11 @@ func TestDealsRetryLackOfFunds_blockInPublishDeal(t *testing.T) { } func TestDealsRetryLackOfFunds_belowLimit(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 ctx := context.Background() oldDelay := policy.GetPreCommitChallengeDelay() policy.SetPreCommitChallengeDelay(5) diff --git a/itests/deals_test.go b/itests/deals_test.go index 4ad97e969..fb8e6e4f3 100644 --- a/itests/deals_test.go +++ b/itests/deals_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -9,6 +10,12 @@ import ( ) func TestDealsWithSealingAndRPC(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 if testing.Short() { t.Skip("skipping test in short mode") } diff --git a/itests/gateway_test.go b/itests/gateway_test.go index f9e4a0fb6..d5bc9c0eb 100644 --- a/itests/gateway_test.go +++ b/itests/gateway_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -38,6 +39,12 @@ const ( // TestGatewayWalletMsig tests that API calls to wallet and msig can be made on a lite // node that is connected through a gateway to a full API node func TestGatewayWalletMsig(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.QuietMiningLogs() blocktime := 5 * time.Millisecond @@ -116,6 +123,7 @@ func TestGatewayWalletMsig(t *testing.T) { addProposal, err := doSend(proto) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err := lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) @@ -127,6 +135,7 @@ func TestGatewayWalletMsig(t *testing.T) { // Get available balance of msig: should be greater than zero and less // than initial amount msig := execReturn.IDAddress + //stm: @CHAIN_STATE_MINER_AVAILABLE_BALANCE_001 msigBalance, err := lite.MsigGetAvailableBalance(ctx, msig, types.EmptyTSK) require.NoError(t, err) require.Greater(t, msigBalance.Int64(), int64(0)) @@ -139,6 +148,7 @@ func TestGatewayWalletMsig(t *testing.T) { addProposal, err = doSend(proto) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err = 
lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) @@ -156,6 +166,7 @@ func TestGatewayWalletMsig(t *testing.T) { approval1, err := doSend(proto) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err = lite.StateWaitMsg(ctx, approval1, 1, api.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) @@ -169,6 +180,10 @@ func TestGatewayWalletMsig(t *testing.T) { // TestGatewayMsigCLI tests that msig CLI calls can be made // on a lite node that is connected through a gateway to a full API node func TestGatewayMsigCLI(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 kit.QuietMiningLogs() blocktime := 5 * time.Millisecond @@ -180,6 +195,10 @@ func TestGatewayMsigCLI(t *testing.T) { } func TestGatewayDealFlow(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 kit.QuietMiningLogs() blocktime := 5 * time.Millisecond @@ -202,6 +221,10 @@ func TestGatewayDealFlow(t *testing.T) { } func TestGatewayCLIDealFlow(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 kit.QuietMiningLogs() blocktime := 5 * time.Millisecond diff --git a/itests/get_messages_in_ts_test.go b/itests/get_messages_in_ts_test.go index 61219a316..b5ef0387e 100644 --- a/itests/get_messages_in_ts_test.go +++ b/itests/get_messages_in_ts_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -16,6 +17,12 @@ import ( ) func TestChainGetMessagesInTs(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 ctx := context.Background() kit.QuietMiningLogs() @@ -84,6 +91,7 @@ func TestChainGetMessagesInTs(t *testing.T) { } for _, sm := range sms { + //stm: @CHAIN_STATE_WAIT_MSG_001 msgLookup, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) require.NoError(t, err) diff --git a/itests/kit/node_miner.go b/itests/kit/node_miner.go index ff406629c..866c1124b 100644 --- a/itests/kit/node_miner.go +++ b/itests/kit/node_miner.go @@ -87,7 
+87,10 @@ type TestMiner struct { func (tm *TestMiner) PledgeSectors(ctx context.Context, n, existing int, blockNotif <-chan struct{}) { toCheck := tm.StartPledge(ctx, n, existing, blockNotif) + tm.WaitSectorsProving(ctx, toCheck) +} +func (tm *TestMiner) WaitSectorsProving(ctx context.Context, toCheck map[abi.SectorNumber]struct{}) { for len(toCheck) > 0 { tm.FlushSealingBatches(ctx) @@ -105,9 +108,8 @@ func (tm *TestMiner) PledgeSectors(ctx context.Context, n, existing int, blockNo } build.Clock.Sleep(100 * time.Millisecond) - fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + fmt.Printf("WaitSectorsProving: %d %+v\n", len(toCheck), states) } - } func (tm *TestMiner) StartPledge(ctx context.Context, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} { diff --git a/itests/kit/rpc.go b/itests/kit/rpc.go index 35153eb64..61c8a7b23 100644 --- a/itests/kit/rpc.go +++ b/itests/kit/rpc.go @@ -39,6 +39,7 @@ func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode { require.NoError(t, err) srv, maddr := CreateRPCServer(t, handler, l) + fmt.Printf("FULLNODE RPC ENV FOR CLI DEBUGGING `export FULLNODE_API_INFO=%s`\n", "ws://"+srv.Listener.Addr().String()) cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil) require.NoError(t, err) @@ -54,7 +55,9 @@ func minerRpc(t *testing.T, m *TestMiner) *TestMiner { srv, maddr := CreateRPCServer(t, handler, m.RemoteListener) - fmt.Println("creating RPC server for", m.ActorAddr, "at: ", srv.Listener.Addr().String()) + fmt.Printf("creating RPC server for %s at %s\n", m.ActorAddr, srv.Listener.Addr().String()) + fmt.Printf("SP RPC ENV FOR CLI DEBUGGING `export MINER_API_INFO=%s`\n", "ws://"+srv.Listener.Addr().String()) + url := "ws://" + srv.Listener.Addr().String() + "/rpc/v0" cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), url, nil) require.NoError(t, err) diff --git a/itests/multisig_test.go b/itests/multisig_test.go index 9a15e8c0e..09d9254a3 100644 --- a/itests/multisig_test.go +++ b/itests/multisig_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -10,6 +11,12 @@ import ( // TestMultisig does a basic test to exercise the multisig CLI commands func TestMultisig(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.QuietMiningLogs() blockTime := 5 * time.Millisecond diff --git a/itests/nonce_test.go b/itests/nonce_test.go index b50fcbe26..e0c247ed6 100644 --- a/itests/nonce_test.go +++ b/itests/nonce_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -13,6 +14,12 @@ import ( ) func TestNonceIncremental(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: 
@CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 ctx := context.Background() kit.QuietMiningLogs() @@ -51,6 +58,7 @@ func TestNonceIncremental(t *testing.T) { } for _, sm := range sms { + //stm: @CHAIN_STATE_WAIT_MSG_001 _, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) require.NoError(t, err) } diff --git a/itests/paych_api_test.go b/itests/paych_api_test.go index c2d14aeb8..074551a83 100644 --- a/itests/paych_api_test.go +++ b/itests/paych_api_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -27,6 +28,12 @@ import ( ) func TestPaymentChannelsAPI(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.QuietMiningLogs() ctx := context.Background() @@ -110,6 +117,7 @@ func TestPaymentChannelsAPI(t *testing.T) { require.NoError(t, err) preds := state.NewStatePredicates(paymentCreator) finished := make(chan struct{}) + //stm: @CHAIN_STATE_GET_ACTOR_001 err = ev.StateChanged(func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { act, err := paymentCreator.StateGetActor(ctx, channel, ts.Key()) if err != nil { @@ -185,6 +193,7 @@ func TestPaymentChannelsAPI(t *testing.T) { collectMsg, err := paymentReceiver.PaychCollect(ctx, channel) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3, api.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode, "unable to collect on payment channel") diff --git a/itests/paych_cli_test.go b/itests/paych_cli_test.go index f964d7813..781be80a8 100644 --- a/itests/paych_cli_test.go +++ b/itests/paych_cli_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -30,6 +31,12 @@ import ( // TestPaymentChannelsBasic does a basic test to exercise the payment channel CLI // commands func TestPaymentChannelsBasic(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 _ = os.Setenv("BELLMAN_NO_GPU", "1") kit.QuietMiningLogs() @@ -87,6 +94,10 @@ type voucherSpec struct { // TestPaymentChannelStatus tests the payment channel status CLI command func TestPaymentChannelStatus(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, 
@CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 _ = os.Setenv("BELLMAN_NO_GPU", "1") kit.QuietMiningLogs() @@ -167,6 +178,12 @@ func TestPaymentChannelStatus(t *testing.T) { // TestPaymentChannelVouchers does a basic test to exercise some payment // channel voucher commands func TestPaymentChannelVouchers(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 _ = os.Setenv("BELLMAN_NO_GPU", "1") kit.QuietMiningLogs() @@ -299,6 +316,12 @@ func TestPaymentChannelVouchers(t *testing.T) { // TestPaymentChannelVoucherCreateShortfall verifies that if a voucher amount // is greater than what's left in the channel, voucher create fails func TestPaymentChannelVoucherCreateShortfall(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 _ = os.Setenv("BELLMAN_NO_GPU", "1") kit.QuietMiningLogs() diff --git a/itests/sdr_upgrade_test.go b/itests/sdr_upgrade_test.go index f4cefd67c..c1198dd0c 100644 --- a/itests/sdr_upgrade_test.go +++ b/itests/sdr_upgrade_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -17,6 +18,15 @@ import ( ) func TestSDRUpgrade(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CHAIN_STATE_NETWORK_VERSION_001 + + //stm: @MINER_SECTOR_LIST_001 kit.QuietMiningLogs() // oldDelay := policy.GetPreCommitChallengeDelay() diff --git a/itests/sector_finalize_early_test.go b/itests/sector_finalize_early_test.go index fa5cc9dd3..233bc8fcb 100644 --- a/itests/sector_finalize_early_test.go +++ b/itests/sector_finalize_early_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -18,6 +19,13 @@ import ( ) func TestDealsWithFinalizeEarly(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: 
@CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @STORAGE_INFO_001 if testing.Short() { t.Skip("skipping test in short mode") } @@ -42,6 +50,7 @@ func TestDealsWithFinalizeEarly(t *testing.T) { miner.AddStorage(ctx, t, 1000000000, true, false) miner.AddStorage(ctx, t, 1000000000, false, true) + //stm: @STORAGE_LIST_001 sl, err := miner.StorageList(ctx) require.NoError(t, err) for si, d := range sl { @@ -55,6 +64,7 @@ func TestDealsWithFinalizeEarly(t *testing.T) { dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1}) }) + //stm: @STORAGE_LIST_001 sl, err = miner.StorageList(ctx) require.NoError(t, err) for si, d := range sl { diff --git a/itests/sector_miner_collateral_test.go b/itests/sector_miner_collateral_test.go index de3da21f6..af67b132b 100644 --- a/itests/sector_miner_collateral_test.go +++ b/itests/sector_miner_collateral_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -21,6 +22,13 @@ import ( ) func TestMinerBalanceCollateral(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @MINER_SECTOR_LIST_001 kit.QuietMiningLogs() blockTime := 5 * time.Millisecond diff --git a/itests/sector_pledge_test.go b/itests/sector_pledge_test.go index a32eb958f..a6aa1a7c8 100644 --- a/itests/sector_pledge_test.go +++ b/itests/sector_pledge_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -22,6 +23,12 @@ import ( ) func TestPledgeSectors(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.QuietMiningLogs() blockTime := 50 * time.Millisecond @@ -54,6 +61,7 @@ func TestPledgeSectors(t *testing.T) { } func TestPledgeBatching(t *testing.T) { + //stm: @SECTOR_PRE_COMMIT_FLUSH_001, @SECTOR_COMMIT_FLUSH_001 blockTime := 50 * time.Millisecond runTest := func(t *testing.T, nSectors int) { @@ -110,6 +118,12 @@ func TestPledgeBatching(t *testing.T) { } func TestPledgeMaxBatching(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 blockTime := 50 * time.Millisecond runTest := func(t *testing.T) { 
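// Editor's sketch (not part of the patch): the pledge and upgrade tests in
// this section lean on the kit refactor shown earlier, which splits the
// wait loop out of PledgeSectors into a reusable WaitSectorsProving.
// Waiting on an arbitrary sector set now looks like this; miner and ctx are
// assumed from a kit.EnsembleMinimal test setup, and the sector number is a
// placeholder.
toCheck := map[abi.SectorNumber]struct{}{
	abi.SectorNumber(2): {},
}
miner.WaitSectorsProving(ctx, toCheck)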
@@ -173,6 +187,7 @@ func TestPledgeMaxBatching(t *testing.T) { } // Ensure that max aggregate message has propagated to the other node by checking current state + //stm: @CHAIN_STATE_MINER_SECTORS_001 sectorInfosAfter, err := full.StateMinerSectors(ctx, miner.ActorAddr, nil, types.EmptyTSK) require.NoError(t, err) assert.Equal(t, miner5.MaxAggregatedSectors+kit.DefaultPresealsPerBootstrapMiner, len(sectorInfosAfter)) @@ -182,6 +197,12 @@ func TestPledgeMaxBatching(t *testing.T) { } func TestPledgeBeforeNv13(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 blocktime := 50 * time.Millisecond runTest := func(t *testing.T, nSectors int) { diff --git a/itests/sector_terminate_test.go b/itests/sector_terminate_test.go index 2a3143a0a..536e51538 100644 --- a/itests/sector_terminate_test.go +++ b/itests/sector_terminate_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -14,6 +15,12 @@ import ( ) func TestTerminate(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -33,6 +40,7 @@ func TestTerminate(t *testing.T) { ssz, err := miner.ActorSectorSize(ctx, maddr) require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) require.Equal(t, p.MinerPower, p.TotalPower) @@ -45,6 +53,7 @@ func TestTerminate(t *testing.T) { t.Log("wait for power") { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 // Wait until proven. di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -58,6 +67,7 @@ func TestTerminate(t *testing.T) { nSectors++ + //stm: @CHAIN_STATE_MINER_POWER_001 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) require.Equal(t, p.MinerPower, p.TotalPower) @@ -67,6 +77,7 @@ func TestTerminate(t *testing.T) { toTerminate := abi.SectorNumber(3) + //stm: @SECTOR_TERMINATE_001 err = miner.SectorTerminate(ctx, toTerminate) require.NoError(t, err) @@ -79,6 +90,7 @@ loop: t.Log("state: ", si.State, msgTriggerred) switch sealing.SectorState(si.State) { + //stm: @SECTOR_TERMINATE_PENDING_001 case sealing.Terminating: if !msgTriggerred { { @@ -111,6 +123,7 @@ loop: // need to wait for message to be mined and applied. 
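The annotation hunks above all follow one mechanical pattern: each test file gains a `//stm: #integration` (or `#unit`) header comment, and each test function is tagged with the scenario IDs it exercises. A minimal sketch of the convention, assuming the tags are consumed by an external scenario-mapping tool (they are plain comments and inert to the Go compiler):

```go
//stm: #integration
package itests

import "testing"

// File-level //stm: tags classify the suite; function-level tags list
// every scenario ID the test is expected to cover, so a mapping tool
// can trace scenario coverage back to concrete tests.
func TestExampleScenario(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001
	//stm: @CHAIN_STATE_MINER_POWER_001
	t.Skip("illustrative sketch only")
}
```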
time.Sleep(5 * time.Second) + //stm: @CHAIN_STATE_MINER_POWER_001 // check power decreased p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -119,6 +132,7 @@ loop: // check in terminated set { + //stm: @CHAIN_STATE_MINER_GET_PARTITIONS_001 parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK) require.NoError(t, err) require.Greater(t, len(parts), 0) @@ -133,6 +147,7 @@ loop: require.Equal(t, uint64(0), bflen(parts[0].LiveSectors)) } + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -141,6 +156,7 @@ loop: ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) t.Logf("Now head.Height = %d", ts.Height()) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) diff --git a/itests/tape_test.go b/itests/tape_test.go index c6728b834..79f8961e4 100644 --- a/itests/tape_test.go +++ b/itests/tape_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -14,6 +15,12 @@ import ( ) func TestTapeFix(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.QuietMiningLogs() var blocktime = 2 * time.Millisecond diff --git a/itests/verifreg_test.go b/itests/verifreg_test.go index 80a21b0a0..9efefc7b9 100644 --- a/itests/verifreg_test.go +++ b/itests/verifreg_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -23,6 +24,12 @@ import ( ) func TestVerifiedClientTopUp(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 blockTime := 100 * time.Millisecond test := func(nv network.Version, shouldWork bool) func(*testing.T) { @@ -51,6 +58,7 @@ func TestVerifiedClientTopUp(t *testing.T) { defer cancel() // get VRH + //stm: @CHAIN_STATE_VERIFIED_REGISTRY_ROOT_KEY_001 vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{}) fmt.Println(vrh.String()) require.NoError(t, err) @@ -81,6 +89,7 @@ func TestVerifiedClientTopUp(t *testing.T) { sm, err := api.MpoolPushMessage(ctx, msg, nil) require.NoError(t, err, "AddVerifier failed") + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) @@ -102,11 +111,13 @@ func TestVerifiedClientTopUp(t *testing.T) { sm, err = api.MpoolPushMessage(ctx, msg, nil) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) require.NoError(t, err) 
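The terminate test above verifies removal via `StateMinerPartitions`, counting bits in the partition's `LiveSectors` bitfield (the `bflen` helper). A hedged sketch of that check as a standalone helper; `liveSectorCount` is a hypothetical name, and it assumes the deadline has at least one partition, as the test itself asserts:

```go
package sketch

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// liveSectorCount returns the number of live sectors in partition 0 of
// the given deadline; after a successful terminate it should be zero.
func liveSectorCount(ctx context.Context, client api.FullNode, maddr address.Address, dlIdx uint64) (uint64, error) {
	parts, err := client.StateMinerPartitions(ctx, maddr, dlIdx, types.EmptyTSK)
	if err != nil {
		return 0, err
	}
	// LiveSectors is a bitfield; Count reports the number of set bits.
	return parts[0].LiveSectors.Count()
}
```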
require.EqualValues(t, 0, res.Receipt.ExitCode) // check datacap balance + //stm: @CHAIN_STATE_VERIFIED_CLIENT_STATUS_001 dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK) require.NoError(t, err) diff --git a/itests/wdpost_dispute_test.go b/itests/wdpost_dispute_test.go index aa892aca7..fe723a814 100644 --- a/itests/wdpost_dispute_test.go +++ b/itests/wdpost_dispute_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -20,6 +21,12 @@ import ( ) func TestWindowPostDispute(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -61,6 +68,7 @@ func TestWindowPostDispute(t *testing.T) { evilMinerAddr, err := evilMiner.ActorAddress(ctx) require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) @@ -72,6 +80,7 @@ func TestWindowPostDispute(t *testing.T) { ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) t.Logf("Now head.Height = %d", ts.Height()) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) @@ -81,9 +90,11 @@ func TestWindowPostDispute(t *testing.T) { // make sure it has gained power. require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz))) + //stm: @MINER_SECTOR_LIST_001 evilSectors, err := evilMiner.SectorsList(ctx) require.NoError(t, err) evilSectorNo := evilSectors[0] // only one. + //stm: @CHAIN_STATE_SECTOR_PARTITION_001 evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK) require.NoError(t, err) @@ -96,6 +107,7 @@ func TestWindowPostDispute(t *testing.T) { // Wait until we need to prove our sector. for { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 { @@ -109,6 +121,7 @@ func TestWindowPostDispute(t *testing.T) { // Wait until after the proving period. for { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) if di.Index != evilSectorLoc.Deadline { @@ -119,6 +132,7 @@ func TestWindowPostDispute(t *testing.T) { t.Log("accepted evil proof") + //stm: @CHAIN_STATE_MINER_POWER_001 // Make sure the evil node didn't lose any power. p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) @@ -145,11 +159,13 @@ func TestWindowPostDispute(t *testing.T) { require.NoError(t, err) t.Log("waiting dispute") + //stm: @CHAIN_STATE_WAIT_MSG_001 rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) require.NoError(t, err) require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error()) } + //stm: @CHAIN_STATE_MINER_POWER_001 // Objection SUSTAINED! 
// Make sure the evil node lost power. p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) @@ -162,6 +178,7 @@ func TestWindowPostDispute(t *testing.T) { // First, recover the sector. { + //stm: @CHAIN_STATE_MINER_INFO_001 minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) @@ -186,6 +203,7 @@ func TestWindowPostDispute(t *testing.T) { sm, err := client.MpoolPushMessage(ctx, msg, nil) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) require.NoError(t, err) require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error()) @@ -193,6 +211,7 @@ func TestWindowPostDispute(t *testing.T) { // Then wait for the deadline. for { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) if di.Index == evilSectorLoc.Deadline { @@ -210,6 +229,11 @@ func TestWindowPostDispute(t *testing.T) { } func TestWindowPostDisputeFails(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CHAIN_STATE_MINER_GET_DEADLINES_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -232,6 +256,7 @@ func TestWindowPostDisputeFails(t *testing.T) { miner.PledgeSectors(ctx, 10, 0, nil) + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -246,6 +271,7 @@ func TestWindowPostDisputeFails(t *testing.T) { require.NoError(t, err) expectedPower := types.NewInt(uint64(ssz) * (kit.DefaultPresealsPerBootstrapMiner + 10)) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -271,6 +297,7 @@ waitForProof: } for { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) // wait until the deadline finishes. 
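This wait-until-deadline loop recurs in nearly every window-PoSt test above, always behind the `@CHAIN_STATE_MINER_CALCULATE_DEADLINE_001` tag. A self-contained sketch of the pattern as a helper (the poll interval here is arbitrary; the tests sleep for the chain's block time):

```go
package sketch

import (
	"context"
	"time"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
)

// waitForDeadline polls StateMinerProvingDeadline until the miner's
// current proving deadline reaches dlIdx, lining the test up with a
// specific sector's deadline before proceeding.
func waitForDeadline(ctx context.Context, client api.FullNode, maddr address.Address, dlIdx uint64) error {
	for {
		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		if err != nil {
			return err
		}
		if di.Index == dlIdx {
			return nil
		}
		build.Clock.Sleep(100 * time.Millisecond)
	}
}
```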
@@ -314,11 +341,13 @@ func submitBadProof( return err } + //stm: @CHAIN_STATE_MINER_INFO_001 minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key()) if err != nil { return err } + //stm: @CHAIN_STATE_GET_RANDOMNESS_FROM_TICKETS_001 commEpoch := di.Open commRand, err := client.StateGetRandomnessFromTickets( ctx, crypto.DomainSeparationTag_PoStChainCommit, @@ -355,6 +384,7 @@ func submitBadProof( return err } + //stm: @CHAIN_STATE_WAIT_MSG_001 rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { return err diff --git a/itests/wdpost_test.go b/itests/wdpost_test.go index d87059bb4..bbeedb8d8 100644 --- a/itests/wdpost_test.go +++ b/itests/wdpost_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -23,6 +24,12 @@ import ( ) func TestWindowedPost(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -58,6 +65,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, maddr, err := miner.ActorAddress(ctx) require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -71,6 +79,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) t.Logf("Now head.Height = %d", ts.Height()) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -84,6 +93,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, // Drop 2 sectors from deadline 2 partition 0 (full partition / deadline) { + //stm: @CHAIN_STATE_MINER_GET_PARTITIONS_001 parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK) require.NoError(t, err) require.Greater(t, len(parts), 0) @@ -109,6 +119,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, // Drop 1 sectors from deadline 3 partition 0 { + //stm: @CHAIN_STATE_MINER_GET_PARTITIONS_001 parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK) require.NoError(t, err) require.Greater(t, len(parts), 0) @@ -137,6 +148,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, require.NoError(t, err) } + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -147,6 +159,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) t.Logf("Now head.Height = %d", ts.Height()) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -160,6 +173,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false) 
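`submitBadProof` above commits the forged PoSt to chain randomness drawn at the deadline's open epoch (hence the `@CHAIN_STATE_GET_RANDOMNESS_FROM_TICKETS_001` tag). A sketch of that fetch in isolation, assuming the `crypto` package is `go-state-types/crypto`, where the PoSt chain-commit domain-separation tag lives:

```go
package sketch

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// postCommitRandomness mirrors the fetch in submitBadProof: the proof
// commits to ticket randomness drawn at commEpoch (the deadline's Open).
func postCommitRandomness(ctx context.Context, client api.FullNode, commEpoch abi.ChainEpoch, tsk types.TipSetKey) (abi.Randomness, error) {
	return client.StateGetRandomnessFromTickets(
		ctx, crypto.DomainSeparationTag_PoStChainCommit,
		commEpoch, nil, tsk,
	)
}
```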
require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -169,6 +183,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) t.Logf("Now head.Height = %d", ts.Height()) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -183,6 +198,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, { // Wait until proven. + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -193,6 +209,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, t.Logf("Now head.Height = %d", ts.Height()) } + //stm: @CHAIN_STATE_MINER_POWER_001 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -203,6 +220,12 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, } func TestWindowPostBaseFeeNoBurn(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -225,10 +248,12 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) { maddr, err := miner.ActorAddress(ctx) require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_INFO_001 mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) require.NoError(t, err) miner.PledgeSectors(ctx, nSectors, 0, nil) + //stm: @CHAIN_STATE_GET_ACTOR_001 wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) require.NoError(t, err) en := wact.Nonce @@ -237,6 +262,7 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) { waitForProof: for { + //stm: @CHAIN_STATE_GET_ACTOR_001 wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) require.NoError(t, err) if wact.Nonce > en { @@ -246,9 +272,11 @@ waitForProof: build.Clock.Sleep(blocktime) } + //stm: @CHAIN_STATE_LIST_MESSAGES_001 slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) require.NoError(t, err) + //stm: @CHAIN_STATE_REPLAY_001 pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) require.NoError(t, err) @@ -256,6 +284,12 @@ waitForProof: } func TestWindowPostBaseFeeBurn(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -271,10 +305,12 @@ func TestWindowPostBaseFeeBurn(t *testing.T) { maddr, err := miner.ActorAddress(ctx) 
require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_INFO_001 mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) require.NoError(t, err) miner.PledgeSectors(ctx, 10, 0, nil) + //stm: @CHAIN_STATE_GET_ACTOR_001 wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) require.NoError(t, err) en := wact.Nonce @@ -283,6 +319,7 @@ func TestWindowPostBaseFeeBurn(t *testing.T) { waitForProof: for { + //stm: @CHAIN_STATE_GET_ACTOR_001 wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) require.NoError(t, err) if wact.Nonce > en { @@ -292,9 +329,11 @@ waitForProof: build.Clock.Sleep(blocktime) } + //stm: @CHAIN_STATE_LIST_MESSAGES_001 slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) require.NoError(t, err) + //stm: @CHAIN_STATE_REPLAY_001 pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) require.NoError(t, err) diff --git a/markets/retrievaladapter/provider_test.go b/markets/retrievaladapter/provider_test.go index eca3b1152..18dfe42a0 100644 --- a/markets/retrievaladapter/provider_test.go +++ b/markets/retrievaladapter/provider_test.go @@ -1,3 +1,4 @@ +//stm: #unit package retrievaladapter import ( @@ -18,6 +19,7 @@ import ( ) func TestGetPricingInput(t *testing.T) { + //stm: @CHAIN_STATE_MARKET_STORAGE_DEAL_001 ctx := context.Background() tsk := &types.TipSet{} key := tsk.Key() diff --git a/markets/storageadapter/dealstatematcher_test.go b/markets/storageadapter/dealstatematcher_test.go index cb0360778..755ecaf47 100644 --- a/markets/storageadapter/dealstatematcher_test.go +++ b/markets/storageadapter/dealstatematcher_test.go @@ -1,3 +1,4 @@ +//stm: #unit package storageadapter import ( @@ -27,6 +28,7 @@ import ( ) func TestDealStateMatcher(t *testing.T) { + //stm: @CHAIN_STATE_GET_ACTOR_001 ctx := context.Background() bs := bstore.NewMemorySync() store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) diff --git a/node/config/def.go b/node/config/def.go index 644c28bea..157350866 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -85,7 +85,7 @@ func DefaultFullNode() *FullNode { Splitstore: Splitstore{ ColdStoreType: "universal", HotStoreType: "badger", - MarkSetType: "map", + MarkSetType: "badger", HotStoreFullGCFrequency: 20, }, @@ -156,6 +156,7 @@ func DefaultStorageMiner() *StorageMiner { ConsiderVerifiedStorageDeals: true, ConsiderUnverifiedStorageDeals: true, PieceCidBlocklist: []cid.Cid{}, + MakeNewSectorForDeals: true, // TODO: It'd be nice to set this based on sector size MaxDealStartDelay: Duration(time.Hour * 24 * 14), ExpectedSealDuration: Duration(time.Hour * 24), diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 59181f9f6..d160643c2 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -247,6 +247,14 @@ Default value: 1 minute.`, Comment: `Maximum expected amount of time getting the deal into a sealed sector will take This includes the time the deal will need to get transferred and published before being assigned to a sector`, + }, + { + Name: "MakeNewSectorForDeals", + Type: "bool", + + Comment: `Whether new sectors are created to pack incoming deals +When this is set to false no new sectors will be created for sealing incoming deals +This is useful for forcing all deals to be assigned as snap deals to sectors marked for upgrade`, }, { Name: "MaxDealStartDelay", @@ -810,7 +818,7 @@ Only currently supported value is "badger".`, Type: "string", Comment: `MarkSetType specifies the type of the markset. 
-It can be "map" (default) for in memory marking or "badger" for on-disk marking.`, +It can be "map" for in memory marking or "badger" (default) for on-disk marking.`, }, { Name: "HotStoreMessageRetention", diff --git a/node/config/load_test.go b/node/config/load_test.go index 9abe8a54b..9267b44ad 100644 --- a/node/config/load_test.go +++ b/node/config/load_test.go @@ -1,3 +1,4 @@ +//stm: #unit package config import ( diff --git a/node/config/types.go b/node/config/types.go index 7e9064614..762b8b6eb 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -120,6 +120,10 @@ type DealmakingConfig struct { // This includes the time the deal will need to get transferred and published // before being assigned to a sector ExpectedSealDuration Duration + // Whether new sectors are created to pack incoming deals + // When this is set to false no new sectors will be created for sealing incoming deals + // This is useful for forcing all deals to be assigned as snap deals to sectors marked for upgrade + MakeNewSectorForDeals bool // Maximum amount of time proposed deal StartEpoch can be in future MaxDealStartDelay Duration // When a deal is ready to publish, the amount of time to wait for more @@ -359,7 +363,7 @@ type Splitstore struct { // Only currently supported value is "badger". HotStoreType string // MarkSetType specifies the type of the markset. - // It can be "map" (default) for in memory marking or "badger" for on-disk marking. + // It can be "map" for in memory marking or "badger" (default) for on-disk marking. MarkSetType string // HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond diff --git a/node/impl/client/client_test.go b/node/impl/client/client_test.go index 1be225278..1b195816d 100644 --- a/node/impl/client/client_test.go +++ b/node/impl/client/client_test.go @@ -1,3 +1,4 @@ +//stm: #unit package client import ( @@ -31,6 +32,7 @@ import ( var testdata embed.FS func TestImportLocal(t *testing.T) { + //stm: @CLIENT_STORAGE_DEALS_IMPORT_LOCAL_001, @CLIENT_RETRIEVAL_FIND_001 ds := dssync.MutexWrap(datastore.NewMapDatastore()) dir := t.TempDir() im := imports.NewManager(ds, dir) @@ -44,6 +46,7 @@ func TestImportLocal(t *testing.T) { b, err := testdata.ReadFile("testdata/payload.txt") require.NoError(t, err) + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 root, err := a.ClientImportLocal(ctx, bytes.NewReader(b)) require.NoError(t, err) require.NotEqual(t, cid.Undef, root) @@ -56,6 +59,7 @@ func TestImportLocal(t *testing.T) { require.Equal(t, root, *it.Root) require.True(t, strings.HasPrefix(it.CARPath, dir)) + //stm: @CLIENT_DATA_HAS_LOCAL_001 local, err := a.ClientHasLocal(ctx, root) require.NoError(t, err) require.True(t, local) diff --git a/node/impl/client/import_test.go b/node/impl/client/import_test.go index adf6531d0..1d7af86cb 100644 --- a/node/impl/client/import_test.go +++ b/node/impl/client/import_test.go @@ -1,3 +1,4 @@ +//stm: #unit package client import ( diff --git a/node/impl/full/gas_test.go b/node/impl/full/gas_test.go index 028e039ce..ac2835790 100644 --- a/node/impl/full/gas_test.go +++ b/node/impl/full/gas_test.go @@ -1,3 +1,4 @@ +//stm: #unit package full import ( diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 3ebac1409..38a34dfe8 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -391,6 +391,10 @@ func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.Sect return sm.Miner.MarkForUpgrade(ctx, id, snap) } +func (sm *StorageMinerAPI) SectorAbortUpgrade(ctx 
context.Context, number abi.SectorNumber) error { + return sm.Miner.SectorAbortUpgrade(number) +} + func (sm *StorageMinerAPI) SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) { return sm.Miner.CommitFlush(ctx) } diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index da1a016f7..f4a2364fd 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -925,6 +925,8 @@ func ToSealingConfig(cfg *config.StorageMiner) sealiface.Config { MaxWaitDealsSectors: cfg.Sealing.MaxWaitDealsSectors, MaxSealingSectors: cfg.Sealing.MaxSealingSectors, MaxSealingSectorsForDeals: cfg.Sealing.MaxSealingSectorsForDeals, + StartEpochSealingBuffer: abi.ChainEpoch(cfg.Dealmaking.StartEpochSealingBuffer), + MakeNewSectorForDeals: cfg.Dealmaking.MakeNewSectorForDeals, CommittedCapacitySectorLifetime: time.Duration(cfg.Sealing.CommittedCapacitySectorLifetime), WaitDealsDelay: time.Duration(cfg.Sealing.WaitDealsDelay), AlwaysKeepUnsealedCopy: cfg.Sealing.AlwaysKeepUnsealedCopy, @@ -950,8 +952,6 @@ func ToSealingConfig(cfg *config.StorageMiner) sealiface.Config { TerminateBatchMax: cfg.Sealing.TerminateBatchMax, TerminateBatchMin: cfg.Sealing.TerminateBatchMin, TerminateBatchWait: time.Duration(cfg.Sealing.TerminateBatchWait), - - StartEpochSealingBuffer: abi.ChainEpoch(cfg.Dealmaking.StartEpochSealingBuffer), } } diff --git a/node/repo/fsrepo_test.go b/node/repo/fsrepo_test.go index bd03cc084..381ebdcbe 100644 --- a/node/repo/fsrepo_test.go +++ b/node/repo/fsrepo_test.go @@ -1,3 +1,4 @@ +//stm: #unit package repo import ( diff --git a/node/repo/memrepo_test.go b/node/repo/memrepo_test.go index 965bc02c1..fdf609bac 100644 --- a/node/repo/memrepo_test.go +++ b/node/repo/memrepo_test.go @@ -1,3 +1,4 @@ +//stm: #unit package repo import ( diff --git a/node/repo/repo_test.go b/node/repo/repo_test.go index 444fab267..cd19f86f6 100644 --- a/node/repo/repo_test.go +++ b/node/repo/repo_test.go @@ -1,3 +1,4 @@ +//stm: #unit package repo import ( diff --git a/paychmgr/paych_test.go b/paychmgr/paych_test.go index 24214d1b5..e485c4e83 100644 --- a/paychmgr/paych_test.go +++ b/paychmgr/paych_test.go @@ -1,3 +1,4 @@ +//stm: #unit package paychmgr import ( @@ -43,6 +44,9 @@ func TestCheckVoucherValid(t *testing.T) { mock.setAccountAddress(fromAcct, from) mock.setAccountAddress(toAcct, to) + //stm: @TOKEN_PAYCH_VOUCHER_VALID_001, @TOKEN_PAYCH_VOUCHER_VALID_002, @TOKEN_PAYCH_VOUCHER_VALID_003 + //stm: @TOKEN_PAYCH_VOUCHER_VALID_004, @TOKEN_PAYCH_VOUCHER_VALID_005, @TOKEN_PAYCH_VOUCHER_VALID_006, @TOKEN_PAYCH_VOUCHER_VALID_007 + //stm: @TOKEN_PAYCH_VOUCHER_VALID_009, @TOKEN_PAYCH_VOUCHER_VALID_010 tcases := []struct { name string expectError bool @@ -242,6 +246,7 @@ func TestCreateVoucher(t *testing.T) { Lane: 1, Amount: voucherLane1Amt, } + //stm: @TOKEN_PAYCH_VOUCHER_CREATE_001 res, err := s.mgr.CreateVoucher(ctx, s.ch, voucher) require.NoError(t, err) require.NotNil(t, res.Voucher) @@ -286,6 +291,7 @@ func TestCreateVoucher(t *testing.T) { Lane: 2, Amount: voucherLane2Amt, } + //stm: @TOKEN_PAYCH_VOUCHER_CREATE_004 res, err = s.mgr.CreateVoucher(ctx, s.ch, voucher) require.NoError(t, err) @@ -296,6 +302,7 @@ func TestCreateVoucher(t *testing.T) { } func TestAddVoucherDelta(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_VOUCHERS_001 ctx := context.Background() // Set up a manager with a single payment channel @@ -357,6 +364,7 @@ func TestAddVoucherNextLane(t *testing.T) { require.NoError(t, err) require.EqualValues(t, ci.NextLane, 3) + //stm: @TOKEN_PAYCH_ALLOCATE_LANE_001 
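With `SectorAbortUpgrade` now threaded through the API interface, the generated proxy, `StorageMinerAPI`, and `Miner` (above), a caller can abort an in-flight snap-deal upgrade over JSON-RPC. A hedged usage sketch, assuming the existing `NewStorageMinerRPCV0` client helper exposes the new method as the v1 API does; the endpoint, token, and sector number are placeholders:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/filecoin-project/go-state-types/abi"
	lcli "github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Hypothetical local endpoint and admin token; substitute your own.
	header := http.Header{}
	header.Set("Authorization", "Bearer "+"<ADMIN_TOKEN>")

	miner, closer, err := lcli.NewStorageMinerRPCV0(ctx, "ws://127.0.0.1:2345/rpc/v0", header)
	if err != nil {
		log.Fatal(err)
	}
	defer closer()

	// Abort the in-flight upgrade on sector 3. Per the API comment,
	// this is only valid for sectors currently being upgraded.
	if err := miner.SectorAbortUpgrade(ctx, abi.SectorNumber(3)); err != nil {
		log.Fatal(err)
	}
}
```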
// Allocate a lane (should be lane 3) lane, err := s.mgr.AllocateLane(ctx, s.ch) require.NoError(t, err) @@ -393,6 +401,7 @@ func TestAllocateLane(t *testing.T) { // Set up a manager with a single payment channel s := testSetupMgrWithChannel(t) + //stm: @TOKEN_PAYCH_ALLOCATE_LANE_001 // First lane should be 0 lane, err := s.mgr.AllocateLane(ctx, s.ch) require.NoError(t, err) @@ -447,6 +456,7 @@ func TestAllocateLaneWithExistingLaneState(t *testing.T) { _, err = mgr.AddVoucherInbound(ctx, ch, sv, nil, minDelta) require.NoError(t, err) + //stm: @TOKEN_PAYCH_ALLOCATE_LANE_001 // Allocate lane should return the next lane (lane 3) lane, err := mgr.AllocateLane(ctx, ch) require.NoError(t, err) @@ -509,6 +519,7 @@ func TestAddVoucherInboundWalletKey(t *testing.T) { } func TestBestSpendable(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_VOUCHERS_001 ctx := context.Background() // Set up a manager with a single payment channel @@ -551,6 +562,7 @@ func TestBestSpendable(t *testing.T) { }, }) + //stm: @TOKEN_PAYCH_BEST_SPENDABLE_001 // Verify best spendable vouchers on each lane vouchers, err := BestSpendableByLane(ctx, bsapi, s.ch) require.NoError(t, err) @@ -691,6 +703,7 @@ func TestSubmitVoucher(t *testing.T) { err = p3.UnmarshalCBOR(bytes.NewReader(msg.Message.Params)) require.NoError(t, err) + //stm: @TOKEN_PAYCH_LIST_VOUCHERS_001 // Verify that vouchers are marked as submitted vis, err := s.mgr.ListVouchers(ctx, s.ch) require.NoError(t, err) diff --git a/paychmgr/paychget_test.go b/paychmgr/paychget_test.go index c53a85d86..a508fdfc9 100644 --- a/paychmgr/paychget_test.go +++ b/paychmgr/paychget_test.go @@ -122,6 +122,7 @@ func TestPaychGetCreateOffchainReserveFails(t *testing.T) { // TestPaychGetCreateChannelThenAddFunds tests creating a channel and then // adding funds to it func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -296,6 +297,7 @@ func TestPaychGetCreatePrefundedChannelThenAddFunds(t *testing.T) { // operation is queued up behind a create channel operation, and the create // channel fails, then the waiting operation can succeed. func TestPaychGetCreateChannelWithErrorThenCreateAgain(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -360,6 +362,7 @@ func TestPaychGetCreateChannelWithErrorThenCreateAgain(t *testing.T) { // TestPaychGetRecoverAfterError tests that after a create channel fails, the // next attempt to create channel can succeed. func TestPaychGetRecoverAfterError(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -412,6 +415,7 @@ func TestPaychGetRecoverAfterError(t *testing.T) { // TestPaychGetRecoverAfterAddFundsError tests that after an add funds fails, the // next attempt to add funds can succeed. func TestPaychGetRecoverAfterAddFundsError(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -502,6 +506,7 @@ func TestPaychGetRecoverAfterAddFundsError(t *testing.T) { // right after the create channel message is sent, the channel will be // created when the system restarts. 
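Stepping back to the config hunks above: the new `MakeNewSectorForDeals` knob (default `true`) and the relocated `StartEpochSealingBuffer` both reach the sealing subsystem through `ToSealingConfig`. A minimal sketch of that flow, using only the functions and fields shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/modules"
)

func main() {
	cfg := config.DefaultStorageMiner()

	// Setting this to false stops the miner from opening new sectors for
	// incoming deals, forcing them to be packed as snap deals into
	// sectors already marked for upgrade (per the doc_gen comment above).
	cfg.Dealmaking.MakeNewSectorForDeals = false

	sealCfg := modules.ToSealingConfig(cfg)
	fmt.Println(sealCfg.MakeNewSectorForDeals) // false
}
```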
func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -589,6 +594,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { // right after the add funds message is sent, the add funds will be // processed when the system restarts. func TestPaychGetRestartAfterAddFundsMsg(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -662,6 +668,7 @@ func TestPaychGetRestartAfterAddFundsMsg(t *testing.T) { // TestPaychGetWait tests that GetPaychWaitReady correctly waits for the // channel to be created or funds to be added func TestPaychGetWait(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -727,6 +734,7 @@ func TestPaychGetWait(t *testing.T) { // TestPaychGetWaitErr tests that GetPaychWaitReady correctly handles errors func TestPaychGetWaitErr(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -774,6 +782,7 @@ func TestPaychGetWaitErr(t *testing.T) { // TestPaychGetWaitCtx tests that GetPaychWaitReady returns early if the context // is cancelled func TestPaychGetWaitCtx(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx, cancel := context.WithCancel(context.Background()) store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -803,6 +812,7 @@ func TestPaychGetWaitCtx(t *testing.T) { // progress and two add funds are queued up behind it, the two add funds // will be merged func TestPaychGetMergeAddFunds(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -1383,6 +1393,7 @@ func TestPaychGetMergePrefundAndReserveOneOffchainOneFail(t *testing.T) { // TestPaychGetMergeAddFundsCtxCancelOne tests that when a queued add funds // request is cancelled, its amount is removed from the total merged add funds func TestPaychGetMergeAddFundsCtxCancelOne(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -1488,6 +1499,7 @@ func TestPaychGetMergeAddFundsCtxCancelOne(t *testing.T) { // TestPaychGetMergeAddFundsCtxCancelAll tests that when all queued add funds // requests are cancelled, no add funds message is sent func TestPaychGetMergeAddFundsCtxCancelAll(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -1562,6 +1574,7 @@ func TestPaychGetMergeAddFundsCtxCancelAll(t *testing.T) { // TestPaychAvailableFunds tests that PaychAvailableFunds returns the correct // channel state func TestPaychAvailableFunds(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001, @TOKEN_PAYCH_AVAILABLE_FUNDS_001, @TOKEN_PAYCH_AVAILABLE_FUNDS_002, @TOKEN_PAYCH_AVAILABLE_FUNDS_003 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) diff --git a/paychmgr/paychvoucherfunds_test.go b/paychmgr/paychvoucherfunds_test.go index 4a2f7e31a..dc894a04f 100644 --- a/paychmgr/paychvoucherfunds_test.go +++ b/paychmgr/paychvoucherfunds_test.go @@ -23,6 +23,7 @@ import ( // insufficient funds, then adding funds to the channel, then adding the // voucher again func 
TestPaychAddVoucherAfterAddFunds(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) diff --git a/paychmgr/settle_test.go b/paychmgr/settle_test.go index bc88df2f0..ffbffc660 100644 --- a/paychmgr/settle_test.go +++ b/paychmgr/settle_test.go @@ -14,6 +14,7 @@ import ( ) func TestPaychSettle(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001, @TOKEN_PAYCH_SETTLE_001, @TOKEN_PAYCH_LIST_CHANNELS_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) diff --git a/storage/miner_sealing.go b/storage/miner_sealing.go index d8ef26835..a22c32a40 100644 --- a/storage/miner_sealing.go +++ b/storage/miner_sealing.go @@ -86,6 +86,10 @@ func (m *Miner) IsMarkedForUpgrade(id abi.SectorNumber) bool { return m.sealing.IsMarkedForUpgrade(id) } +func (m *Miner) SectorAbortUpgrade(sectorNum abi.SectorNumber) error { + return m.sealing.AbortUpgrade(sectorNum) +} + func (m *Miner) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error) { return m.sealing.SectorAddPieceToAny(ctx, size, r, d) } diff --git a/testplans/docker-images/proof-parameters.json b/testplans/docker-images/proof-parameters.json index 1d4584454..88bb0bfa3 100644 --- a/testplans/docker-images/proof-parameters.json +++ b/testplans/docker-images/proof-parameters.json @@ -1,4 +1,54 @@ { + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.params": { + "cid": "Qma5WL6abSqYg9uUQAZ3EHS286bsNsha7oAGsJBD48Bq2q", + "digest": "c3ad7bb549470b82ad52ed070aebb4f4", + "sector_size": 536870912 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.vk": { + "cid": "QmUa7f9JtJMsqJJ3s3ZXk6WyF4xJLE8FiqYskZGgk8GCDv", + "digest": "994c5b7d450ca9da348c910689f2dc7f", + "sector_size": 536870912 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.params": { + "cid": "QmQiT4qBGodrVNEgVTDXxBNDdPbaD8Ag7Sx3ZTq1zHX79S", + "digest": "5aedd2cf3e5c0a15623d56a1b43110ad", + "sector_size": 8388608 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.vk": { + "cid": "QmdcpKUQvHM8RFRVKbk1yHfEqMcBzhtFWKRp9SNEmWq37i", + "digest": "abd80269054d391a734febdac0d2e687", + "sector_size": 8388608 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.params": { + "cid": "QmYM6Hg7mjmvA3ZHTsqkss1fkdyDju5dDmLiBZGJ5pz9y9", + "digest": "311f92a3e75036ced01b1c0025f1fa0c", + "sector_size": 2048 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.vk": { + "cid": "QmaQsTLL3nc5dw6wAvaioJSBfd1jhQrA2o6ucFf7XeV74P", + "digest": "eadad9784969890d30f2749708c79771", + "sector_size": 2048 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.params": { + "cid": "QmNPc75iEfcahCwNKdqnWLtxnjspUGGR4iscjiz3wP3RtS", + "digest": "1b3cfd761a961543f9eb273e435a06a2", + "sector_size": 34359738368 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.vk": { + "cid": "QmdFFUe1gcz9MMHc6YW8aoV48w4ckvcERjt7PkydQAMfCN", + 
"digest": "3a6941983754737fde880d29c7094905", + "sector_size": 34359738368 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.params": { + "cid": "QmUB6xTVjzBQGuDNeyJMrrJ1byk58vhPm8eY2Lv9pgwanp", + "digest": "1a392e7b759fb18e036c7559b5ece816", + "sector_size": 68719476736 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.vk": { + "cid": "Qmd794Jty7k26XJ8Eg4NDEks65Qk8G4GVfGkwqvymv8HAg", + "digest": "80e366df2f1011953c2d01c7b7c9ee8e", + "sector_size": 68719476736 + }, "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { "cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR", "digest": "7610b9f82bfc88405b7a832b651ce2f6", diff --git a/testplans/lotus-soup/go.mod b/testplans/lotus-soup/go.mod index 6b4be1d97..f227452e7 100644 --- a/testplans/lotus-soup/go.mod +++ b/testplans/lotus-soup/go.mod @@ -8,8 +8,8 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/drand/drand v1.3.0 github.com/filecoin-project/go-address v0.0.6 - github.com/filecoin-project/go-data-transfer v1.12.1 - github.com/filecoin-project/go-fil-markets v1.14.1 + github.com/filecoin-project/go-data-transfer v1.14.0 + github.com/filecoin-project/go-fil-markets v1.19.0 github.com/filecoin-project/go-jsonrpc v0.1.5 github.com/filecoin-project/go-state-types v0.1.3 github.com/filecoin-project/go-storedcounter v0.1.0 @@ -23,15 +23,15 @@ require ( github.com/ipfs/go-datastore v0.5.1 github.com/ipfs/go-ipfs-files v0.0.9 github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log/v2 v2.4.0 + github.com/ipfs/go-log/v2 v2.5.0 github.com/ipfs/go-merkledag v0.5.1 - github.com/ipfs/go-unixfs v0.2.6 + github.com/ipfs/go-unixfs v0.3.1 github.com/ipld/go-car v0.3.3 github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c - github.com/libp2p/go-libp2p v0.17.0 - github.com/libp2p/go-libp2p-core v0.13.0 + github.com/libp2p/go-libp2p v0.18.0-rc4 + github.com/libp2p/go-libp2p-core v0.14.0 github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 - github.com/multiformats/go-multiaddr v0.4.1 + github.com/multiformats/go-multiaddr v0.5.0 github.com/testground/sdk-go v0.2.6 go.opencensus.io v0.23.0 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c diff --git a/testplans/lotus-soup/go.sum b/testplans/lotus-soup/go.sum index e6e4149c8..23877f87b 100644 --- a/testplans/lotus-soup/go.sum +++ b/testplans/lotus-soup/go.sum @@ -133,6 +133,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional 
v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= @@ -405,19 +407,19 @@ github.com/filecoin-project/go-commp-utils v0.1.3/go.mod h1:3ENlD1pZySaUout0p9AN github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v1.12.0/go.mod h1:tDrD2jLU2TpVhd+5B8iqBp0fQRV4lP80WZccKXugjYc= -github.com/filecoin-project/go-data-transfer v1.12.1 h1:gAznAZKySVs2FS6T/vDq7R3f0DewLnxeROe0oOE6bZU= -github.com/filecoin-project/go-data-transfer v1.12.1/go.mod h1:j3HL645YiQFxcM+q7uPlGApILSqeweDABNgZQP7pDYU= -github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff h1:2bG2ggVZ/rInd/YqUfRj4A5siGuYOPxxuD4I8nYLJF0= +github.com/filecoin-project/go-data-transfer v1.14.0 h1:4pnfJk8FYtqcdAg+QRGzaz57seUC/Tz+HJgPuGB7zdg= +github.com/filecoin-project/go-data-transfer v1.14.0/go.mod h1:wNJKhaLLYBJDM3VFvgvYi4iUjPa69pz/1Q5Q4HzX2wE= github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= +github.com/filecoin-project/go-ds-versioning v0.1.1 h1:JiyBqaQlwC+UM0WhcBtVEeT3XrX59mQhT8U3p7nu86o= +github.com/filecoin-project/go-ds-versioning v0.1.1/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= -github.com/filecoin-project/go-fil-markets v1.14.1 h1:Bx+TSbkAN8K97Hpjgu+MpeRFbXIKH/fNpNp1ZGAEH3I= -github.com/filecoin-project/go-fil-markets v1.14.1/go.mod h1:vXOHH3q2+zLk929W+lIq3etuDFTyJJ8nG2DwGHG2R1E= +github.com/filecoin-project/go-fil-markets v1.19.0 h1:kap2q2wTM6tfkVO5gMA5DD9GUeTvkDhMfhjCtEwMDM8= +github.com/filecoin-project/go-fil-markets v1.19.0/go.mod h1:qsb3apmo4RSJYCEq40QxVdU7UZospN6nFJLOBHuaIbc= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -431,8 +433,8 @@ github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.m github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= -github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 h1:+nripp+UI/rhl01w9Gs4V0XDGaVPYPMGU/D/gNVLue0= -github.com/filecoin-project/go-paramfetch 
v0.0.3-0.20220111000201-e42866db1a53/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= +github.com/filecoin-project/go-paramfetch v0.0.4 h1:H+Me8EL8T5+79z/KHYQQcT8NVOzYVqXIi7nhb48tdm8= +github.com/filecoin-project/go-paramfetch v0.0.4/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= @@ -879,6 +881,8 @@ github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1: github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitfield v1.0.0 h1:y/XHm2GEmD9wKngheWNNCNL0pzrWXZwCdQGv1ikXknQ= +github.com/ipfs/go-bitfield v1.0.0/go.mod h1:N/UiujQy+K+ceU1EF5EkVd1TNqevLrCQMIcAEPrdtus= github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= @@ -914,7 +918,6 @@ github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13X github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= -github.com/ipfs/go-datastore v0.4.7-0.20211013204805-28a3721c2e66/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= @@ -942,13 +945,11 @@ github.com/ipfs/go-filestore v1.1.0 h1:Pu4tLBi1bucu6/HU9llaOmb9yLFk/sgP+pW764zND github.com/ipfs/go-filestore v1.1.0/go.mod h1:6e1/5Y6NvLuCRdmda/KA4GUhXJQ3Uat6vcWm2DJfxc8= github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= -github.com/ipfs/go-graphsync v0.11.0/go.mod h1:wC+c8vGVjAHthsVIl8LKr37cUra2GOaMYcQNNmMxDqE= -github.com/ipfs/go-graphsync v0.11.5 h1:WA5hVxGBtcal6L6nqubKiqRolaZxbexOK3GumGFJRR4= -github.com/ipfs/go-graphsync v0.11.5/go.mod h1:+/sZqRwRCQRrV7NCzgBtufmr5QGpUE98XSa7NlsztmM= +github.com/ipfs/go-graphsync v0.12.0 h1:QCsVHVzb9FTkcm3NEa8GjXnUeGit1L9s08HcSVQ4m/g= +github.com/ipfs/go-graphsync v0.12.0/go.mod h1:nASYWYETgsnMbQ3+DirNImOHQ8TY0a5AhAqyOY55tUg= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= -github.com/ipfs/go-ipfs-blockstore v1.1.0/go.mod h1:5QDUApRqpgPcfGstCxYeMnjt/DYQtXXdJVCvxHHuWVk= github.com/ipfs/go-ipfs-blockstore v1.1.1/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= 
github.com/ipfs/go-ipfs-blockstore v1.1.2 h1:WCXoZcMYnvOTmlpX+RSSnhVN0uCmbWTeepTGX5lgiXw=
github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY=
@@ -966,7 +967,6 @@ github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1I
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo=
github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs=
-github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE=
github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q=
github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU=
github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM=
@@ -1028,8 +1028,9 @@ github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGf
github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g=
-github.com/ipfs/go-log/v2 v2.4.0 h1:iR/2o9PGWanVJrBgIH5Ff8mPGOwpqLaPIAFqSnsdlzk=
github.com/ipfs/go-log/v2 v2.4.0/go.mod h1:nPZnh7Cj7lwS3LpRU5Mwr2ol1c2gXIEXuF6aywqrtmo=
+github.com/ipfs/go-log/v2 v2.5.0 h1:+MhAooFd9XZNvR0i9FriKW6HB0ql7HNXUuflWtc0dd4=
+github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto=
github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
@@ -1049,8 +1050,10 @@ github.com/ipfs/go-peertaskqueue v0.7.1/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68
github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4=
github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k=
github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw=
-github.com/ipfs/go-unixfs v0.2.6 h1:gq3U3T2vh8x6tXhfo3uSO3n+2z4yW0tYtNgVP/3sIyA=
-github.com/ipfs/go-unixfs v0.2.6/go.mod h1:GTTzQvaZsTZARdNkkdjDKFFnBhmO3e5mIM1PkH/x4p0=
+github.com/ipfs/go-unixfs v0.3.1 h1:LrfED0OGfG98ZEegO4/xiprx2O+yS+krCMQSp7zLVv8=
+github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o=
+github.com/ipfs/go-unixfsnode v1.2.0 h1:tHHBJftsJyHGa8bS62PpkYNqHy/Sug3c/vxxC8NaGQY=
+github.com/ipfs/go-unixfsnode v1.2.0/go.mod h1:mQEgLjxkV/1mohkC4p7taRRBYPBeXu97SA3YaerT2q0=
github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E=
github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0=
github.com/ipfs/interface-go-ipfs-core v0.4.0 h1:+mUiamyHIwedqP8ZgbCIwpy40oX7QcXUbo4CZOeJVJg=
@@ -1074,11 +1077,10 @@ github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/j
github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8=
github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8=
github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8=
-github.com/ipld/go-ipld-prime v0.12.3/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8=
github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM=
github.com/ipld/go-ipld-prime v0.14.3-0.20211207234443-319145880958/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0=
-github.com/ipld/go-ipld-prime v0.14.3 h1:cGUmxSws2IHurn00/iLMDapeXsnf9+FyAtYVy8G/JsQ=
-github.com/ipld/go-ipld-prime v0.14.3/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0=
+github.com/ipld/go-ipld-prime v0.14.4 h1:bqhmume8+nbNsX4/+J6eohktfZHAI8GKrF3rQ0xgOyc=
+github.com/ipld/go-ipld-prime v0.14.4/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0=
github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU=
github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ=
github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY=
@@ -1198,7 +1200,6 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ=
github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E=
-github.com/libp2p/go-addr-util v0.1.0 h1:acKsntI33w2bTU7tC9a0SaPimJGfSI0bFKC18ChxeVI=
github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw=
github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ=
github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs=
@@ -1234,8 +1235,10 @@ github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ
github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0=
github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM=
github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4=
-github.com/libp2p/go-libp2p v0.17.0 h1:8l4GV401OSd4dFRyHDtIT/mEzdh/aQGoFC8xshYgm5M=
github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw=
+github.com/libp2p/go-libp2p v0.18.0-rc1/go.mod h1:RgYlH7IIWHXREimC92bw5Lg1V2R5XmSzuLHb5fTnr+8=
+github.com/libp2p/go-libp2p v0.18.0-rc4 h1:OUsSbeu7q+Ck/bV9wHDxFzb08ORqBupHhpCmRBhWrJ8=
+github.com/libp2p/go-libp2p v0.18.0-rc4/go.mod h1:wzmsk1ioOq9FGQys2BN5BIw4nugP6+R+CyW3JbPEbbs=
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo=
github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E=
github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I=
@@ -1248,7 +1251,6 @@ github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/
github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM=
github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o=
-github.com/libp2p/go-libp2p-autonat v0.7.0 h1:rCP5s+A2dlhM1Xd66wurE0k7S7pPmM0D+FlqqSBXxks=
github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg=
github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A=
github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc=
@@ -1267,12 +1269,14 @@ github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3
github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo=
github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4=
github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4=
-github.com/libp2p/go-libp2p-circuit v0.4.0 h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc=
github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA=
+github.com/libp2p/go-libp2p-circuit v0.6.0 h1:rw/HlhmUB3OktS/Ygz6+2XABOmHKzZpPUuMNUMosj8w=
+github.com/libp2p/go-libp2p-circuit v0.6.0/go.mod h1:kB8hY+zCpMeScyvFrKrGicRdid6vNXbunKE4rXATZ0M=
github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk=
github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0=
-github.com/libp2p/go-libp2p-connmgr v0.3.0 h1:yerFXrYa0oxpuVsLlndwm/bLulouHYDcvFrY/4H4fx8=
github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik=
+github.com/libp2p/go-libp2p-connmgr v0.3.1 h1:alEy2fpGKFu+7ZhQF4GF0dvKLyVHeLtIfS/KziwoiZw=
+github.com/libp2p/go-libp2p-connmgr v0.3.1/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik=
github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco=
github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco=
github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE=
@@ -1306,8 +1310,9 @@ github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmk
github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg=
github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg=
github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg=
-github.com/libp2p/go-libp2p-core v0.13.0 h1:IFG/s8dN6JN2OTrXX9eq2wNU/Zlz2KLdwZUp5FplgXI=
github.com/libp2p/go-libp2p-core v0.13.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg=
+github.com/libp2p/go-libp2p-core v0.14.0 h1:0kYSgiK/D7Eo28GTuRXo5YHsWwAisVpFCqCVPUd/vJs=
+github.com/libp2p/go-libp2p-core v0.14.0/go.mod h1:tLasfcVdTXnixsLB0QYaT1syJOhsbrhG7q6pGrHtBg8=
github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE=
github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I=
github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI=
@@ -1343,8 +1348,9 @@ github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiY
github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo=
github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek=
github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw=
-github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc=
github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g=
+github.com/libp2p/go-libp2p-mplex v0.5.0 h1:vt3k4E4HSND9XH4Z8rUpacPJFSAgLOv6HDvG8W9Ks9E=
+github.com/libp2p/go-libp2p-mplex v0.5.0/go.mod h1:eLImPJLkj3iG5t5lq68w3Vm5NAQ5BcKwrrb2VmOYb3M=
github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY=
github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE=
github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw=
@@ -1383,8 +1389,9 @@ github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1
github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk=
github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q=
github.com/libp2p/go-libp2p-pubsub v0.3.2/go.mod h1:Uss7/Cfz872KggNb+doCVPHeCDmXB7z500m/R8DaAUk=
-github.com/libp2p/go-libp2p-pubsub v0.6.0 h1:98+RXuEWW17U6cAijK1yaTf6mw/B+n5yPA421z+dlo0=
github.com/libp2p/go-libp2p-pubsub v0.6.0/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg=
+github.com/libp2p/go-libp2p-pubsub v0.6.1 h1:wycbV+f4rreCoVY61Do6g/BUk0RIrbNRcYVbn+QkjGk=
+github.com/libp2p/go-libp2p-pubsub v0.6.1/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg=
github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 h1:2lH7rMlvDPSvXeOR+g7FE6aqiEwxtpxWKQL8uigk5fQ=
github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6/go.mod h1:8ZodgKS4qRLayfw9FDKDd9DX4C16/GMofDxSldG8QPI=
github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU=
@@ -1393,14 +1400,19 @@ github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqU
github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p3DY9PJfGqxMgPaGKaK5LifwQ=
github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc=
github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ=
-github.com/libp2p/go-libp2p-quic-transport v0.15.2 h1:wHBEceRy+1/8Ec8dAIyr+/P7L2YefIGprPVy5LrMM+k=
github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ=
+github.com/libp2p/go-libp2p-quic-transport v0.16.0/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ=
+github.com/libp2p/go-libp2p-quic-transport v0.16.1 h1:N/XqYXHurphPLDfXYhll8NyqzdZYQqAF4GIr7+SmLV8=
+github.com/libp2p/go-libp2p-quic-transport v0.16.1/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ=
github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q=
github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q=
github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg=
github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk=
github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0=
github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4=
+github.com/libp2p/go-libp2p-resource-manager v0.1.0/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y=
+github.com/libp2p/go-libp2p-resource-manager v0.1.3 h1:Umf0tW6WNXSb6Uoma0YT56azB5iikL/aeGAP7s7+f5o=
+github.com/libp2p/go-libp2p-resource-manager v0.1.3/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y=
github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys=
github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE=
github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY=
@@ -1422,8 +1434,10 @@ github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJeg
github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4=
github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8=
github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc=
-github.com/libp2p/go-libp2p-swarm v0.9.0 h1:LdWjHDVjPMYt3NCG2EHcQiIP8XzA8BHhHz8ZLAYol2Y=
github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8=
+github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA=
+github.com/libp2p/go-libp2p-swarm v0.10.1 h1:lXW3pgGt+BVmkzcFX61erX7l6Lt+WAamNhwa2Kf3eJM=
+github.com/libp2p/go-libp2p-swarm v0.10.1/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs=
github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
@@ -1435,8 +1449,9 @@ github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehts
github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0=
github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0=
github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A=
-github.com/libp2p/go-libp2p-testing v0.6.0 h1:tV/wz6mS1VoAYA/5DGTiyzw9TJ+eXMCMvzU5VPLJSgg=
github.com/libp2p/go-libp2p-testing v0.6.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A=
+github.com/libp2p/go-libp2p-testing v0.7.0 h1:9bfyhNINizxuLrKsenzGaZalXRXIaAEmx1BP/PzF1gM=
+github.com/libp2p/go-libp2p-testing v0.7.0/go.mod h1:OLbdn9DbgdMwv00v+tlp1l3oe2Cl+FAjoWIA2pa0X6E=
github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M=
github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY=
github.com/libp2p/go-libp2p-tls v0.3.1 h1:lsE2zYte+rZCEOHF72J1Fg3XK3dGQyKvI6i5ehJfEp0=
@@ -1451,8 +1466,10 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIW
github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw=
github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk=
github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo=
-github.com/libp2p/go-libp2p-transport-upgrader v0.6.0 h1:GfMCU+2aGGEm1zW3UcOz6wYSn8tXQalFfVfcww99i5A=
github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo=
+github.com/libp2p/go-libp2p-transport-upgrader v0.7.0/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg=
+github.com/libp2p/go-libp2p-transport-upgrader v0.7.1 h1:MSMe+tUfxpC9GArTz7a4G5zQKQgGh00Vio87d3j3xIg=
+github.com/libp2p/go-libp2p-transport-upgrader v0.7.1/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg=
github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY=
github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8=
github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4=
@@ -1466,8 +1483,11 @@ github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelN
github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po=
github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE=
github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k=
-github.com/libp2p/go-libp2p-yamux v0.7.0 h1:bVXHbTj/XH4uBBsPrg26BlDABk5WYRlssY73P0SjhPc=
github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08=
+github.com/libp2p/go-libp2p-yamux v0.8.0/go.mod h1:yTkPgN2ib8FHyU1ZcVD7aelzyAqXXwEPbyx+aSKm9h8=
+github.com/libp2p/go-libp2p-yamux v0.8.1/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE=
+github.com/libp2p/go-libp2p-yamux v0.8.2 h1:6GKWntresp0TFxMP/oSoH96nV8XKJRdynXsdp43dn0Y=
+github.com/libp2p/go-libp2p-yamux v0.8.2/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE=
github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M=
@@ -1479,8 +1499,9 @@ github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6
github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ=
-github.com/libp2p/go-mplex v0.3.0 h1:U1T+vmCYJaEoDJPV1aq31N56hS+lJgb397GsylNSgrU=
github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ=
+github.com/libp2p/go-mplex v0.4.0 h1:Ukkez9/4EOX5rTw4sHefNJp10dksftAA05ZgyjplUbM=
+github.com/libp2p/go-mplex v0.4.0/go.mod h1:y26Lx+wNVtMYMaPu300Cbot5LkEZ4tJaNYeHeT9dh6E=
github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
@@ -1495,8 +1516,9 @@ github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC
github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A=
-github.com/libp2p/go-netroute v0.1.6 h1:ruPJStbYyXVYGQ81uzEDzuvbYRLKRrLvTYd33yomC38=
github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ=
+github.com/libp2p/go-netroute v0.2.0 h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE=
+github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI=
github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0=
github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
@@ -1515,14 +1537,14 @@ github.com/libp2p/go-reuseport-transport v0.1.0 h1:C3PHeHjmnz8m6f0uydObj02tMEoi7
github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw=
github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
-github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ=
github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14=
github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ=
github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw=
github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc=
-github.com/libp2p/go-stream-muxer-multistream v0.3.0 h1:TqnSHPJEIqDEO7h1wZZ0p3DXdvDSiLHQidKKUGZtiOY=
github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA=
+github.com/libp2p/go-stream-muxer-multistream v0.4.0 h1:HsM/9OdtqnIzjVXcxTXjmqKrj3gJ8kacaOJwJS1ipaY=
+github.com/libp2p/go-stream-muxer-multistream v0.4.0/go.mod h1:nb+dGViZleRP4XcyHuZSVrJCBl55nRBOMmiSL/dyziw=
github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o=
github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc=
github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY=
@@ -1530,8 +1552,9 @@ github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcr
github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU=
github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU=
github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM=
-github.com/libp2p/go-tcp-transport v0.4.0 h1:VDyg4j6en3OuXf90gfDQh5Sy9KowO9udnd0OU8PP6zg=
github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI=
+github.com/libp2p/go-tcp-transport v0.5.0 h1:3ZPW8HAuyRAuFzyabE0hSrCXKKSWzROnZZX7DtcIatY=
+github.com/libp2p/go-tcp-transport v0.5.0/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y=
github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I=
github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc=
github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU=
@@ -1541,8 +1564,9 @@ github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzl
github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk=
github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk=
github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA=
-github.com/libp2p/go-ws-transport v0.5.0 h1:cO6x4P0v6PfxbKnxmf5cY2Ny4OPDGYkUqNvZzp/zdlo=
github.com/libp2p/go-ws-transport v0.5.0/go.mod h1:I2juo1dNTbl8BKSBYo98XY85kU2xds1iamArLvl8kNg=
+github.com/libp2p/go-ws-transport v0.6.0 h1:326XBL6Q+5CQ2KtjXz32+eGu02W/Kz2+Fm4SpXdr0q4=
+github.com/libp2p/go-ws-transport v0.6.0/go.mod h1:dXqtI9e2JV9FtF1NOtWVZSKXh5zXvnuwPXfj8GPBbYU=
github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
@@ -1554,8 +1578,10 @@ github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/h
github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI=
github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ=
-github.com/libp2p/go-yamux/v2 v2.3.0 h1:luRV68GS1vqqr6EFUjtu1kr51d+IbW0gSowu8emYWAI=
github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs=
+github.com/libp2p/go-yamux/v3 v3.0.1/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo=
+github.com/libp2p/go-yamux/v3 v3.0.2 h1:LW0q5+A1Wy0npEsPJP9wmare2NH4ohNluN5EWVwv2mE=
+github.com/libp2p/go-yamux/v3 v3.0.2/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo=
github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
@@ -1564,8 +1590,9 @@ github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86
github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8=
github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0T3FQhz/uyOUS38Q=
github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0=
-github.com/lucas-clemente/quic-go v0.24.0 h1:ToR7SIIEdrgOhgVTHvPgdVRJfgVy+N0wQAagH7L4d5g=
github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0=
+github.com/lucas-clemente/quic-go v0.25.0 h1:K+X9Gvd7JXsOHtU0N2icZ2Nw3rx82uBej3mP4CLgibc=
+github.com/lucas-clemente/quic-go v0.25.0/go.mod h1:YtzP8bxRVCBlO77yRanE264+fY/T2U9ZlW1AaHOsMOg=
github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac=
github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg=
@@ -1597,6 +1624,8 @@ github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZE
github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8=
github.com/marten-seemann/qtls-go1-17 v0.1.0 h1:P9ggrs5xtwiqXv/FHNwntmuLMNq3KaSIG93AtAZ48xk=
github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8=
+github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1 h1:EnzzN9fPUkUck/1CuY1FlzBaIYMoiBsdwTNmNGkwUUM=
+github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1/go.mod h1:PUhIQk19LoFt2174H4+an8TYvWOGjb/hHwphBeaDHwI=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
@@ -1709,8 +1738,9 @@ github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4
github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc=
github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0=
github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc=
-github.com/multiformats/go-multiaddr v0.4.1 h1:Pq37uLx3hsyNlTDir7FZyU8+cFCTqd5y1KiM2IzOutI=
github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM=
+github.com/multiformats/go-multiaddr v0.5.0 h1:i/JuOoVg4szYQ4YEzDGtb2h0o8M7CG/Yq6cGlcjWZpM=
+github.com/multiformats/go-multiaddr v0.5.0/go.mod h1:3KAxNkUqLTJ20AAwN4XVX4kZar+bR+gh4zgbfr3SNug=
github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
@@ -1735,8 +1765,9 @@ github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4=
github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ=
-github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61 h1:ZrUuMKNgJ52qHPoQ+bx0h0uBfcWmN7Px+4uKSZeesiI=
github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ=
+github.com/multiformats/go-multicodec v0.3.1-0.20211210143421-a526f306ed2c h1:VyANTtZ0wsx0IAZnCZhfMmAmfUyzJq/5JQi2hHOtKS0=
+github.com/multiformats/go-multicodec v0.3.1-0.20211210143421-a526f306ed2c/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ=
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
@@ -1842,6 +1873,8 @@ github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIw
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
@@ -2908,4 +2941,4 @@ sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
-sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
\ No newline at end of file