Merge remote-tracking branch 'origin/master' into feat/post-worker
commit 98f4d37f1a
@@ -10,20 +10,12 @@ import (
 
 var errMarkSetClosed = errors.New("markset closed")
 
-// MarkSet is a utility to keep track of seen CID, and later query for them.
-//
-// * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt).
-// * If a probabilistic result is acceptable, it can be backed by a bloom filter
+// MarkSet is an interface for tracking CIDs during chain and object walks
 type MarkSet interface {
+	ObjectVisitor
 	Mark(cid.Cid) error
 	Has(cid.Cid) (bool, error)
 	Close() error
-	SetConcurrent()
-}
-
-type MarkSetVisitor interface {
-	MarkSet
-	ObjectVisitor
 }
 
 type MarkSetEnv interface {
@@ -31,11 +23,7 @@ type MarkSetEnv interface {
 	// name is a unique name for this markset, mapped to the filesystem in disk-backed environments
 	// sizeHint is a hint about the expected size of the markset
 	Create(name string, sizeHint int64) (MarkSet, error)
-	// CreateVisitor is like Create, but returns a wider interface that supports atomic visits.
-	// It may not be supported by some markset types (e.g. bloom).
-	CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error)
-	// SupportsVisitor returns true if the marksets created by this environment support the visitor interface.
-	SupportsVisitor() bool
+	// Close closes the markset
 	Close() error
 }
 
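Note: the change above collapses MarkSet and MarkSetVisitor into a single interface; every MarkSet now embeds ObjectVisitor, so the object used for marking doubles as the walk deduplicator. A minimal sketch of the merged surface, assuming the splitstore package (markAndCheck is hypothetical, not part of the commit):

package splitstore

import (
	cid "github.com/ipfs/go-cid"
)

// markAndCheck marks a batch of CIDs and then exercises Visit on the same
// markset, illustrating that one object now serves both roles.
func markAndCheck(env MarkSetEnv, cids []cid.Cid) error {
	ms, err := env.Create("example", int64(len(cids)))
	if err != nil {
		return err
	}
	defer ms.Close() //nolint:errcheck

	for _, c := range cids {
		if err := ms.Mark(c); err != nil {
			return err
		}
	}

	// Visit reports whether a CID is newly seen; with the map-backed
	// markset everything was just marked, so these return false and a
	// walk would prune at this point.
	for _, c := range cids {
		if _, err := ms.Visit(c); err != nil {
			return err
		}
	}
	return nil
}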
@@ -34,7 +34,6 @@ type BadgerMarkSet struct {
 }
 
 var _ MarkSet = (*BadgerMarkSet)(nil)
-var _ MarkSetVisitor = (*BadgerMarkSet)(nil)
 
 var badgerMarkSetBatchSize = 16384
 
@@ -48,7 +47,7 @@ func NewBadgerMarkSetEnv(path string) (MarkSetEnv, error) {
 	return &BadgerMarkSetEnv{path: msPath}, nil
 }
 
-func (e *BadgerMarkSetEnv) create(name string, sizeHint int64) (*BadgerMarkSet, error) {
+func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
 	name += ".tmp"
 	path := filepath.Join(e.path, name)
 
@@ -68,16 +67,6 @@ func (e *BadgerMarkSetEnv) create(name string, sizeHint int64) (*BadgerMarkSet,
 	return ms, nil
 }
 
-func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
-	return e.create(name, sizeHint)
-}
-
-func (e *BadgerMarkSetEnv) CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error) {
-	return e.create(name, sizeHint)
-}
-
-func (e *BadgerMarkSetEnv) SupportsVisitor() bool { return true }
-
 func (e *BadgerMarkSetEnv) Close() error {
 	return os.RemoveAll(e.path)
 }
@@ -13,42 +13,27 @@ var _ MarkSetEnv = (*MapMarkSetEnv)(nil)
 type MapMarkSet struct {
 	mx  sync.RWMutex
 	set map[string]struct{}
-
-	ts bool
 }
 
 var _ MarkSet = (*MapMarkSet)(nil)
-var _ MarkSetVisitor = (*MapMarkSet)(nil)
 
 func NewMapMarkSetEnv() (*MapMarkSetEnv, error) {
 	return &MapMarkSetEnv{}, nil
 }
 
-func (e *MapMarkSetEnv) create(name string, sizeHint int64) (*MapMarkSet, error) {
+func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
 	return &MapMarkSet{
 		set: make(map[string]struct{}, sizeHint),
 	}, nil
 }
 
-func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
-	return e.create(name, sizeHint)
-}
-
-func (e *MapMarkSetEnv) CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error) {
-	return e.create(name, sizeHint)
-}
-
-func (e *MapMarkSetEnv) SupportsVisitor() bool { return true }
-
 func (e *MapMarkSetEnv) Close() error {
 	return nil
 }
 
 func (s *MapMarkSet) Mark(cid cid.Cid) error {
-	if s.ts {
 	s.mx.Lock()
 	defer s.mx.Unlock()
-	}
 
 	if s.set == nil {
 		return errMarkSetClosed
@@ -59,10 +44,8 @@ func (s *MapMarkSet) Mark(cid cid.Cid) error {
 }
 
 func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) {
-	if s.ts {
 	s.mx.RLock()
 	defer s.mx.RUnlock()
-	}
 
 	if s.set == nil {
 		return false, errMarkSetClosed
@@ -73,10 +56,8 @@ func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) {
 }
 
 func (s *MapMarkSet) Visit(c cid.Cid) (bool, error) {
-	if s.ts {
 	s.mx.Lock()
 	defer s.mx.Unlock()
-	}
 
 	if s.set == nil {
 		return false, errMarkSetClosed
@@ -92,14 +73,9 @@ func (s *MapMarkSet) Visit(c cid.Cid) (bool, error) {
 }
 
 func (s *MapMarkSet) Close() error {
-	if s.ts {
 	s.mx.Lock()
 	defer s.mx.Unlock()
-	}
 	s.set = nil
 	return nil
 }
-
-func (s *MapMarkSet) SetConcurrent() {
-	s.ts = true
-}
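Note: with the ts flag and SetConcurrent gone, MapMarkSet takes its RWMutex unconditionally, so a markset is safe to share across goroutines from the moment it is created. A hypothetical sketch (not part of the commit) of concurrent marking over the new interface:

package splitstore

import (
	cid "github.com/ipfs/go-cid"
	"golang.org/x/sync/errgroup"
)

// markConcurrently fans out Mark calls across goroutines; correctness now
// relies only on the markset's internal locking, with no arming step.
func markConcurrently(ms MarkSet, cids []cid.Cid) error {
	g := new(errgroup.Group)
	for _, c := range cids {
		c := c // capture the loop variable for the closure
		g.Go(func() error { return ms.Mark(c) })
	}
	return g.Wait()
}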
@@ -167,7 +167,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) {
 	}
 	defer env.Close() //nolint:errcheck
 
-	visitor, err := env.CreateVisitor("test", 0)
+	visitor, err := env.Create("test", 0)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -186,10 +186,6 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
 		return nil, err
 	}
 
-	if !markSetEnv.SupportsVisitor() {
-		return nil, xerrors.Errorf("markset type does not support atomic visitors")
-	}
-
 	// and now we can make a SplitStore
 	ss := &SplitStore{
 		cfg: cfg,
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"sync"
 	"sync/atomic"
 	"time"
 
@@ -67,7 +68,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 	}
 	defer output.Close() //nolint:errcheck
 
+	var mx sync.Mutex
 	write := func(format string, args ...interface{}) {
+		mx.Lock()
+		defer mx.Unlock()
 		_, err := fmt.Fprintf(output, format+"\n", args...)
 		if err != nil {
 			log.Warnf("error writing check output: %s", err)
@@ -82,9 +86,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 	write("compaction index: %d", s.compactionIndex)
 	write("--")
 
-	var coldCnt, missingCnt int64
+	coldCnt := new(int64)
+	missingCnt := new(int64)
 
-	visitor, err := s.markSetEnv.CreateVisitor("check", 0)
+	visitor, err := s.markSetEnv.Create("check", 0)
 	if err != nil {
 		return xerrors.Errorf("error creating visitor: %w", err)
 	}
@@ -111,10 +116,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 		}
 
 		if has {
-			coldCnt++
+			atomic.AddInt64(coldCnt, 1)
 			write("cold object reference: %s", c)
 		} else {
-			missingCnt++
+			atomic.AddInt64(missingCnt, 1)
 			write("missing object reference: %s", c)
 			return errStopWalk
 		}
@@ -128,9 +133,9 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 		return err
 	}
 
-	log.Infow("check done", "cold", coldCnt, "missing", missingCnt)
+	log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt)
 	write("--")
-	write("cold: %d missing: %d", coldCnt, missingCnt)
+	write("cold: %d missing: %d", *coldCnt, *missingCnt)
 	write("DONE")
 
 	return nil
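Note: doCheck now assumes its walk callback can run on several goroutines at once, so the shared report writer is serialized with a mutex and the counters move to atomics addressed through pointers (so the closures and the final log read the same variables). A standalone sketch of the same pattern, runnable as-is:

package main

import (
	"fmt"
	"os"
	"sync"
	"sync/atomic"
)

func main() {
	var mx sync.Mutex
	write := func(format string, args ...interface{}) {
		mx.Lock()
		defer mx.Unlock()
		fmt.Fprintf(os.Stdout, format+"\n", args...)
	}

	coldCnt := new(int64)
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			atomic.AddInt64(coldCnt, 1) // each worker bumps the shared counter
			write("worker %d done", i)  // serialized writes, no interleaving
		}(i)
	}
	wg.Wait()
	write("cold: %d", atomic.LoadInt64(coldCnt))
}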
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"runtime"
 	"sort"
+	"sync"
 	"sync/atomic"
 	"time"
 
@@ -227,7 +228,7 @@ func (s *SplitStore) trackTxnRefMany(cids []cid.Cid) {
 }
 
 // protect all pending transactional references
-func (s *SplitStore) protectTxnRefs(markSet MarkSetVisitor) error {
+func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
 	for {
 		var txnRefs map[cid.Cid]struct{}
 
@@ -299,14 +300,14 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSetVisitor) error {
 
 // transactionally protect a reference by walking the object and marking.
 // concurrent markings are short circuited by checking the markset.
-func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSetVisitor) error {
+func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
 	if err := s.checkClosing(); err != nil {
 		return err
 	}
 
 	// Note: cold objects are deleted heaviest first, so the consituents of an object
 	// cannot be deleted before the object itself.
-	return s.walkObjectIncomplete(root, tmpVisitor(),
+	return s.walkObjectIncomplete(root, newTmpVisitor(),
 		func(c cid.Cid) error {
 			if isUnitaryObject(c) {
 				return errStopWalk
@@ -397,7 +398,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 
 	log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex)
 
-	markSet, err := s.markSetEnv.CreateVisitor("live", s.markSetSize)
+	markSet, err := s.markSetEnv.Create("live", s.markSetSize)
 	if err != nil {
 		return xerrors.Errorf("error creating mark set: %w", err)
 	}
@@ -602,8 +603,8 @@ func (s *SplitStore) beginTxnProtect() {
 	s.txnMissing = make(map[cid.Cid]struct{})
 }
 
-func (s *SplitStore) beginTxnMarking(markSet MarkSetVisitor) {
-	markSet.SetConcurrent()
+func (s *SplitStore) beginTxnMarking(markSet MarkSet) {
+	log.Info("beginning transactional marking")
 }
 
 func (s *SplitStore) endTxnProtect() {
@@ -621,26 +622,33 @@ func (s *SplitStore) endTxnProtect() {
 
 func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch,
 	visitor ObjectVisitor, f func(cid.Cid) error) error {
-	var walked *cid.Set
-	toWalk := ts.Cids()
-	walkCnt := 0
-	scanCnt := 0
+	var walked ObjectVisitor
+	var mx sync.Mutex
+	// we copy the tipset first into a new slice, which allows us to reuse it in every epoch.
+	toWalk := make([]cid.Cid, len(ts.Cids()))
+	copy(toWalk, ts.Cids())
+	walkCnt := new(int64)
+	scanCnt := new(int64)
 
 	stopWalk := func(_ cid.Cid) error { return errStopWalk }
 
 	walkBlock := func(c cid.Cid) error {
-		if !walked.Visit(c) {
+		visit, err := walked.Visit(c)
+		if err != nil {
+			return err
+		}
+		if !visit {
 			return nil
 		}
 
-		walkCnt++
+		atomic.AddInt64(walkCnt, 1)
 
 		if err := f(c); err != nil {
 			return err
 		}
 
 		var hdr types.BlockHeader
-		err := s.view(c, func(data []byte) error {
+		err = s.view(c, func(data []byte) error {
 			return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
 		})
 
@@ -676,11 +684,13 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 			if err := s.walkObject(hdr.ParentStateRoot, visitor, f); err != nil {
 				return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
 			}
-			scanCnt++
+			atomic.AddInt64(scanCnt, 1)
 		}
 
 		if hdr.Height > 0 {
+			mx.Lock()
 			toWalk = append(toWalk, hdr.Parents...)
+			mx.Unlock()
 		}
 
 		return nil
@@ -692,20 +702,43 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 			return err
 		}
 
+		workers := len(toWalk)
+		if workers > runtime.NumCPU()/2 {
+			workers = runtime.NumCPU() / 2
+		}
+		if workers < 2 {
+			workers = 2
+		}
+
 		// the walk is BFS, so we can reset the walked set in every iteration and avoid building up
 		// a set that contains all blocks (1M epochs -> 5M blocks -> 200MB worth of memory and growing
 		// over time)
-		walked = cid.NewSet()
-		walking := toWalk
-		toWalk = nil
-		for _, c := range walking {
-			if err := walkBlock(c); err != nil {
-				return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
-			}
-		}
+		walked = newConcurrentVisitor()
+		workch := make(chan cid.Cid, len(toWalk))
+		for _, c := range toWalk {
+			workch <- c
+		}
+		close(workch)
+		toWalk = toWalk[:0]
+
+		g := new(errgroup.Group)
+		for i := 0; i < workers; i++ {
+			g.Go(func() error {
+				for c := range workch {
+					if err := walkBlock(c); err != nil {
+						return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
+					}
+				}
+				return nil
+			})
+		}
+
+		if err := g.Wait(); err != nil {
+			return err
+		}
 	}
 
-	log.Infow("chain walk done", "walked", walkCnt, "scanned", scanCnt)
+	log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt)
 
 	return nil
 }
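Note: the new walkChain processes each BFS layer with a bounded worker pool: the layer's CIDs are preloaded into a buffered channel that is then closed, so workers simply drain it and exit, and errgroup propagates the first error. A standalone sketch of that fan-out shape (runnable as-is; the item type and work function are stand-ins for walkBlock):

package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sync/errgroup"
)

func main() {
	items := []int{1, 2, 3, 4, 5}

	// cap the pool at half the CPUs, with a floor of two workers
	workers := len(items)
	if workers > runtime.NumCPU()/2 {
		workers = runtime.NumCPU() / 2
	}
	if workers < 2 {
		workers = 2
	}

	// buffered to len(items), so seeding never blocks; closing the
	// channel is what lets the workers terminate
	workch := make(chan int, len(items))
	for _, it := range items {
		workch <- it
	}
	close(workch)

	g := new(errgroup.Group)
	for i := 0; i < workers; i++ {
		g.Go(func() error {
			for it := range workch {
				fmt.Println("processed", it)
			}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("walk failed:", err)
	}
}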
@@ -1011,7 +1044,7 @@ func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) erro
 	return nil
 }
 
-func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error {
+func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSet) error {
 	deadCids := make([]cid.Cid, 0, batchSize)
 	var purgeCnt, liveCnt int
 	defer func() {
@@ -1077,7 +1110,7 @@ func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error {
 // have this gem[TM].
 // My best guess is that they are parent message receipts or yet to be computed state roots; magik
 // thinks the cause may be block validation.
-func (s *SplitStore) waitForMissingRefs(markSet MarkSetVisitor) {
+func (s *SplitStore) waitForMissingRefs(markSet MarkSet) {
 	s.txnLk.Lock()
 	missing := s.txnMissing
 	s.txnMissing = nil
@@ -1106,7 +1139,7 @@ func (s *SplitStore) waitForMissingRefs(markSet MarkSetVisitor) {
 	}
 
 	towalk := missing
-	visitor := tmpVisitor()
+	visitor := newTmpVisitor()
 	missing = make(map[cid.Cid]struct{})
 
 	for c := range towalk {
@@ -1,6 +1,7 @@
 package splitstore
 
 import (
+	"sync"
 	"sync/atomic"
 	"time"
 
@@ -55,12 +56,13 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 	if WarmupBoundary < epoch {
 		boundaryEpoch = epoch - WarmupBoundary
 	}
+	var mx sync.Mutex
 	batchHot := make([]blocks.Block, 0, batchSize)
-	count := int64(0)
-	xcount := int64(0)
-	missing := int64(0)
+	count := new(int64)
+	xcount := new(int64)
+	missing := new(int64)
 
-	visitor, err := s.markSetEnv.CreateVisitor("warmup", 0)
+	visitor, err := s.markSetEnv.Create("warmup", 0)
 	if err != nil {
 		return xerrors.Errorf("error creating visitor: %w", err)
 	}
@@ -73,7 +75,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 			return errStopWalk
 		}
 
-		count++
+		atomic.AddInt64(count, 1)
 
 		has, err := s.hot.Has(s.ctx, c)
 		if err != nil {
@@ -87,22 +89,25 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 			blk, err := s.cold.Get(s.ctx, c)
 			if err != nil {
 				if err == bstore.ErrNotFound {
-					missing++
+					atomic.AddInt64(missing, 1)
 					return errStopWalk
 				}
 				return err
 			}
 
-			xcount++
+			atomic.AddInt64(xcount, 1)
 
+			mx.Lock()
 			batchHot = append(batchHot, blk)
 			if len(batchHot) == batchSize {
 				err = s.hot.PutMany(s.ctx, batchHot)
 				if err != nil {
+					mx.Unlock()
 					return err
 				}
 				batchHot = batchHot[:0]
 			}
+			mx.Unlock()
 
 			return nil
 		})
@@ -118,9 +123,9 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
 		}
 	}
 
-	log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing)
+	log.Infow("warmup stats", "visited", *count, "warm", *xcount, "missing", *missing)
 
-	s.markSetSize = count + count>>2 // overestimate a bit
+	s.markSetSize = *count + *count>>2 // overestimate a bit
 	err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize))
 	if err != nil {
 		log.Warnf("error saving mark set size: %s", err)
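Note: in doWarmup the walk callback can now run concurrently, so the shared batchHot slice is only touched inside the mutex; the explicit Unlock before the early return on a PutMany failure keeps the lock from leaking out of the error path. A standalone sketch of the locked batch-and-flush pattern (runnable; Println stands in for hot.PutMany):

package main

import (
	"fmt"
	"sync"
)

const batchSize = 3

func main() {
	var mx sync.Mutex
	batch := make([]int, 0, batchSize)

	put := func(v int) {
		mx.Lock()
		batch = append(batch, v)
		if len(batch) == batchSize {
			fmt.Println("flushing", batch) // stand-in for hot.PutMany
			batch = batch[:0]
		}
		mx.Unlock()
	}

	var wg sync.WaitGroup
	for i := 0; i < 7; i++ {
		wg.Add(1)
		go func(i int) { defer wg.Done(); put(i) }(i)
	}
	wg.Wait()
	fmt.Println("left over:", batch)
}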
@@ -1,6 +1,8 @@
 package splitstore
 
 import (
+	"sync"
+
 	cid "github.com/ipfs/go-cid"
 )
 
@@ -17,16 +19,34 @@ func (v *noopVisitor) Visit(_ cid.Cid) (bool, error) {
 	return true, nil
 }
 
-type cidSetVisitor struct {
+type tmpVisitor struct {
 	set *cid.Set
 }
 
-var _ ObjectVisitor = (*cidSetVisitor)(nil)
+var _ ObjectVisitor = (*tmpVisitor)(nil)
 
-func (v *cidSetVisitor) Visit(c cid.Cid) (bool, error) {
+func (v *tmpVisitor) Visit(c cid.Cid) (bool, error) {
 	return v.set.Visit(c), nil
 }
 
-func tmpVisitor() ObjectVisitor {
-	return &cidSetVisitor{set: cid.NewSet()}
+func newTmpVisitor() ObjectVisitor {
+	return &tmpVisitor{set: cid.NewSet()}
+}
+
+type concurrentVisitor struct {
+	mx  sync.Mutex
+	set *cid.Set
+}
+
+var _ ObjectVisitor = (*concurrentVisitor)(nil)
+
+func newConcurrentVisitor() *concurrentVisitor {
+	return &concurrentVisitor{set: cid.NewSet()}
+}
+
+func (v *concurrentVisitor) Visit(c cid.Cid) (bool, error) {
+	v.mx.Lock()
+	defer v.mx.Unlock()
+
+	return v.set.Visit(c), nil
 }
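Note: concurrentVisitor is just cid.Set.Visit behind a mutex; Visit returns true only the first time a CID is seen, which is exactly the dedup test walkChain needs once blocks are walked in parallel. A standalone sketch of the semantics (runnable, assuming go-cid is on the module path; bafkqaaa is the empty identity CID, used only as a demo value):

package main

import (
	"fmt"
	"sync"

	cid "github.com/ipfs/go-cid"
)

type concurrentVisitor struct {
	mx  sync.Mutex
	set *cid.Set
}

func (v *concurrentVisitor) Visit(c cid.Cid) (bool, error) {
	v.mx.Lock()
	defer v.mx.Unlock()
	return v.set.Visit(c), nil
}

func main() {
	v := &concurrentVisitor{set: cid.NewSet()}
	c, err := cid.Decode("bafkqaaa")
	if err != nil {
		panic(err)
	}
	first, _ := v.Visit(c)
	again, _ := v.Visit(c)
	fmt.Println(first, again) // true false
}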
@@ -90,6 +90,7 @@ func init() {
 	UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight)
 	UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight)
 	UpgradeChocolateHeight = getUpgradeHeight("LOTUS_CHOCOLATE_HEIGHT", UpgradeChocolateHeight)
+	UpgradeOhSnapHeight = getUpgradeHeight("LOTUS_OHSNAP_HEIGHT", UpgradeOhSnapHeight)
 
 	BuildType |= Build2k
 
@@ -182,7 +182,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
 			}
 		}
 
-		return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
+		return xerrors.Errorf("parent state root did not match computed state (%s != %s)", h.ParentStateRoot, stateroot)
 	}
 
 	if precp != h.ParentMessageReceipts {
@@ -173,10 +173,17 @@ type MessagePool struct {
 
 	sigValCache *lru.TwoQueueCache
 
+	nonceCache *lru.Cache
+
 	evtTypes [3]journal.EventType
 	journal journal.Journal
 }
 
+type nonceCacheKey struct {
+	tsk  types.TipSetKey
+	addr address.Address
+}
+
 type msgSet struct {
 	msgs map[uint64]*types.SignedMessage
 	nextNonce uint64
@@ -361,6 +368,7 @@ func (ms *msgSet) toSlice() []*types.SignedMessage {
 func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
 	cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
 	verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
+	noncecache, _ := lru.New(256)
 
 	cfg, err := loadConfig(ctx, ds)
 	if err != nil {
@@ -386,6 +394,7 @@ func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.Upgra
 		pruneCooldown: make(chan struct{}, 1),
 		blsSigCache: cache,
 		sigValCache: verifcache,
+		nonceCache: noncecache,
 		changes: lps.New(50),
 		localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)),
 		api: api,
@@ -1016,11 +1025,23 @@ func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address,
 	done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration)
 	defer done()
 
+	nk := nonceCacheKey{
+		tsk:  ts.Key(),
+		addr: addr,
+	}
+
+	n, ok := mp.nonceCache.Get(nk)
+	if ok {
+		return n.(uint64), nil
+	}
+
 	act, err := mp.api.GetActorAfter(addr, ts)
 	if err != nil {
 		return 0, err
 	}
 
+	mp.nonceCache.Add(nk, act.Nonce)
+
 	return act.Nonce, nil
 }
 
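Note: getStateNonce memoizes the looked-up actor nonce in an LRU keyed by (tipset key, address); a dedicated struct key works because both fields are comparable, which the LRU's internal map requires, and the value is safe to cache because an actor's nonce at a fixed tipset never changes. A standalone sketch of the shape (runnable; the string fields stand in for types.TipSetKey and address.Address):

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

type nonceCacheKey struct {
	tsk  string // stand-in for types.TipSetKey
	addr string // stand-in for address.Address
}

func main() {
	cache, _ := lru.New(256)

	nk := nonceCacheKey{tsk: "ts1", addr: "f01234"}
	if n, ok := cache.Get(nk); ok {
		fmt.Println("cached:", n.(uint64))
		return
	}

	// cache miss: resolve from state (omitted here), then memoize
	var nonce uint64 = 7
	cache.Add(nk, nonce)
	fmt.Println("resolved:", nonce)
}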
@@ -1,3 +1,4 @@
+//stm: #unit
 package messagepool
 
 import (
@@ -206,6 +207,7 @@ func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipS
 
 func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) {
 	t.Helper()
+	//stm: @CHAIN_MEMPOOL_GET_NONCE_001
 	n, err := mp.GetNonce(context.TODO(), addr, types.EmptyTSK)
 	if err != nil {
 		t.Fatal(err)
@@ -366,8 +368,10 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) {
 	tma.applyBlock(t, a)
 	tsa := mock.TipSet(a)
 
+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	_, _ = mp.Pending(context.TODO())
 
+	//stm: @CHAIN_MEMPOOL_SELECT_001
 	selm, _ := mp.SelectMessages(context.Background(), tsa, 1)
 	if len(selm) == 0 {
 		t.Fatal("should have returned the rest of the messages")
@@ -428,6 +432,7 @@ func TestRevertMessages(t *testing.T) {
 
 	assertNonce(t, mp, sender, 4)
 
+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	p, _ := mp.Pending(context.TODO())
 	fmt.Printf("%+v\n", p)
 	if len(p) != 3 {
@@ -486,6 +491,7 @@ func TestPruningSimple(t *testing.T) {
 
 	mp.Prune()
 
+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	msgs, _ := mp.Pending(context.TODO())
 	if len(msgs) != 5 {
 		t.Fatal("expected only 5 messages in pool, got: ", len(msgs))
@@ -528,6 +534,7 @@ func TestLoadLocal(t *testing.T) {
 	msgs := make(map[cid.Cid]struct{})
 	for i := 0; i < 10; i++ {
 		m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
+		//stm: @CHAIN_MEMPOOL_PUSH_001
 		cid, err := mp.Push(context.TODO(), m)
 		if err != nil {
 			t.Fatal(err)
@@ -544,6 +551,7 @@ func TestLoadLocal(t *testing.T) {
 		t.Fatal(err)
 	}
 
+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	pmsgs, _ := mp.Pending(context.TODO())
 	if len(msgs) != len(pmsgs) {
 		t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs))
@@ -599,6 +607,7 @@ func TestClearAll(t *testing.T) {
 	gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
 	for i := 0; i < 10; i++ {
 		m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
+		//stm: @CHAIN_MEMPOOL_PUSH_001
 		_, err := mp.Push(context.TODO(), m)
 		if err != nil {
 			t.Fatal(err)
@@ -610,8 +619,10 @@ func TestClearAll(t *testing.T) {
 		mustAdd(t, mp, m)
 	}
 
+	//stm: @CHAIN_MEMPOOL_CLEAR_001
 	mp.Clear(context.Background(), true)
 
+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	pending, _ := mp.Pending(context.TODO())
 	if len(pending) > 0 {
 		t.Fatalf("cleared the mpool, but got %d pending messages", len(pending))
@@ -654,6 +665,7 @@ func TestClearNonLocal(t *testing.T) {
 	gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
 	for i := 0; i < 10; i++ {
 		m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
+		//stm: @CHAIN_MEMPOOL_PUSH_001
 		_, err := mp.Push(context.TODO(), m)
 		if err != nil {
 			t.Fatal(err)
@@ -665,8 +677,10 @@ func TestClearNonLocal(t *testing.T) {
 		mustAdd(t, mp, m)
 	}
 
+	//stm: @CHAIN_MEMPOOL_CLEAR_001
 	mp.Clear(context.Background(), false)
 
+	//stm: @CHAIN_MEMPOOL_PENDING_001
 	pending, _ := mp.Pending(context.TODO())
 	if len(pending) != 10 {
 		t.Fatalf("expected 10 pending messages, but got %d instead", len(pending))
@@ -724,6 +738,7 @@ func TestUpdates(t *testing.T) {
 
 	for i := 0; i < 10; i++ {
 		m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
+		//stm: @CHAIN_MEMPOOL_PUSH_001
 		_, err := mp.Push(context.TODO(), m)
 		if err != nil {
 			t.Fatal(err)
@@ -1,3 +1,4 @@
+//stm: #unit
 package messagesigner
 
 import (
@@ -60,6 +61,7 @@ func TestMessageSignerSignMessage(t *testing.T) {
 	to2, err := w.WalletNew(ctx, types.KTSecp256k1)
 	require.NoError(t, err)
 
+	//stm: @CHAIN_MESSAGE_SIGNER_NEW_SIGNER_001, @CHAIN_MESSAGE_SIGNER_SIGN_MESSAGE_001, @CHAIN_MESSAGE_SIGNER_SIGN_MESSAGE_005
 	type msgSpec struct {
 		msg *types.Message
 		mpoolNonce [1]uint64
@@ -1,3 +1,4 @@
+//stm:#unit
 package rand_test
 
 import (
@@ -55,11 +56,13 @@ func TestNullRandomnessV1(t *testing.T) {
 
 	randEpoch := ts.TipSet.TipSet().Height() - 2
 
+	//stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V1_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_02
 	rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
 	if err != nil {
 		t.Fatal(err)
 	}
 
+	//stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
 	bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(beforeNullHeight)+offset)
 
 	select {
@@ -68,6 +71,7 @@ func TestNullRandomnessV1(t *testing.T) {
 		t.Fatal(resp.Err)
 	}
 
+	//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
 	rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
 	if err != nil {
 		t.Fatal(err)
@@ -131,11 +135,13 @@ func TestNullRandomnessV2(t *testing.T) {
 
 	randEpoch := ts.TipSet.TipSet().Height() - 2
 
+	//stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V2_01
 	rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
 	if err != nil {
 		t.Fatal(err)
 	}
 
+	//stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
 	bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(ts.TipSet.TipSet().Height())+offset)
 
 	select {
@@ -144,6 +150,7 @@ func TestNullRandomnessV2(t *testing.T) {
 		t.Fatal(resp.Err)
 	}
 
+	//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_03
 	// note that the randEpoch passed to DrawRandomness is still randEpoch (not the latest ts height)
 	rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
 	if err != nil {
@@ -212,11 +219,13 @@ func TestNullRandomnessV3(t *testing.T) {
 
 	randEpoch := ts.TipSet.TipSet().Height() - 2
 
+	//stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V3_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01
 	rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
 	if err != nil {
 		t.Fatal(err)
 	}
 
+	//stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
 	bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(randEpoch)+offset)
 
 	select {
@@ -225,6 +234,7 @@ func TestNullRandomnessV3(t *testing.T) {
 		t.Fatal(resp.Err)
 	}
 
+	//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
 	rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
 	if err != nil {
 		t.Fatal(err)
@@ -1,3 +1,4 @@
+//stm: #unit
 package sub
 
 import (
@@ -49,6 +50,7 @@ func TestFetchCidsWithDedup(t *testing.T) {
 	}
 	g := &getter{msgs}
 
+	//stm: @CHAIN_INCOMING_FETCH_MESSAGES_BY_CID_001
 	// the cids have a duplicate
 	res, err := FetchMessagesByCids(context.TODO(), g, append(cids, cids[0]))
 
@@ -1,3 +1,4 @@
+//stm: #unit
 package chain_test
 
 import (
@@ -462,6 +463,8 @@ func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) {
 }
 
 func TestSyncSimple(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 50
 	tu := prepSyncTest(t, H)
 
@@ -478,6 +481,8 @@ func TestSyncSimple(t *testing.T) {
 }
 
 func TestSyncMining(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 50
 	tu := prepSyncTest(t, H)
 
@@ -500,6 +505,8 @@ func TestSyncMining(t *testing.T) {
 }
 
 func TestSyncBadTimestamp(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 50
 	tu := prepSyncTest(t, H)
 
@@ -554,6 +561,8 @@ func (wpp badWpp) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi
 }
 
 func TestSyncBadWinningPoSt(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 15
 	tu := prepSyncTest(t, H)
 
@@ -583,6 +592,9 @@ func (tu *syncTestUtil) loadChainToNode(to int) {
 }
 
 func TestSyncFork(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)
 
@@ -650,6 +662,9 @@ func TestSyncFork(t *testing.T) {
 // A and B both include _different_ messages from sender X with nonce N (where N is the correct nonce for X).
 // We can confirm that the state can be correctly computed, and that `MessagesForTipset` behaves as expected.
 func TestDuplicateNonce(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)
 
@@ -704,6 +719,7 @@ func TestDuplicateNonce(t *testing.T) {
 
 	var includedMsg cid.Cid
 	var skippedMsg cid.Cid
+	//stm: @CHAIN_STATE_SEARCH_MSG_001
 	r0, err0 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[0][0].Cid(), api.LookbackNoLimit, true)
 	r1, err1 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[1][0].Cid(), api.LookbackNoLimit, true)
 
@@ -745,6 +761,9 @@ func TestDuplicateNonce(t *testing.T) {
 // This test asserts that a block that includes a message with bad nonce can't be synced. A nonce is "bad" if it can't
 // be applied on the parent state.
 func TestBadNonce(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)
 
@@ -792,6 +811,9 @@ func TestBadNonce(t *testing.T) {
 // One of the messages uses the sender's robust address, the other uses the ID address.
 // Such a block is invalid and should not sync.
 func TestMismatchedNoncesRobustID(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
 	v5h := abi.ChainEpoch(4)
 	tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)
 
@@ -804,6 +826,7 @@ func TestMismatchedNoncesRobustID(t *testing.T) {
 	require.NoError(t, err)
 
 	// Produce a message from the banker
+	//stm: @CHAIN_STATE_LOOKUP_ID_001
 	makeMsg := func(id bool) *types.SignedMessage {
 		sender := tu.g.Banker()
 		if id {
@@ -846,6 +869,9 @@ func TestMismatchedNoncesRobustID(t *testing.T) {
 // One of the messages uses the sender's robust address, the other uses the ID address.
 // Such a block is valid and should sync.
 func TestMatchedNoncesRobustID(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
 	v5h := abi.ChainEpoch(4)
 	tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)
 
@@ -858,6 +884,7 @@ func TestMatchedNoncesRobustID(t *testing.T) {
 	require.NoError(t, err)
 
 	// Produce a message from the banker with specified nonce
+	//stm: @CHAIN_STATE_LOOKUP_ID_001
 	makeMsg := func(n uint64, id bool) *types.SignedMessage {
 		sender := tu.g.Banker()
 		if id {
@@ -917,6 +944,8 @@ func runSyncBenchLength(b *testing.B, l int) {
 }
 
 func TestSyncInputs(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_VALIDATE_BLOCK_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)
 
@@ -944,6 +973,9 @@ func TestSyncInputs(t *testing.T) {
 }
 
 func TestSyncCheckpointHead(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)
 
@@ -963,6 +995,7 @@ func TestSyncCheckpointHead(t *testing.T) {
 	a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)
 
 	tu.waitUntilSyncTarget(p1, a.TipSet())
+	//stm: @CHAIN_SYNCER_CHECKPOINT_001
 	tu.checkpointTs(p1, a.TipSet().Key())
 
 	require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
@@ -982,15 +1015,20 @@ func TestSyncCheckpointHead(t *testing.T) {
 	tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
 	p1Head := tu.getHead(p1)
 	require.True(tu.t, p1Head.Equals(a.TipSet()))
+	//stm: @CHAIN_SYNCER_CHECK_BAD_001
 	tu.assertBad(p1, b.TipSet())
 
 	// Should be able to switch forks.
+	//stm: @CHAIN_SYNCER_CHECKPOINT_001
 	tu.checkpointTs(p1, b.TipSet().Key())
 	p1Head = tu.getHead(p1)
 	require.True(tu.t, p1Head.Equals(b.TipSet()))
 }
 
 func TestSyncCheckpointEarlierThanHead(t *testing.T) {
+	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
 	H := 10
 	tu := prepSyncTest(t, H)
 
@@ -1010,6 +1048,7 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
 	a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)
 
 	tu.waitUntilSyncTarget(p1, a.TipSet())
+	//stm: @CHAIN_SYNCER_CHECKPOINT_001
 	tu.checkpointTs(p1, a1.TipSet().Key())
 
 	require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
@@ -1029,15 +1068,19 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
 	tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
 	p1Head := tu.getHead(p1)
 	require.True(tu.t, p1Head.Equals(a.TipSet()))
+	//stm: @CHAIN_SYNCER_CHECK_BAD_001
 	tu.assertBad(p1, b.TipSet())
 
 	// Should be able to switch forks.
+	//stm: @CHAIN_SYNCER_CHECKPOINT_001
 	tu.checkpointTs(p1, b.TipSet().Key())
 	p1Head = tu.getHead(p1)
 	require.True(tu.t, p1Head.Equals(b.TipSet()))
}
 
 func TestInvalidHeight(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
+	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	H := 50
 	tu := prepSyncTest(t, H)
 
@@ -24,6 +24,15 @@ var ProtocolCodenames = []struct {
 	{build.UpgradeTapeHeight + 1, "tape"},
 	{build.UpgradeLiftoffHeight + 1, "liftoff"},
 	{build.UpgradeKumquatHeight + 1, "postliftoff"},
+	{build.UpgradeCalicoHeight + 1, "calico"},
+	{build.UpgradePersianHeight + 1, "persian"},
+	{build.UpgradeOrangeHeight + 1, "orange"},
+	{build.UpgradeTrustHeight + 1, "trust"},
+	{build.UpgradeNorwegianHeight + 1, "norwegian"},
+	{build.UpgradeTurboHeight + 1, "turbo"},
+	{build.UpgradeHyperdriveHeight + 1, "hyperdrive"},
+	{build.UpgradeChocolateHeight + 1, "chocolate"},
+	{build.UpgradeOhSnapHeight + 1, "ohsnap"},
 }
 
 // GetProtocolCodename gets the protocol codename associated with a height.
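Note: ProtocolCodenames is an ordered (height, codename) table, so GetProtocolCodename can resolve any epoch to the protocol active at it; the new entries extend the table through OhSnap. A standalone sketch of the lookup (runnable; the heights are made-up stand-ins for the build.Upgrade*Height constants):

package main

import "fmt"

var codenames = []struct {
	height int64
	name   string
}{
	{0, "genesis"},
	{101, "chocolate"},
	{201, "ohsnap"},
}

// codenameFor returns the codename of the last entry at or below h.
func codenameFor(h int64) string {
	name := "unknown"
	for _, e := range codenames {
		if h >= e.height {
			name = e.name
		}
	}
	return name
}

func main() {
	fmt.Println(codenameFor(150)) // chocolate
}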
@@ -8,12 +8,11 @@ import (
 	"io"
 	"log"
 
-	"github.com/filecoin-project/lotus/api/v0api"
-
 	"github.com/fatih/color"
 	"github.com/filecoin-project/go-address"
 
 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/api/v0api"
 	"github.com/filecoin-project/lotus/chain/actors/builtin"
 	init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
@@ -43,6 +42,15 @@ func doExtractMessage(opts extractOpts) error {
 		return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err)
 	}
 
+	// Assumes that the desired message isn't at the boundary of network versions.
+	// Otherwise this will be inaccurate. But it's such a tiny edge case that
+	// it's not worth spending the time to support boundary messages unless
+	// actually needed.
+	nv, err := FullAPI.StateNetworkVersion(ctx, incTs.Key())
+	if err != nil {
+		return fmt.Errorf("failed to resolve network version from inclusion height: %w", err)
+	}
+
 	// get the circulating supply before the message was executed.
 	circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key())
 	if err != nil {
@@ -53,6 +61,7 @@ func doExtractMessage(opts extractOpts) error {
 
 	log.Printf("message was executed in tipset: %s", execTs.Key())
 	log.Printf("message was included in tipset: %s", incTs.Key())
+	log.Printf("network version at inclusion: %d", nv)
 	log.Printf("circulating supply at inclusion tipset: %d", circSupply)
 	log.Printf("finding precursor messages using mode: %s", opts.precursor)
 
@@ -111,6 +120,7 @@ func doExtractMessage(opts extractOpts) error {
 		BaseFee: basefee,
 		// recorded randomness will be discarded.
 		Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI),
+		NetworkVersion: nv,
 	})
 	if err != nil {
 		return fmt.Errorf("failed to execute precursor message: %w", err)
@@ -146,6 +156,7 @@ func doExtractMessage(opts extractOpts) error {
 		CircSupply: circSupplyDetail.FilCirculating,
 		BaseFee:    basefee,
 		Rand:       recordingRand,
+		NetworkVersion: nv,
 	})
 	if err != nil {
 		return fmt.Errorf("failed to execute message: %w", err)
@@ -263,11 +274,6 @@ func doExtractMessage(opts extractOpts) error {
 		return err
 	}
 
-	nv, err := FullAPI.StateNetworkVersion(ctx, execTs.Key())
-	if err != nil {
-		return err
-	}
-
 	codename := GetProtocolCodename(execTs.Height())
|
||||||
|
|
||||||
// Write out the test vector.
|
// Write out the test vector.
|
||||||
@@ -129,6 +129,7 @@ func runSimulateCmd(_ *cli.Context) error {
 		CircSupply: circSupply.FilCirculating,
 		BaseFee:    baseFee,
 		Rand:       rand,
+		// TODO NetworkVersion
 	})
 	if err != nil {
 		return fmt.Errorf("failed to apply message: %w", err)
@@ -670,7 +670,7 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, pha

 func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) {
 	empty := storage.ReplicaUpdateOut{}
-	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing)
+	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing)
 	if err != nil {
 		return empty, xerrors.Errorf("failed to acquire sector paths: %w", err)
 	}
@@ -719,7 +719,7 @@ func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, p
 }

 func (sb *Sealer) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) {
-	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdateCache|storiface.FTUpdate, storiface.FTNone, storiface.PathSealing)
+	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathSealing)
 	if err != nil {
 		return nil, xerrors.Errorf("failed to acquire sector paths: %w", err)
 	}
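Both hunks above merely reorder the file-type flags passed to AcquireSector. That is behavior-preserving because storiface sector file types are bit flags combined with |, and a bitwise union is order-independent. A sketch assuming the usual 1<<iota flag pattern (not copied from the lotus declaration):

package main

import "fmt"

type SectorFileType int

const (
	FTUnsealed SectorFileType = 1 << iota // assumed declaration style
	FTSealed
	FTCache
	FTUpdate
	FTUpdateCache
)

func main() {
	// Bitwise OR builds a set; the union is the same in any order.
	fmt.Println(FTSealed|FTUnsealed|FTCache == FTUnsealed|FTSealed|FTCache) // true
}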
extern/sector-storage/manager.go (vendored, 12 changes)
@@ -746,14 +746,13 @@ func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, p
 		return out, waitErr
 	}

-	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache); err != nil {
+	if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache); err != nil {
 		return storage.ReplicaUpdateOut{}, xerrors.Errorf("acquiring sector lock: %w", err)
 	}

 	selector := newAllocSelector(m.index, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing)

-	err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error {
-		log.Errorf("scheduled work for replica update")
+	err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error {
 		err := m.startWork(ctx, w, wk)(w.ReplicaUpdate(ctx, sector, pieces))
 		if err != nil {
 			return xerrors.Errorf("startWork: %w", err)
@@ -799,9 +798,12 @@ func (m *Manager) ProveReplicaUpdate1(ctx context.Context, sector storage.Sector
 		return nil, xerrors.Errorf("acquiring sector lock: %w", err)
 	}

-	selector := newExistingSelector(m.index, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTSealed|storiface.FTCache, true)
+	// NOTE: We set allowFetch to false so that we always execute on a worker
+	// with direct access to the data. We want to do that because this step is
+	// generally very cheap / fast, and transferring data is not worth the effort
+	selector := newExistingSelector(m.index, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTSealed|storiface.FTCache, false)

-	err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate1, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error {
+	err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate1, selector, m.schedFetch(sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error {

 		err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed))
 		if err != nil {
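The selector change above flips a single boolean, yet it changes scheduling: with allowFetch set to false, only workers that already hold the sector files are eligible for the task, which fits a step as cheap as ProveReplicaUpdate1, where a data transfer would dominate the runtime. A minimal model of that decision with invented types; this is not the lotus scheduler API:

package main

import "fmt"

type worker struct {
	name    string
	hasData bool
}

// eligible mimics an "existing selector": with allowFetch=false, only workers
// that already hold the data qualify; with true, any worker does, at the cost
// of transferring the files first.
func eligible(workers []worker, allowFetch bool) []string {
	var out []string
	for _, w := range workers {
		if w.hasData || allowFetch {
			out = append(out, w.name)
		}
	}
	return out
}

func main() {
	ws := []worker{{"local", true}, {"remote", false}}
	fmt.Println(eligible(ws, false)) // [local]: the cheap step stays where the data is
	fmt.Println(eligible(ws, true))  // [local remote]: "remote" would need a fetch first
}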
extern/sector-storage/manager_test.go (vendored, 4 changes)
@@ -1,3 +1,4 @@
+//stm: #unit
 package sectorstorage

 import (
@@ -365,6 +366,7 @@ func TestRedoPC1(t *testing.T) {

 // Manager restarts in the middle of a task, restarts it, it completes
 func TestRestartManager(t *testing.T) {
+	//stm: @WORKER_JOBS_001
 	test := func(returnBeforeCall bool) func(*testing.T) {
 		return func(t *testing.T) {
 			logging.SetAllLoggers(logging.LevelDebug)
@@ -509,6 +511,7 @@ func TestRestartWorker(t *testing.T) {
 	<-arch
 	require.NoError(t, w.Close())

+	//stm: @WORKER_STATS_001
 	for {
 		if len(m.WorkerStats()) == 0 {
 			break
@@ -571,6 +574,7 @@ func TestReenableWorker(t *testing.T) {
 	// disable
 	atomic.StoreInt64(&w.testDisable, 1)

+	//stm: @WORKER_STATS_001
 	for i := 0; i < 100; i++ {
 		if !m.WorkerStats()[w.session].Enabled {
 			break
extern/sector-storage/sched_test.go (vendored, 2 changes)
@@ -1,3 +1,4 @@
+//stm: #unit
 package sectorstorage

 import (
@@ -223,6 +224,7 @@ func TestSchedStartStop(t *testing.T) {
 }

 func TestSched(t *testing.T) {
+	//stm: @WORKER_JOBS_001
 	storiface.ParallelNum = 1
 	storiface.ParallelDenom = 1

extern/sector-storage/sealtasks/task.go (vendored, 2 changes)
@@ -6,7 +6,7 @@ const (
 	TTAddPiece   TaskType = "seal/v0/addpiece"
 	TTPreCommit1 TaskType = "seal/v0/precommit/1"
 	TTPreCommit2 TaskType = "seal/v0/precommit/2"
-	TTCommit1    TaskType = "seal/v0/commit/1" // NOTE: We use this to transfer the sector into miner-local storage for now; Don't use on workers!
+	TTCommit1    TaskType = "seal/v0/commit/1"
 	TTCommit2    TaskType = "seal/v0/commit/2"

 	TTFinalize TaskType = "seal/v0/finalize"
extern/sector-storage/stores/remote_test.go (vendored, 2 changes)
@@ -1,3 +1,4 @@
+//stm: #unit
 package stores_test

 import (
@@ -154,6 +155,7 @@ func TestMoveShared(t *testing.T) {
 }

 func TestReader(t *testing.T) {
+	//stm: @STORAGE_INFO_001
 	logging.SetAllLoggers(logging.LevelDebug)
 	bz := []byte("Hello World")

extern/storage-sealing/commit_batch_test.go (vendored, 3 changes)
@@ -1,3 +1,4 @@
+//stm: #unit
 package sealing_test

 import (
@@ -28,6 +29,7 @@ import (
 )

 func TestCommitBatcher(t *testing.T) {
+	//stm: @CHAIN_STATE_MINER_PRE_COM_INFO_001, @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001
 	t0123, err := address.NewFromString("t0123")
 	require.NoError(t, err)

@@ -147,6 +149,7 @@ func TestCommitBatcher(t *testing.T) {
 		}
 	}

+	//stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001, @CHAIN_STATE_MINER_GET_COLLATERAL_001
 	expectSend := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action {
 		return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise {
 			s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil)
@@ -1,3 +1,4 @@
+//stm: #unit
 package sealing_test

 import (
@@ -38,6 +39,7 @@ var fc = config.MinerFeeConfig{
 }

 func TestPrecommitBatcher(t *testing.T) {
+	//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
 	t0123, err := address.NewFromString("t0123")
 	require.NoError(t, err)

@@ -151,6 +153,7 @@ func TestPrecommitBatcher(t *testing.T) {
 		}
 	}

+	//stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001
 	expectSend := func(expect []abi.SectorNumber) action {
 		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
 			s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil)
@@ -171,6 +174,7 @@ func TestPrecommitBatcher(t *testing.T) {
 		}
 	}

+	//stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001
 	expectSendsSingle := func(expect []abi.SectorNumber) action {
 		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
 			s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil)
extern/storage-sealing/states_failed_test.go (vendored, 2 changes)
@@ -1,3 +1,4 @@
+//stm: #unit
 package sealing_test

 import (
@@ -49,6 +50,7 @@ func TestStateRecoverDealIDs(t *testing.T) {
 		PieceCID: idCid("newPieceCID"),
 	}

+	//stm: @CHAIN_STATE_MARKET_STORAGE_DEAL_001, @CHAIN_STATE_NETWORK_VERSION_001
 	api.EXPECT().StateMarketStorageDealProposal(ctx, dealId, nil).Return(dealProposal, nil)

 	pc := idCid("publishCID")
go.mod (6 changes)
@@ -108,7 +108,7 @@ require (
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/libp2p/go-buffer-pool v0.0.2
 	github.com/libp2p/go-eventbus v0.2.1
-	github.com/libp2p/go-libp2p v0.18.0-rc1
+	github.com/libp2p/go-libp2p v0.18.0-rc2
 	github.com/libp2p/go-libp2p-connmgr v0.3.1 // indirect
 	github.com/libp2p/go-libp2p-core v0.14.0
 	github.com/libp2p/go-libp2p-discovery v0.6.0
@@ -120,9 +120,9 @@ require (
 	github.com/libp2p/go-libp2p-record v0.1.3
 	github.com/libp2p/go-libp2p-resource-manager v0.1.2
 	github.com/libp2p/go-libp2p-routing-helpers v0.2.3
-	github.com/libp2p/go-libp2p-swarm v0.10.0
+	github.com/libp2p/go-libp2p-swarm v0.10.1
 	github.com/libp2p/go-libp2p-tls v0.3.1
-	github.com/libp2p/go-libp2p-yamux v0.8.0
+	github.com/libp2p/go-libp2p-yamux v0.8.1
 	github.com/libp2p/go-maddr-filter v0.1.0
 	github.com/mattn/go-isatty v0.0.14
 	github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
go.sum (9 changes)
@@ -995,8 +995,9 @@ github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2
 github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM=
 github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4=
 github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw=
-github.com/libp2p/go-libp2p v0.18.0-rc1 h1:CFHROLGmMwe/p8tR3sHahg/1NSaZa2EGbu7nDmdC+RY=
 github.com/libp2p/go-libp2p v0.18.0-rc1/go.mod h1:RgYlH7IIWHXREimC92bw5Lg1V2R5XmSzuLHb5fTnr+8=
+github.com/libp2p/go-libp2p v0.18.0-rc2 h1:ZLzGMdp1cVwxmA0vFpPVUDPQYUdHHGX7I58nXwpNr7Y=
+github.com/libp2p/go-libp2p v0.18.0-rc2/go.mod h1:gGNCvn0T19AzyNPDWej2vsAlZFZVnS+IxqckjnsOyM0=
 github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo=
 github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E=
 github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I=
@@ -1179,8 +1180,9 @@ github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat
 github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8=
 github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc=
 github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8=
-github.com/libp2p/go-libp2p-swarm v0.10.0 h1:1yr7UCwxCN92cw9g9Q+fnJSlk7lOB1RetoEewxhGVL0=
 github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA=
+github.com/libp2p/go-libp2p-swarm v0.10.1 h1:lXW3pgGt+BVmkzcFX61erX7l6Lt+WAamNhwa2Kf3eJM=
+github.com/libp2p/go-libp2p-swarm v0.10.1/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs=
 github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
 github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
 github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
@@ -1226,8 +1228,9 @@ github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLw
 github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE=
 github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k=
 github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08=
-github.com/libp2p/go-libp2p-yamux v0.8.0 h1:APQYlttIj+Rr5sfa6siojwsi0ZwcIh/exHIUl9hZr6o=
 github.com/libp2p/go-libp2p-yamux v0.8.0/go.mod h1:yTkPgN2ib8FHyU1ZcVD7aelzyAqXXwEPbyx+aSKm9h8=
+github.com/libp2p/go-libp2p-yamux v0.8.1 h1:pi7zUeZ4Z9TpbUMntvSvoP3dFD4SEw/VPybxBcOZGzg=
+github.com/libp2p/go-libp2p-yamux v0.8.1/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE=
 github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
 github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
 github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M=
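For context on the paired lines being swapped in go.sum: each module version records an h1: digest of its file tree plus a digest of its go.mod, and bumps like the ones above are normally produced by `go get github.com/libp2p/go-libp2p@v0.18.0-rc2` followed by `go mod tidy` rather than by hand-editing. If ever needed, an h1: value can be recomputed with golang.org/x/mod; a sketch, where the local extraction path is hypothetical:

package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// HashDir walks the module tree and produces the "h1:..." digest form
	// recorded in go.sum for that version.
	h, err := dirhash.HashDir(
		"/tmp/mod/github.com/libp2p/go-libp2p@v0.18.0-rc2", // hypothetical extraction path
		"github.com/libp2p/go-libp2p@v0.18.0-rc2",
		dirhash.Hash1,
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(h)
}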
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -19,6 +20,12 @@ import (
 )

 func TestAPI(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_STATE_MINER_INFO_001
 	t.Run("direct", func(t *testing.T) {
 		runAPITest(t)
 	})
@@ -116,11 +123,13 @@ func (ts *apiSuite) testSearchMsg(t *testing.T) {
 	sm, err := full.MpoolPushMessage(ctx, msg, nil)
 	require.NoError(t, err)

+	//stm: @CHAIN_STATE_WAIT_MSG_001
 	res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
 	require.NoError(t, err)

 	require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful")

+	//stm: @CHAIN_STATE_SEARCH_MSG_001
 	searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
 	require.NoError(t, err)
 	require.NotNil(t, searchRes)
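Every itests hunk from here onward follows the pattern visible above: one file-scope //stm: #integration tag and per-behavior //stm: @DOMAIN_ACTION_NNN tags placed directly before the calls that exercise them. These read as machine-readable markers for a test-matrix scanner; the consuming tool is an assumption, as it is not part of this diff. A self-contained sketch of the convention:

//stm: #integration
package itests_sketch // hypothetical package, mirroring the annotated files

import "testing"

// The file-level "#integration" tag classifies the suite; each "@..." tag
// names the behavior exercised by the statement(s) that follow it.
func TestAnnotatedExample(t *testing.T) {
	//stm: @CHAIN_STATE_WAIT_MSG_001
	t.Log("the StateWaitMsg call being tagged would sit here")
}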
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -19,6 +20,15 @@ import (
 )

 func TestCCUpgrade(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_STATE_MINER_GET_INFO_001
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+
+	//stm: @MINER_SECTOR_LIST_001
 	kit.QuietMiningLogs()

 	for _, height := range []abi.ChainEpoch{
@@ -63,6 +73,7 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) *kit.TestFullN
 	}
 	waitForSectorActive(ctx, t, CCUpgrade, client, maddr)

+	//stm: @SECTOR_CC_UPGRADE_001
 	err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
 	require.NoError(t, err)

@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -11,6 +12,11 @@ import (

 // TestClient does a basic test to exercise the client CLI commands.
 func TestClient(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
 	_ = os.Setenv("BELLMAN_NO_GPU", "1")
 	kit.QuietMiningLogs()

@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -52,6 +53,13 @@ import (
 // * asserts that miner B loses power
 // * asserts that miner D loses power, is inactive
 func TestDeadlineToggling(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @MINER_SECTOR_LIST_001
 	kit.Expensive(t)

 	kit.QuietMiningLogs()
@@ -108,6 +116,7 @@ func TestDeadlineToggling(t *testing.T) {
 	{
 		minerC.PledgeSectors(ctx, sectorsC, 0, nil)

+		//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
 		di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK)
 		require.NoError(t, err)

@@ -127,6 +136,7 @@ func TestDeadlineToggling(t *testing.T) {

 		expectedPower := types.NewInt(uint64(ssz) * sectorsC)

+		//stm: @CHAIN_STATE_MINER_POWER_001
 		p, err := client.StateMinerPower(ctx, maddrC, types.EmptyTSK)
 		require.NoError(t, err)

@@ -147,12 +157,14 @@ func TestDeadlineToggling(t *testing.T) {
 	}

 	checkMiner := func(ma address.Address, power abi.StoragePower, active, activeIfCron bool, tsk types.TipSetKey) {
+		//stm: @CHAIN_STATE_MINER_POWER_001
 		p, err := client.StateMinerPower(ctx, ma, tsk)
 		require.NoError(t, err)

 		// make sure it has the expected power.
 		require.Equal(t, p.MinerPower.RawBytePower, power)

+		//stm: @CHAIN_STATE_GET_ACTOR_001
 		mact, err := client.StateGetActor(ctx, ma, tsk)
 		require.NoError(t, err)

@@ -187,6 +199,7 @@ func TestDeadlineToggling(t *testing.T) {
 		checkMiner(maddrB, types.NewInt(0), true, true, uts.Key())
 	}

+	//stm: @CHAIN_STATE_NETWORK_VERSION_001
 	nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK)
 	require.NoError(t, err)
 	require.GreaterOrEqual(t, nv, network.Version12)
@@ -246,6 +259,7 @@ func TestDeadlineToggling(t *testing.T) {
 	}, nil)
 	require.NoError(t, err)

+	//stm: @CHAIN_STATE_WAIT_MSG_001
 	r, err := client.StateWaitMsg(ctx, m.Cid(), 2, api.LookbackNoLimit, true)
 	require.NoError(t, err)
 	require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)
@@ -298,6 +312,7 @@ func TestDeadlineToggling(t *testing.T) {
 	sectorbit := bitfield.New()
 	sectorbit.Set(uint64(sectorNum))

+	//stm: @CHAIN_STATE_SECTOR_PARTITION_001
 	loca, err := client.StateSectorPartition(ctx, maddrD, sectorNum, types.EmptyTSK)
 	require.NoError(t, err)

@@ -329,6 +344,7 @@ func TestDeadlineToggling(t *testing.T) {

 	t.Log("sent termination message:", smsg.Cid())

+	//stm: @CHAIN_STATE_WAIT_MSG_001
 	r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2, api.LookbackNoLimit, true)
 	require.NoError(t, err)
 	require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -12,6 +13,13 @@ import (
 )

 func TestStorageDealMissingBlock(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
 	ctx := context.Background()

 	// enable 512MiB proofs so we can conduct larger transfers.
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -71,6 +72,12 @@ func TestDealWithMarketAndMinerNode(t *testing.T) {
 }

 func TestDealCyclesConcurrent(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
 	if testing.Short() {
 		t.Skip("skipping test in short mode")
 	}
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -12,6 +13,13 @@ import (
 )

 func TestMaxStagingDeals(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
 	ctx := context.Background()

 	// enable 512MiB proofs so we can conduct larger transfers.
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -16,7 +17,13 @@ import (
 )

 func TestOfflineDealFlow(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_DATA_CALCULATE_COMMP_001, @CLIENT_DATA_GENERATE_CAR_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001
 	runTest := func(t *testing.T, fastRet bool, upscale abi.PaddedPieceSize) {
 		ctx := context.Background()
 		client, miner, ens := kit.EnsembleMinimal(t, kit.WithAllSubsystems()) // no mock proofs
@@ -60,6 +67,7 @@ func TestOfflineDealFlow(t *testing.T) {

 		proposalCid := dh.StartDeal(ctx, dp)

+		//stm: @CLIENT_STORAGE_DEALS_GET_001
 		// Wait for the deal to reach StorageDealCheckForAcceptance on the client
 		cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
 		require.NoError(t, err)
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -14,7 +15,13 @@ import (
 )

 func TestDealPadding(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_DATA_GET_DEAL_PIECE_CID_001
 	kit.QuietMiningLogs()

 	var blockTime = 250 * time.Millisecond
@@ -58,6 +65,7 @@ func TestDealPadding(t *testing.T) {
 	// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
 	time.Sleep(time.Second)

+	//stm: @CLIENT_STORAGE_DEALS_GET_001
 	di, err := client.ClientGetDealInfo(ctx, *proposalCid)
 	require.NoError(t, err)
 	require.True(t, di.PieceCID.Equals(pcid))
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -38,7 +39,13 @@ var (
 )

 func TestPartialRetrieval(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_RETRIEVAL_RETRIEVE_001
 	ctx := context.Background()

 	policy.SetPreCommitChallengeDelay(2)
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -9,6 +10,12 @@ import (
 )

 func TestFirstDealEnablesMining(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
 	// test making a deal with a fresh miner, and see if it starts to mine.
 	if testing.Short() {
 		t.Skip("skipping test in short mode")
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -12,6 +13,12 @@ import (
 )

 func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
 	var (
 		ctx       = context.Background()
 		blocktime = 50 * time.Millisecond
@@ -43,10 +50,12 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
 	_, res2, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})
 	require.Equal(t, res1.Root, res2.Root)

+	//stm: @CLIENT_STORAGE_DEALS_GET_001
 	// Retrieval
 	dealInfo, err := client.ClientGetDealInfo(ctx, *deal1)
 	require.NoError(t, err)

+	//stm: @CLIENT_RETRIEVAL_FIND_001
 	// fetch quote -> zero for unsealed price since unsealed file already exists.
 	offers, err := client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
 	require.NoError(t, err)
@@ -56,11 +65,13 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
 	require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64())

 	// remove ONLY one unsealed file
+	//stm: @STORAGE_LIST_001, @MINER_SECTOR_LIST_001
 	ss, err := miner.StorageList(context.Background())
 	require.NoError(t, err)
 	_, err = miner.SectorsList(ctx)
 	require.NoError(t, err)

+	//stm: @STORAGE_DROP_SECTOR_001, @STORAGE_LIST_001
 iLoop:
 	for storeID, sd := range ss {
 		for _, sector := range sd {
@@ -70,6 +81,7 @@ iLoop:
 		}
 	}

+	//stm: @CLIENT_RETRIEVAL_FIND_001
 	// get retrieval quote -> zero for unsealed price as unsealed file exists.
 	offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
 	require.NoError(t, err)
@@ -89,6 +101,7 @@ iLoop:
 		}
 	}

+	//stm: @CLIENT_RETRIEVAL_FIND_001
 	// fetch quote -> non-zero for unseal price as we have no more unsealed files.
 	offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
 	require.NoError(t, err)
@@ -100,6 +113,10 @@ iLoop:
 }

 func TestZeroPricePerByteRetrieval(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	if testing.Short() {
 		t.Skip("skipping test in short mode")
 	}
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -23,6 +24,12 @@ import (
 )

 func TestPublishDealsBatching(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
 	var (
 		ctx           = context.Background()
 		publishPeriod = 10 * time.Second
@@ -103,6 +110,7 @@ func TestPublishDealsBatching(t *testing.T) {
 	}

 	// Expect a single PublishStorageDeals message that includes the first two deals
+	//stm: @CHAIN_STATE_LIST_MESSAGES_001
 	msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
 	require.NoError(t, err)
 	count := 0
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -26,6 +27,13 @@ var (
 )

 func TestDealsRetryLackOfFunds(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
 	ctx := context.Background()
 	oldDelay := policy.GetPreCommitChallengeDelay()
 	policy.SetPreCommitChallengeDelay(5)
@@ -105,6 +113,11 @@ func TestDealsRetryLackOfFunds(t *testing.T) {
 }

 func TestDealsRetryLackOfFunds_blockInPublishDeal(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
 	ctx := context.Background()
 	oldDelay := policy.GetPreCommitChallengeDelay()
 	policy.SetPreCommitChallengeDelay(5)
@@ -181,6 +194,11 @@ func TestDealsRetryLackOfFunds_blockInPublishDeal(t *testing.T) {
 }

 func TestDealsRetryLackOfFunds_belowLimit(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
 	ctx := context.Background()
 	oldDelay := policy.GetPreCommitChallengeDelay()
 	policy.SetPreCommitChallengeDelay(5)
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -9,6 +10,12 @@ import (
 )

 func TestDealsWithSealingAndRPC(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
 	if testing.Short() {
 		t.Skip("skipping test in short mode")
 	}
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests

 import (
@@ -38,6 +39,12 @@ const (
 // TestGatewayWalletMsig tests that API calls to wallet and msig can be made on a lite
 // node that is connected through a gateway to a full API node
 func TestGatewayWalletMsig(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
 	kit.QuietMiningLogs()

 	blocktime := 5 * time.Millisecond
@@ -116,6 +123,7 @@ func TestGatewayWalletMsig(t *testing.T) {
 	addProposal, err := doSend(proto)
 	require.NoError(t, err)

+	//stm: @CHAIN_STATE_WAIT_MSG_001
 	res, err := lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true)
 	require.NoError(t, err)
 	require.EqualValues(t, 0, res.Receipt.ExitCode)
@@ -127,6 +135,7 @@ func TestGatewayWalletMsig(t *testing.T) {
 	// Get available balance of msig: should be greater than zero and less
 	// than initial amount
 	msig := execReturn.IDAddress
+	//stm: @CHAIN_STATE_MINER_AVAILABLE_BALANCE_001
 	msigBalance, err := lite.MsigGetAvailableBalance(ctx, msig, types.EmptyTSK)
 	require.NoError(t, err)
 	require.Greater(t, msigBalance.Int64(), int64(0))
@@ -139,6 +148,7 @@ func TestGatewayWalletMsig(t *testing.T) {
 	addProposal, err = doSend(proto)
 	require.NoError(t, err)

+	//stm: @CHAIN_STATE_WAIT_MSG_001
 	res, err = lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true)
 	require.NoError(t, err)
 	require.EqualValues(t, 0, res.Receipt.ExitCode)
@@ -156,6 +166,7 @@ func TestGatewayWalletMsig(t *testing.T) {
 	approval1, err := doSend(proto)
 	require.NoError(t, err)

+	//stm: @CHAIN_STATE_WAIT_MSG_001
 	res, err = lite.StateWaitMsg(ctx, approval1, 1, api.LookbackNoLimit, true)
 	require.NoError(t, err)
 	require.EqualValues(t, 0, res.Receipt.ExitCode)
@@ -169,6 +180,10 @@ func TestGatewayWalletMsig(t *testing.T) {
 // TestGatewayMsigCLI tests that msig CLI calls can be made
 // on a lite node that is connected through a gateway to a full API node
 func TestGatewayMsigCLI(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
 	kit.QuietMiningLogs()

 	blocktime := 5 * time.Millisecond
@@ -180,6 +195,10 @@ func TestGatewayMsigCLI(t *testing.T) {
 }

 func TestGatewayDealFlow(t *testing.T) {
+	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
|
||||||
|
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
|
||||||
|
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
|
||||||
kit.QuietMiningLogs()
|
kit.QuietMiningLogs()
|
||||||
|
|
||||||
blocktime := 5 * time.Millisecond
|
blocktime := 5 * time.Millisecond
|
||||||
@ -202,6 +221,10 @@ func TestGatewayDealFlow(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGatewayCLIDealFlow(t *testing.T) {
|
func TestGatewayCLIDealFlow(t *testing.T) {
|
||||||
|
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
|
||||||
|
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
|
||||||
|
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
|
||||||
|
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
|
||||||
kit.QuietMiningLogs()
|
kit.QuietMiningLogs()
|
||||||
|
|
||||||
blocktime := 5 * time.Millisecond
|
blocktime := 5 * time.Millisecond
|
||||||
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -16,6 +17,12 @@ import (
 )
 
 func TestChainGetMessagesInTs(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     ctx := context.Background()
 
     kit.QuietMiningLogs()
@@ -84,6 +91,7 @@ func TestChainGetMessagesInTs(t *testing.T) {
     }
 
     for _, sm := range sms {
+        //stm: @CHAIN_STATE_WAIT_MSG_001
         msgLookup, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
         require.NoError(t, err)
 
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -10,6 +11,12 @@ import (
 
 // TestMultisig does a basic test to exercise the multisig CLI commands
 func TestMultisig(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     kit.QuietMiningLogs()
 
     blockTime := 5 * time.Millisecond
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -13,6 +14,12 @@ import (
 )
 
 func TestNonceIncremental(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     ctx := context.Background()
 
     kit.QuietMiningLogs()
@@ -51,6 +58,7 @@ func TestNonceIncremental(t *testing.T) {
     }
 
     for _, sm := range sms {
+        //stm: @CHAIN_STATE_WAIT_MSG_001
         _, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
         require.NoError(t, err)
     }
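Many of the per-call annotations in this commit — `@CHAIN_STATE_WAIT_MSG_001` above being the most frequent — sit on the same recurring idiom: push a message, wait for it with some confidence, then require a zero exit code. A hedged sketch of that idiom factored into a standalone helper; the helper itself is illustrative and not part of the commit:

package itests

import (
	"context"
	"testing"

	"github.com/ipfs/go-cid"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/api"
)

// waitMsgOK waits for a message with confidence 3 and asserts that it
// executed successfully, mirroring the calls that @CHAIN_STATE_WAIT_MSG_001
// marks in these tests.
func waitMsgOK(ctx context.Context, t *testing.T, full api.FullNode, c cid.Cid) *api.MsgLookup {
	ml, err := full.StateWaitMsg(ctx, c, 3, api.LookbackNoLimit, true)
	require.NoError(t, err)
	require.EqualValues(t, 0, ml.Receipt.ExitCode)
	return ml
}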
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -27,6 +28,12 @@ import (
 )
 
 func TestPaymentChannelsAPI(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     kit.QuietMiningLogs()
 
     ctx := context.Background()
@@ -107,6 +114,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
     require.NoError(t, err)
     preds := state.NewStatePredicates(paymentCreator)
     finished := make(chan struct{})
+    //stm: @CHAIN_STATE_GET_ACTOR_001
     err = ev.StateChanged(func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) {
         act, err := paymentCreator.StateGetActor(ctx, channel, ts.Key())
         if err != nil {
@@ -182,6 +190,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
     collectMsg, err := paymentReceiver.PaychCollect(ctx, channel)
     require.NoError(t, err)
 
+    //stm: @CHAIN_STATE_WAIT_MSG_001
     res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3, api.LookbackNoLimit, true)
     require.NoError(t, err)
     require.EqualValues(t, 0, res.Receipt.ExitCode, "unable to collect on payment channel")
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -30,6 +31,12 @@ import (
 // TestPaymentChannelsBasic does a basic test to exercise the payment channel CLI
 // commands
 func TestPaymentChannelsBasic(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     _ = os.Setenv("BELLMAN_NO_GPU", "1")
     kit.QuietMiningLogs()
 
@@ -87,6 +94,10 @@ type voucherSpec struct {
 
 // TestPaymentChannelStatus tests the payment channel status CLI command
 func TestPaymentChannelStatus(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
     _ = os.Setenv("BELLMAN_NO_GPU", "1")
     kit.QuietMiningLogs()
 
@@ -167,6 +178,12 @@ func TestPaymentChannelStatus(t *testing.T) {
 // TestPaymentChannelVouchers does a basic test to exercise some payment
 // channel voucher commands
 func TestPaymentChannelVouchers(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     _ = os.Setenv("BELLMAN_NO_GPU", "1")
     kit.QuietMiningLogs()
 
@@ -299,6 +316,12 @@ func TestPaymentChannelVouchers(t *testing.T) {
 // TestPaymentChannelVoucherCreateShortfall verifies that if a voucher amount
 // is greater than what's left in the channel, voucher create fails
 func TestPaymentChannelVoucherCreateShortfall(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     _ = os.Setenv("BELLMAN_NO_GPU", "1")
     kit.QuietMiningLogs()
 
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -17,6 +18,15 @@ import (
 )
 
 func TestSDRUpgrade(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+    //stm: @CHAIN_STATE_NETWORK_VERSION_001
+
+    //stm: @MINER_SECTOR_LIST_001
     kit.QuietMiningLogs()
 
     // oldDelay := policy.GetPreCommitChallengeDelay()
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -18,6 +19,13 @@ import (
 )
 
 func TestDealsWithFinalizeEarly(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+    //stm: @STORAGE_INFO_001
     if testing.Short() {
         t.Skip("skipping test in short mode")
     }
@@ -42,6 +50,7 @@ func TestDealsWithFinalizeEarly(t *testing.T) {
     miner.AddStorage(ctx, t, 1000000000, true, false)
     miner.AddStorage(ctx, t, 1000000000, false, true)
 
+    //stm: @STORAGE_LIST_001
     sl, err := miner.StorageList(ctx)
     require.NoError(t, err)
     for si, d := range sl {
@@ -55,6 +64,7 @@ func TestDealsWithFinalizeEarly(t *testing.T) {
         dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1})
     })
 
+    //stm: @STORAGE_LIST_001
     sl, err = miner.StorageList(ctx)
     require.NoError(t, err)
     for si, d := range sl {
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -21,6 +22,13 @@ import (
 )
 
 func TestMinerBalanceCollateral(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
+    //stm: @MINER_SECTOR_LIST_001
     kit.QuietMiningLogs()
 
     blockTime := 5 * time.Millisecond
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -22,6 +23,12 @@ import (
 )
 
 func TestPledgeSectors(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     kit.QuietMiningLogs()
 
     blockTime := 50 * time.Millisecond
@@ -54,6 +61,7 @@ func TestPledgeSectors(t *testing.T) {
 }
 
 func TestPledgeBatching(t *testing.T) {
+    //stm: @SECTOR_PRE_COMMIT_FLUSH_001, @SECTOR_COMMIT_FLUSH_001
     blockTime := 50 * time.Millisecond
 
     runTest := func(t *testing.T, nSectors int) {
@@ -110,6 +118,12 @@ func TestPledgeBatching(t *testing.T) {
 }
 
 func TestPledgeMaxBatching(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     blockTime := 50 * time.Millisecond
 
     runTest := func(t *testing.T) {
@@ -173,6 +187,7 @@ func TestPledgeMaxBatching(t *testing.T) {
         }
 
         // Ensure that max aggregate message has propagated to the other node by checking current state
+        //stm: @CHAIN_STATE_MINER_SECTORS_001
         sectorInfosAfter, err := full.StateMinerSectors(ctx, miner.ActorAddr, nil, types.EmptyTSK)
         require.NoError(t, err)
         assert.Equal(t, miner5.MaxAggregatedSectors+kit.DefaultPresealsPerBootstrapMiner, len(sectorInfosAfter))
@@ -182,6 +197,12 @@ func TestPledgeMaxBatching(t *testing.T) {
 }
 
 func TestPledgeBeforeNv13(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     blocktime := 50 * time.Millisecond
 
     runTest := func(t *testing.T, nSectors int) {
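`TestPledgeBatching` above carries `@SECTOR_PRE_COMMIT_FLUSH_001` and `@SECTOR_COMMIT_FLUSH_001`, which appear to map to the miner's explicit batch-flush endpoints. A sketch, assuming the StorageMiner API surface of this era exposes SectorPreCommitFlush/SectorCommitFlush; the helper is illustrative, not part of this commit:

package itests

import (
	"context"

	"github.com/filecoin-project/lotus/api"
)

// flushBatches forces any queued PreCommit and ProveCommit batches out to
// the chain, which is what the batching tests wait on.
func flushBatches(ctx context.Context, m api.StorageMiner) error {
	if _, err := m.SectorPreCommitFlush(ctx); err != nil {
		return err
	}
	if _, err := m.SectorCommitFlush(ctx); err != nil {
		return err
	}
	return nil
}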
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -14,6 +15,12 @@ import (
 )
 
 func TestTerminate(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     kit.Expensive(t)
 
     kit.QuietMiningLogs()
@@ -33,6 +40,7 @@ func TestTerminate(t *testing.T) {
     ssz, err := miner.ActorSectorSize(ctx, maddr)
     require.NoError(t, err)
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
     require.Equal(t, p.MinerPower, p.TotalPower)
@@ -45,6 +53,7 @@ func TestTerminate(t *testing.T) {
     t.Log("wait for power")
 
     {
+        //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
         // Wait until proven.
         di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
         require.NoError(t, err)
@@ -58,6 +67,7 @@ func TestTerminate(t *testing.T) {
 
     nSectors++
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
     require.Equal(t, p.MinerPower, p.TotalPower)
@@ -67,6 +77,7 @@ func TestTerminate(t *testing.T) {
 
     toTerminate := abi.SectorNumber(3)
 
+    //stm: @SECTOR_TERMINATE_001
     err = miner.SectorTerminate(ctx, toTerminate)
     require.NoError(t, err)
 
@@ -79,6 +90,7 @@ loop:
         t.Log("state: ", si.State, msgTriggerred)
 
         switch sealing.SectorState(si.State) {
+        //stm: @SECTOR_TERMINATE_PENDING_001
         case sealing.Terminating:
             if !msgTriggerred {
                 {
@@ -111,6 +123,7 @@ loop:
     // need to wait for message to be mined and applied.
     time.Sleep(5 * time.Second)
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     // check power decreased
     p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
@@ -119,6 +132,7 @@ loop:
 
     // check in terminated set
     {
+        //stm: @CHAIN_STATE_MINER_GET_PARTITIONS_001
        parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK)
         require.NoError(t, err)
         require.Greater(t, len(parts), 0)
@@ -133,6 +147,7 @@ loop:
         require.Equal(t, uint64(0), bflen(parts[0].LiveSectors))
     }
 
+    //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
     di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -141,6 +156,7 @@ loop:
     ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
     t.Logf("Now head.Height = %d", ts.Height())
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -14,6 +15,12 @@ import (
 )
 
 func TestTapeFix(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     kit.QuietMiningLogs()
 
     var blocktime = 2 * time.Millisecond
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -23,6 +24,12 @@ import (
 )
 
 func TestVerifiedClientTopUp(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     blockTime := 100 * time.Millisecond
 
     test := func(nv network.Version, shouldWork bool) func(*testing.T) {
@@ -51,6 +58,7 @@ func TestVerifiedClientTopUp(t *testing.T) {
         defer cancel()
 
         // get VRH
+        //stm: @CHAIN_STATE_VERIFIED_REGISTRY_ROOT_KEY_001
         vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{})
         fmt.Println(vrh.String())
         require.NoError(t, err)
@@ -81,6 +89,7 @@ func TestVerifiedClientTopUp(t *testing.T) {
         sm, err := api.MpoolPushMessage(ctx, msg, nil)
         require.NoError(t, err, "AddVerifier failed")
 
+        //stm: @CHAIN_STATE_WAIT_MSG_001
         res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
         require.NoError(t, err)
         require.EqualValues(t, 0, res.Receipt.ExitCode)
@@ -102,11 +111,13 @@ func TestVerifiedClientTopUp(t *testing.T) {
         sm, err = api.MpoolPushMessage(ctx, msg, nil)
         require.NoError(t, err)
 
+        //stm: @CHAIN_STATE_WAIT_MSG_001
         res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
         require.NoError(t, err)
         require.EqualValues(t, 0, res.Receipt.ExitCode)
 
         // check datacap balance
+        //stm: @CHAIN_STATE_VERIFIED_CLIENT_STATUS_001
         dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK)
         require.NoError(t, err)
 
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -20,6 +21,12 @@ import (
 )
 
 func TestWindowPostDispute(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     kit.Expensive(t)
 
     kit.QuietMiningLogs()
@@ -61,6 +68,7 @@ func TestWindowPostDispute(t *testing.T) {
     evilMinerAddr, err := evilMiner.ActorAddress(ctx)
     require.NoError(t, err)
 
+    //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
     di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -72,6 +80,7 @@ func TestWindowPostDispute(t *testing.T) {
     ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
     t.Logf("Now head.Height = %d", ts.Height())
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -81,9 +90,11 @@ func TestWindowPostDispute(t *testing.T) {
     // make sure it has gained power.
     require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))
 
+    //stm: @MINER_SECTOR_LIST_001
     evilSectors, err := evilMiner.SectorsList(ctx)
     require.NoError(t, err)
     evilSectorNo := evilSectors[0] // only one.
+    //stm: @CHAIN_STATE_SECTOR_PARTITION_001
     evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -96,6 +107,7 @@ func TestWindowPostDispute(t *testing.T) {
 
     // Wait until we need to prove our sector.
     for {
+        //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
         di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
         require.NoError(t, err)
         if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 {
@@ -109,6 +121,7 @@ func TestWindowPostDispute(t *testing.T) {
 
     // Wait until after the proving period.
     for {
+        //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
         di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
         require.NoError(t, err)
         if di.Index != evilSectorLoc.Deadline {
@@ -119,6 +132,7 @@ func TestWindowPostDispute(t *testing.T) {
 
     t.Log("accepted evil proof")
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     // Make sure the evil node didn't lose any power.
     p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
     require.NoError(t, err)
@@ -145,11 +159,13 @@ func TestWindowPostDispute(t *testing.T) {
         require.NoError(t, err)
 
         t.Log("waiting dispute")
+        //stm: @CHAIN_STATE_WAIT_MSG_001
         rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
         require.NoError(t, err)
         require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error())
     }
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     // Objection SUSTAINED!
     // Make sure the evil node lost power.
     p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
@@ -162,6 +178,7 @@ func TestWindowPostDispute(t *testing.T) {
     // First, recover the sector.
 
     {
+        //stm: @CHAIN_STATE_MINER_INFO_001
         minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK)
         require.NoError(t, err)
 
@@ -186,6 +203,7 @@ func TestWindowPostDispute(t *testing.T) {
         sm, err := client.MpoolPushMessage(ctx, msg, nil)
         require.NoError(t, err)
 
+        //stm: @CHAIN_STATE_WAIT_MSG_001
         rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
         require.NoError(t, err)
         require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error())
@@ -193,6 +211,7 @@ func TestWindowPostDispute(t *testing.T) {
 
     // Then wait for the deadline.
     for {
+        //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
         di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
         require.NoError(t, err)
         if di.Index == evilSectorLoc.Deadline {
@@ -210,6 +229,11 @@ func TestWindowPostDispute(t *testing.T) {
 }
 
 func TestWindowPostDisputeFails(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+    //stm: @CHAIN_STATE_MINER_GET_DEADLINES_001
     kit.Expensive(t)
 
     kit.QuietMiningLogs()
@@ -232,6 +256,7 @@ func TestWindowPostDisputeFails(t *testing.T) {
 
     miner.PledgeSectors(ctx, 10, 0, nil)
 
+    //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
     di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -246,6 +271,7 @@ func TestWindowPostDisputeFails(t *testing.T) {
     require.NoError(t, err)
     expectedPower := types.NewInt(uint64(ssz) * (kit.DefaultPresealsPerBootstrapMiner + 10))
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -271,6 +297,7 @@ waitForProof:
     }
 
     for {
+        //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
         di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
         require.NoError(t, err)
         // wait until the deadline finishes.
@@ -314,11 +341,13 @@ func submitBadProof(
         return err
     }
 
+    //stm: @CHAIN_STATE_MINER_INFO_001
     minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key())
     if err != nil {
         return err
     }
 
+    //stm: @CHAIN_STATE_GET_RANDOMNESS_FROM_TICKETS_001
     commEpoch := di.Open
     commRand, err := client.StateGetRandomnessFromTickets(
         ctx, crypto.DomainSeparationTag_PoStChainCommit,
@@ -355,6 +384,7 @@ func submitBadProof(
         return err
     }
 
+    //stm: @CHAIN_STATE_WAIT_MSG_001
     rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
     if err != nil {
         return err
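The dispute tests above repeatedly spin on `StateMinerProvingDeadline` (the loops annotated `@CHAIN_STATE_MINER_CALCULATE_DEADLINE_001`) until the chain reaches the deadline holding the sector under test. That loop, factored into a hedged standalone helper — the helper name and shape are illustrative, not part of the commit:

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/go-address"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
)

// waitForDeadlineIndex blocks until the miner's current proving deadline has
// the given index, polling once per block time.
func waitForDeadlineIndex(ctx context.Context, t *testing.T, full api.FullNode, maddr address.Address, index uint64, blocktime time.Duration) {
	for {
		di, err := full.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		require.NoError(t, err)
		if di.Index == index {
			return
		}
		build.Clock.Sleep(blocktime)
	}
}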
@@ -1,3 +1,4 @@
+//stm: #integration
 package itests
 
 import (
@@ -23,6 +24,12 @@ import (
 )
 
 func TestWindowedPost(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
    kit.Expensive(t)
 
     kit.QuietMiningLogs()
@@ -58,6 +65,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
     maddr, err := miner.ActorAddress(ctx)
     require.NoError(t, err)
 
+    //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
     di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -71,6 +79,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
     ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
     t.Logf("Now head.Height = %d", ts.Height())
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -84,6 +93,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
 
     // Drop 2 sectors from deadline 2 partition 0 (full partition / deadline)
     {
+        //stm: @CHAIN_STATE_MINER_GET_PARTITIONS_001
         parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK)
         require.NoError(t, err)
         require.Greater(t, len(parts), 0)
@@ -109,6 +119,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
 
     // Drop 1 sectors from deadline 3 partition 0
     {
+        //stm: @CHAIN_STATE_MINER_GET_PARTITIONS_001
         parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK)
         require.NoError(t, err)
         require.Greater(t, len(parts), 0)
@@ -137,6 +148,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
         require.NoError(t, err)
     }
 
+    //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
     di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -147,6 +159,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
     ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
     t.Logf("Now head.Height = %d", ts.Height())
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -160,6 +173,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
     err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false)
     require.NoError(t, err)
 
+    //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
     di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -169,6 +183,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
     ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
     t.Logf("Now head.Height = %d", ts.Height())
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -183,6 +198,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
 
     {
         // Wait until proven.
+        //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
         di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
         require.NoError(t, err)
 
@@ -193,6 +209,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
         t.Logf("Now head.Height = %d", ts.Height())
     }
 
+    //stm: @CHAIN_STATE_MINER_POWER_001
     p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
@@ -203,6 +220,12 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
 }
 
 func TestWindowPostBaseFeeNoBurn(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     kit.Expensive(t)
 
     kit.QuietMiningLogs()
@@ -225,10 +248,12 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) {
     maddr, err := miner.ActorAddress(ctx)
     require.NoError(t, err)
 
+    //stm: @CHAIN_STATE_MINER_INFO_001
     mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
     miner.PledgeSectors(ctx, nSectors, 0, nil)
+    //stm: @CHAIN_STATE_GET_ACTOR_001
     wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
     require.NoError(t, err)
     en := wact.Nonce
@@ -237,6 +262,7 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) {
 
 waitForProof:
     for {
+        //stm: @CHAIN_STATE_GET_ACTOR_001
         wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
         require.NoError(t, err)
         if wact.Nonce > en {
@@ -246,9 +272,11 @@ waitForProof:
         build.Clock.Sleep(blocktime)
     }
 
+    //stm: @CHAIN_STATE_LIST_MESSAGES_001
     slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
     require.NoError(t, err)
 
+    //stm: @CHAIN_STATE_REPLAY_001
     pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
     require.NoError(t, err)
 
@@ -256,6 +284,12 @@ waitForProof:
 }
 
 func TestWindowPostBaseFeeBurn(t *testing.T) {
+    //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
+    //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
+    //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
+    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+
+    //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
     kit.Expensive(t)
 
     kit.QuietMiningLogs()
@@ -271,10 +305,12 @@ func TestWindowPostBaseFeeBurn(t *testing.T) {
     maddr, err := miner.ActorAddress(ctx)
     require.NoError(t, err)
 
+    //stm: @CHAIN_STATE_MINER_INFO_001
     mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
     require.NoError(t, err)
 
     miner.PledgeSectors(ctx, 10, 0, nil)
+    //stm: @CHAIN_STATE_GET_ACTOR_001
     wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
     require.NoError(t, err)
     en := wact.Nonce
@@ -283,6 +319,7 @@ func TestWindowPostBaseFeeBurn(t *testing.T) {
 
 waitForProof:
     for {
+        //stm: @CHAIN_STATE_GET_ACTOR_001
         wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
         require.NoError(t, err)
         if wact.Nonce > en {
@@ -292,9 +329,11 @@ waitForProof:
         build.Clock.Sleep(blocktime)
     }
 
+    //stm: @CHAIN_STATE_LIST_MESSAGES_001
     slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
     require.NoError(t, err)
 
+    //stm: @CHAIN_STATE_REPLAY_001
     pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
     require.NoError(t, err)
 
|
@ -1,3 +1,4 @@
+//stm: #unit
package retrievaladapter

import (
@ -18,6 +19,7 @@ import (
)

func TestGetPricingInput(t *testing.T) {
+	//stm: @CHAIN_STATE_MARKET_STORAGE_DEAL_001
	ctx := context.Background()
	tsk := &types.TipSet{}
	key := tsk.Key()
@ -1,3 +1,4 @@
+//stm: #unit
package storageadapter

import (
@ -27,6 +28,7 @@ import (
)

func TestDealStateMatcher(t *testing.T) {
+	//stm: @CHAIN_STATE_GET_ACTOR_001
	ctx := context.Background()
	bs := bstore.NewMemorySync()
	store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs))
@ -1,3 +1,4 @@
+//stm: #unit
package config

import (
@ -1,3 +1,4 @@
+//stm: #unit
package client

import (
@ -31,6 +32,7 @@ import (
var testdata embed.FS

func TestImportLocal(t *testing.T) {
+	//stm: @CLIENT_STORAGE_DEALS_IMPORT_LOCAL_001, @CLIENT_RETRIEVAL_FIND_001
	ds := dssync.MutexWrap(datastore.NewMapDatastore())
	dir := t.TempDir()
	im := imports.NewManager(ds, dir)
@ -44,6 +46,7 @@ func TestImportLocal(t *testing.T) {
	b, err := testdata.ReadFile("testdata/payload.txt")
	require.NoError(t, err)

+	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
	root, err := a.ClientImportLocal(ctx, bytes.NewReader(b))
	require.NoError(t, err)
	require.NotEqual(t, cid.Undef, root)
@ -56,6 +59,7 @@ func TestImportLocal(t *testing.T) {
	require.Equal(t, root, *it.Root)
	require.True(t, strings.HasPrefix(it.CARPath, dir))

+	//stm: @CLIENT_DATA_HAS_LOCAL_001
	local, err := a.ClientHasLocal(ctx, root)
	require.NoError(t, err)
	require.True(t, local)
@ -1,3 +1,4 @@
+//stm: #unit
package client

import (
@ -1,3 +1,4 @@
+//stm: #unit
package full

import (
@ -1,3 +1,4 @@
+//stm: #unit
package repo

import (
@ -1,3 +1,4 @@
+//stm: #unit
package repo

import (
@ -1,3 +1,4 @@
+//stm: #unit
package repo

import (
@ -1,3 +1,4 @@
+//stm: #unit
package paychmgr

import (
@ -43,6 +44,9 @@ func TestCheckVoucherValid(t *testing.T) {
	mock.setAccountAddress(fromAcct, from)
	mock.setAccountAddress(toAcct, to)

+	//stm: @TOKEN_PAYCH_VOUCHER_VALID_001, @TOKEN_PAYCH_VOUCHER_VALID_002, @TOKEN_PAYCH_VOUCHER_VALID_003
+	//stm: @TOKEN_PAYCH_VOUCHER_VALID_004, @TOKEN_PAYCH_VOUCHER_VALID_005, @TOKEN_PAYCH_VOUCHER_VALID_006, @TOKEN_PAYCH_VOUCHER_VALID_007
+	//stm: @TOKEN_PAYCH_VOUCHER_VALID_009, @TOKEN_PAYCH_VOUCHER_VALID_010
	tcases := []struct {
		name string
		expectError bool
@ -242,6 +246,7 @@ func TestCreateVoucher(t *testing.T) {
		Lane: 1,
		Amount: voucherLane1Amt,
	}
+	//stm: @TOKEN_PAYCH_VOUCHER_CREATE_001
	res, err := s.mgr.CreateVoucher(ctx, s.ch, voucher)
	require.NoError(t, err)
	require.NotNil(t, res.Voucher)
@ -286,6 +291,7 @@ func TestCreateVoucher(t *testing.T) {
		Lane: 2,
		Amount: voucherLane2Amt,
	}
+	//stm: @TOKEN_PAYCH_VOUCHER_CREATE_004
	res, err = s.mgr.CreateVoucher(ctx, s.ch, voucher)
	require.NoError(t, err)

@ -296,6 +302,7 @@ func TestCreateVoucher(t *testing.T) {
}

func TestAddVoucherDelta(t *testing.T) {
+	//stm: @TOKEN_PAYCH_LIST_VOUCHERS_001
	ctx := context.Background()

	// Set up a manager with a single payment channel
@ -357,6 +364,7 @@ func TestAddVoucherNextLane(t *testing.T) {
	require.NoError(t, err)
	require.EqualValues(t, ci.NextLane, 3)

+	//stm: @TOKEN_PAYCH_ALLOCATE_LANE_001
	// Allocate a lane (should be lane 3)
	lane, err := s.mgr.AllocateLane(ctx, s.ch)
	require.NoError(t, err)
@ -393,6 +401,7 @@ func TestAllocateLane(t *testing.T) {
	// Set up a manager with a single payment channel
	s := testSetupMgrWithChannel(t)

+	//stm: @TOKEN_PAYCH_ALLOCATE_LANE_001
	// First lane should be 0
	lane, err := s.mgr.AllocateLane(ctx, s.ch)
	require.NoError(t, err)
@ -447,6 +456,7 @@ func TestAllocateLaneWithExistingLaneState(t *testing.T) {
	_, err = mgr.AddVoucherInbound(ctx, ch, sv, nil, minDelta)
	require.NoError(t, err)

+	//stm: @TOKEN_PAYCH_ALLOCATE_LANE_001
	// Allocate lane should return the next lane (lane 3)
	lane, err := mgr.AllocateLane(ctx, ch)
	require.NoError(t, err)
@ -509,6 +519,7 @@ func TestAddVoucherInboundWalletKey(t *testing.T) {
}

func TestBestSpendable(t *testing.T) {
+	//stm: @TOKEN_PAYCH_LIST_VOUCHERS_001
	ctx := context.Background()

	// Set up a manager with a single payment channel
@ -551,6 +562,7 @@ func TestBestSpendable(t *testing.T) {
		},
	})

+	//stm: @TOKEN_PAYCH_BEST_SPENDABLE_001
	// Verify best spendable vouchers on each lane
	vouchers, err := BestSpendableByLane(ctx, bsapi, s.ch)
	require.NoError(t, err)
@ -691,6 +703,7 @@ func TestSubmitVoucher(t *testing.T) {
	err = p3.UnmarshalCBOR(bytes.NewReader(msg.Message.Params))
	require.NoError(t, err)

+	//stm: @TOKEN_PAYCH_LIST_VOUCHERS_001
	// Verify that vouchers are marked as submitted
	vis, err := s.mgr.ListVouchers(ctx, s.ch)
	require.NoError(t, err)
@ -68,6 +68,7 @@ func TestPaychGetCreateChannelMsg(t *testing.T) {
// TestPaychGetCreateChannelThenAddFunds tests creating a channel and then
// adding funds to it
func TestPaychGetCreateChannelThenAddFunds(t *testing.T) {
+	//stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -158,6 +159,7 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) {
// operation is queued up behind a create channel operation, and the create
// channel fails, then the waiting operation can succeed.
func TestPaychGetCreateChannelWithErrorThenCreateAgain(t *testing.T) {
+	//stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -222,6 +224,7 @@ func TestPaychGetCreateChannelWithErrorThenCreateAgain(t *testing.T) {
// TestPaychGetRecoverAfterError tests that after a create channel fails, the
// next attempt to create channel can succeed.
func TestPaychGetRecoverAfterError(t *testing.T) {
+	//stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -274,6 +277,7 @@ func TestPaychGetRecoverAfterError(t *testing.T) {
// TestPaychGetRecoverAfterAddFundsError tests that after an add funds fails, the
// next attempt to add funds can succeed.
func TestPaychGetRecoverAfterAddFundsError(t *testing.T) {
+	//stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -356,6 +360,7 @@ func TestPaychGetRecoverAfterAddFundsError(t *testing.T) {
// right after the create channel message is sent, the channel will be
// created when the system restarts.
func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) {
+	//stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -435,6 +440,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) {
// right after the add funds message is sent, the add funds will be
// processed when the system restarts.
func TestPaychGetRestartAfterAddFundsMsg(t *testing.T) {
+	//stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -498,6 +504,7 @@ func TestPaychGetRestartAfterAddFundsMsg(t *testing.T) {
// TestPaychGetWait tests that GetPaychWaitReady correctly waits for the
// channel to be created or funds to be added
func TestPaychGetWait(t *testing.T) {
+	//stm: @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -555,6 +562,7 @@ func TestPaychGetWait(t *testing.T) {

// TestPaychGetWaitErr tests that GetPaychWaitReady correctly handles errors
func TestPaychGetWaitErr(t *testing.T) {
+	//stm: @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -602,6 +610,7 @@ func TestPaychGetWaitErr(t *testing.T) {
// TestPaychGetWaitCtx tests that GetPaychWaitReady returns early if the context
// is cancelled
func TestPaychGetWaitCtx(t *testing.T) {
+	//stm: @TOKEN_PAYCH_WAIT_READY_001
	ctx, cancel := context.WithCancel(context.Background())
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -631,6 +640,7 @@ func TestPaychGetWaitCtx(t *testing.T) {
// progress and two add funds are queued up behind it, the two add funds
// will be merged
func TestPaychGetMergeAddFunds(t *testing.T) {
+	//stm: @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -729,6 +739,7 @@ func TestPaychGetMergeAddFunds(t *testing.T) {
// TestPaychGetMergeAddFundsCtxCancelOne tests that when a queued add funds
// request is cancelled, its amount is removed from the total merged add funds
func TestPaychGetMergeAddFundsCtxCancelOne(t *testing.T) {
+	//stm: @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -826,6 +837,7 @@ func TestPaychGetMergeAddFundsCtxCancelOne(t *testing.T) {
// TestPaychGetMergeAddFundsCtxCancelAll tests that when all queued add funds
// requests are cancelled, no add funds message is sent
func TestPaychGetMergeAddFundsCtxCancelAll(t *testing.T) {
+	//stm: @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -900,6 +912,7 @@ func TestPaychGetMergeAddFundsCtxCancelAll(t *testing.T) {
// TestPaychAvailableFunds tests that PaychAvailableFunds returns the correct
// channel state
func TestPaychAvailableFunds(t *testing.T) {
+	//stm: @TOKEN_PAYCH_WAIT_READY_001, @TOKEN_PAYCH_AVAILABLE_FUNDS_001, @TOKEN_PAYCH_AVAILABLE_FUNDS_002, @TOKEN_PAYCH_AVAILABLE_FUNDS_003
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -23,6 +23,7 @@ import (
// insufficient funds, then adding funds to the channel, then adding the
// voucher again
func TestPaychAddVoucherAfterAddFunds(t *testing.T) {
+	//stm: @TOKEN_PAYCH_WAIT_READY_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))

@ -14,6 +14,7 @@ import (
)

func TestPaychSettle(t *testing.T) {
+	//stm: @TOKEN_PAYCH_WAIT_READY_001, @TOKEN_PAYCH_SETTLE_001, @TOKEN_PAYCH_LIST_CHANNELS_001
	ctx := context.Background()
	store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
