Merge pull request #9231 from filecoin-project/fix/datacid-res-envvars

sealing: fix: Make DataCid resource env vars make more sense
Łukasz Magiera 2022-08-30 02:36:02 +02:00 committed by GitHub
commit 088bf56f2a
GPG Key ID: 4AEE18F83AFDEB23
181 changed files with 456 additions and 369 deletions

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package api
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package api
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package badgerbs
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package badgerbs
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package splitstore
import (

@@ -519,6 +519,7 @@ func (s *SplitStore) applyProtectors() error {
// - At this point we are ready to begin purging:
// - We sort cold objects heaviest first, so as to never delete the consituents of a DAG before the DAG itself (which would leave dangling references)
// - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live
//
// - We then end the transaction and compact/gc the hotstore.
func (s *SplitStore) compact(curTs *types.TipSet) {
log.Info("waiting for active views to complete")
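
The purge procedure described in the comment above follows a common pattern; a minimal standalone sketch of it (illustrative types and callbacks, not the SplitStore implementation):

package sketch

import (
	"sort"
	"sync"
)

type coldObject struct {
	cid    string
	weight int
}

// purgeCold deletes cold objects heaviest-first, in small batches under a lock,
// re-checking liveness against the concurrent transactional mark before deleting.
func purgeCold(cold []coldObject, batchSize int, mu *sync.Mutex, marked func(cid string) bool, del func(cids []string) error) error {
	// Heaviest first, so a DAG is always purged before its constituents.
	sort.Slice(cold, func(i, j int) bool { return cold[i].weight > cold[j].weight })
	for start := 0; start < len(cold); start += batchSize {
		end := start + batchSize
		if end > len(cold) {
			end = len(cold)
		}
		mu.Lock()
		batch := make([]string, 0, end-start)
		for _, o := range cold[start:end] {
			if marked(o.cid) { // skip anything the concurrent mark now considers live
				continue
			}
			batch = append(batch, o.cid)
		}
		err := del(batch)
		mu.Unlock()
		if err != nil {
			return err
		}
	}
	return nil
}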

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package splitstore
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package blockstore
import (

@@ -12,10 +12,9 @@ type unionBlockstore []Blockstore
// Union returns an unioned blockstore.
//
// * Reads return from the first blockstore that has the value, querying in the
// supplied order.
// * Writes (puts and deletes) are broadcast to all stores.
//
// - Reads return from the first blockstore that has the value, querying in the
// supplied order.
// - Writes (puts and deletes) are broadcast to all stores.
func Union(stores ...Blockstore) Blockstore {
return unionBlockstore(stores)
}
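
A minimal standalone sketch of the union semantics described above (a toy key/value store, not the lotus Blockstore interface):

package sketch

type store interface {
	Get(key string) ([]byte, bool)
	Put(key string, val []byte)
}

type unionStore []store

// Get returns from the first store that has the value, in the supplied order.
func (u unionStore) Get(key string) ([]byte, bool) {
	for _, s := range u {
		if v, ok := s.Get(key); ok {
			return v, true
		}
	}
	return nil, false
}

// Put broadcasts the write to all stores.
func (u unionStore) Put(key string, val []byte) {
	for _, s := range u {
		s.Put(key, val)
	}
}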

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package blockstore
import (

Binary file not shown.

Binary file not shown.

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package build
import (

@@ -5,7 +5,6 @@
//
// Its purpose is to unlock various degrees of flexibility and parametrization
// when writing Testground plans for Lotus.
//
package build
import (

@@ -26,7 +26,7 @@ type AdtArrayDiff interface {
// - All values that exist in preArr and not in curArr are passed to AdtArrayDiff.Remove()
// - All values that exist in curArr nnd not in prevArr are passed to adtArrayDiff.Add()
// - All values that exist in preArr and in curArr are passed to AdtArrayDiff.Modify()
// - It is the responsibility of AdtArrayDiff.Modify() to determine if the values it was passed have been modified.
// - It is the responsibility of AdtArrayDiff.Modify() to determine if the values it was passed have been modified.
func DiffAdtArray(preArr, curArr Array, out AdtArrayDiff) error {
notNew := make(map[int64]struct{}, curArr.Length())
prevVal := new(typegen.Deferred)
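
The same Add/Remove/Modify dispatch, sketched over plain maps instead of ADT arrays (illustrative only, not the DiffAdtArray implementation):

package sketch

// diffMaps applies the rules documented above: pre-only keys are removed,
// cur-only keys are added, and keys present in both are handed to onModify,
// which decides whether anything actually changed.
func diffMaps(pre, cur map[int64]string,
	onAdd func(k int64, v string),
	onRemove func(k int64, v string),
	onModify func(k int64, before, after string)) {
	for k, before := range pre {
		after, ok := cur[k]
		if !ok {
			onRemove(k, before)
			continue
		}
		onModify(k, before, after)
	}
	for k, v := range cur {
		if _, ok := pre[k]; !ok {
			onAdd(k, v)
		}
	}
}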

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package adt
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package aerrors_test
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package policy
import (

@@ -1,5 +1,5 @@
//stm: ignore
//Only tests external library behavior, therefore it should not be annotated
// stm: ignore
// Only tests external library behavior, therefore it should not be annotated
package drand
import (

@@ -23,7 +23,8 @@ type triggerID = uint64
type msgH = abi.ChainEpoch
// triggerH is the block height at which the listener will be notified about the
// message (msgH+confidence)
//
// message (msgH+confidence)
type triggerH = abi.ChainEpoch
type eventData interface{}
@@ -39,7 +40,8 @@ type EventHandler func(ctx context.Context, data eventData, prevTs, ts *types.Ti
//
// If `done` is true, timeout won't be triggered
// If `more` is false, no messages will be sent to EventHandler (RevertHandler
// may still be called)
//
// may still be called)
type CheckFunc func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error)
// Keep track of information for an event handler
@@ -375,31 +377,31 @@ type StateMatchFunc func(oldTs, newTs *types.TipSet) (bool, StateChange, error)
// StateChanged registers a callback which is triggered when a specified state
// change occurs or a timeout is reached.
//
// * `CheckFunc` callback is invoked immediately with a recent tipset, it
// returns two booleans - `done`, and `more`.
// - `CheckFunc` callback is invoked immediately with a recent tipset, it
// returns two booleans - `done`, and `more`.
//
// * `done` should be true when some on-chain state change we are waiting
// - `done` should be true when some on-chain state change we are waiting
// for has happened. When `done` is set to true, timeout trigger is disabled.
//
// * `more` should be false when we don't want to receive new notifications
// - `more` should be false when we don't want to receive new notifications
// through StateChangeHandler. Note that notifications may still be delivered to
// RevertHandler
//
// * `StateChangeHandler` is called when the specified state change was observed
// on-chain, and a confidence threshold was reached, or the specified `timeout`
// height was reached with no state change observed. When this callback is
// invoked on a timeout, `oldTs` and `states are set to nil.
// This callback returns a boolean specifying whether further notifications
// should be sent, like `more` return param from `CheckFunc` above.
// - `StateChangeHandler` is called when the specified state change was observed
// on-chain, and a confidence threshold was reached, or the specified `timeout`
// height was reached with no state change observed. When this callback is
// invoked on a timeout, `oldTs` and `states are set to nil.
// This callback returns a boolean specifying whether further notifications
// should be sent, like `more` return param from `CheckFunc` above.
//
// * `RevertHandler` is called after apply handler, when we drop the tipset
// containing the message. The tipset passed as the argument is the tipset
// that is being dropped. Note that the event dropped may be re-applied
// in a different tipset in small amount of time.
// - `RevertHandler` is called after apply handler, when we drop the tipset
// containing the message. The tipset passed as the argument is the tipset
// that is being dropped. Note that the event dropped may be re-applied
// in a different tipset in small amount of time.
//
// * `StateMatchFunc` is called against each tipset state. If there is a match,
// the state change is queued up until the confidence interval has elapsed (and
// `StateChangeHandler` is called)
// - `StateMatchFunc` is called against each tipset state. If there is a match,
// the state change is queued up until the confidence interval has elapsed (and
// `StateChangeHandler` is called)
func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf StateMatchFunc) error {
hnd := func(ctx context.Context, data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
states, ok := data.(StateChange)
@@ -503,33 +505,34 @@ type MsgHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.Ti
type MsgMatchFunc func(msg *types.Message) (matched bool, err error)
// Called registers a callback which is triggered when a specified method is
// called on an actor, or a timeout is reached.
//
// * `CheckFunc` callback is invoked immediately with a recent tipset, it
// returns two booleans - `done`, and `more`.
// called on an actor, or a timeout is reached.
//
// * `done` should be true when some on-chain action we are waiting for has
// happened. When `done` is set to true, timeout trigger is disabled.
// - `CheckFunc` callback is invoked immediately with a recent tipset, it
// returns two booleans - `done`, and `more`.
//
// * `more` should be false when we don't want to receive new notifications
// through MsgHandler. Note that notifications may still be delivered to
// RevertHandler
// - `done` should be true when some on-chain action we are waiting for has
// happened. When `done` is set to true, timeout trigger is disabled.
//
// * `MsgHandler` is called when the specified event was observed on-chain,
// and a confidence threshold was reached, or the specified `timeout` height
// was reached with no events observed. When this callback is invoked on a
// timeout, `msg` is set to nil. This callback returns a boolean specifying
// whether further notifications should be sent, like `more` return param
// from `CheckFunc` above.
// - `more` should be false when we don't want to receive new notifications
// through MsgHandler. Note that notifications may still be delivered to
// RevertHandler
//
// * `RevertHandler` is called after apply handler, when we drop the tipset
// containing the message. The tipset passed as the argument is the tipset
// that is being dropped. Note that the message dropped may be re-applied
// in a different tipset in small amount of time.
// - `MsgHandler` is called when the specified event was observed on-chain,
// and a confidence threshold was reached, or the specified `timeout` height
// was reached with no events observed. When this callback is invoked on a
// timeout, `msg` is set to nil. This callback returns a boolean specifying
// whether further notifications should be sent, like `more` return param
// from `CheckFunc` above.
//
// * `MsgMatchFunc` is called against each message. If there is a match, the
// message is queued up until the confidence interval has elapsed (and
// `MsgHandler` is called)
// - `RevertHandler` is called after apply handler, when we drop the tipset
// containing the message. The tipset passed as the argument is the tipset
// that is being dropped. Note that the message dropped may be re-applied
// in a different tipset in small amount of time.
//
// - `MsgMatchFunc` is called against each message. If there is a match, the
// message is queued up until the confidence interval has elapsed (and
// `MsgHandler` is called)
func (me *messageEvents) Called(ctx context.Context, check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error {
hnd := func(ctx context.Context, data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
msg, ok := data.(*types.Message)
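
A rough usage sketch for the Called contract documented above. Only the CheckFunc, MsgMatchFunc and Called signatures are quoted in this diff; the MsgHandler and RevertHandler shapes below, and the Events wrapper exposing Called, are assumptions and may not match the package exactly:

package sketch

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/types"
)

func watchCallsTo(ctx context.Context, ev *events.Events, target address.Address) error {
	check := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) {
		return false, true, nil // nothing observed yet; keep listening
	}
	match := func(msg *types.Message) (bool, error) {
		return msg.To == target, nil // fire for any message addressed to target
	}
	handler := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) {
		// Reached once the matched message has `confidence` tipsets on top of it,
		// or with msg == nil if the timeout height was hit first.
		return false, nil // no further notifications needed
	}
	revert := func(ctx context.Context, ts *types.TipSet) error {
		return nil // the tipset carrying the message was dropped in a reorg
	}
	return ev.Called(ctx, check, handler, revert, 5, abi.ChainEpoch(100), match)
}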

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package events
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package state
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package events
import (

@@ -10,8 +10,8 @@
// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports
// two options at the moment:
//
// - include block contents
// - include block messages
// - include block contents
// - include block messages
//
// The response will include a status code, an optional message, and the
// response payload in case of success. The payload is a slice of serialized
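
For reference, this is how a 64-bit option bitfield like the one described above is typically composed; the constant names here are placeholders, not the exchange package's identifiers:

package sketch

const (
	includeBlockContents uint64 = 1 << iota // bit 0: send block contents
	includeBlockMessages                    // bit 1: send block messages as well
)

func wantsMessages(options uint64) bool {
	return options&includeBlockMessages != 0
}

// Example: ask the peer for both block contents and messages.
var fullRequestOptions = includeBlockContents | includeBlockMessages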

@@ -21,11 +21,12 @@ const (
)
// FIXME: Bumped from original 800 to this to accommodate `syncFork()`
// use of `GetBlocks()`. It seems the expectation of that API is to
// fetch any amount of blocks leaving it to the internal logic here
// to partition and reassemble the requests if they go above the maximum.
// (Also as a consequence of this temporarily removing the `const`
// qualifier to avoid "const initializer [...] is not a constant" error.)
//
// use of `GetBlocks()`. It seems the expectation of that API is to
// fetch any amount of blocks leaving it to the internal logic here
// to partition and reassemble the requests if they go above the maximum.
// (Also as a consequence of this temporarily removing the `const`
// qualifier to avoid "const initializer [...] is not a constant" error.)
var MaxRequestLength = uint64(build.ForkLengthThreshold)
const (
@@ -147,11 +148,12 @@ type BSTipSet struct {
// `BlsIncludes`/`SecpkIncludes` matches `Bls`/`Secpk` messages
// to blocks in the tipsets with the format:
// `BlsIncludes[BI][MI]`
// * BI: block index in the tipset.
// * MI: message index in `Bls` list
// - BI: block index in the tipset.
// - MI: message index in `Bls` list
//
// FIXME: The logic to decompress this structure should belong
// to itself, not to the consumer.
//
// to itself, not to the consumer.
type CompactedMessages struct {
Bls []*types.Message
BlsIncludes [][]uint64
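
A small standalone sketch of decompressing the includes layout described above: includes[BI] lists, for block BI of the tipset, the indices (MI) into the shared message slice (illustrative types, not the lotus ones):

package sketch

// messagesPerBlock expands a shared message list plus per-block index lists
// (the BlsIncludes/SecpkIncludes layout) into one message slice per block.
func messagesPerBlock(msgs []string, includes [][]uint64) [][]string {
	out := make([][]string, len(includes))
	for bi, indices := range includes {
		out[bi] = make([]string, 0, len(indices))
		for _, mi := range indices {
			out[bi] = append(out[bi], msgs[mi])
		}
	}
	return out
}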

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package gen
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package market
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package messagepool
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package messagepool
import (

@@ -1056,9 +1056,9 @@ func (mp *MessagePool) getStateBalance(ctx context.Context, addr address.Address
// this method is provided for the gateway to push messages.
// differences from Push:
// - strict checks are enabled
// - extra strict add checks are used when adding the messages to the msgSet
// that means: no nonce gaps, at most 10 pending messages for the actor
// - strict checks are enabled
// - extra strict add checks are used when adding the messages to the msgSet
// that means: no nonce gaps, at most 10 pending messages for the actor
func (mp *MessagePool) PushUntrusted(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) {
err := mp.checkMessage(m)
if err != nil {
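
An illustrative check for the extra-strict add rules mentioned above (not mpool code): untrusted pushes may not leave nonce gaps and are capped at 10 pending messages per actor:

package sketch

// canAddUntrusted assumes `pendingCount` pending messages that are already
// contiguous starting at stateNonce, per the no-gap rule.
func canAddUntrusted(stateNonce uint64, pendingCount int, msgNonce uint64) bool {
	if pendingCount >= 10 {
		return false
	}
	next := stateNonce + uint64(pendingCount)
	return msgNonce == next // anything else would create a nonce gap
}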

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package messagepool
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package messagepool
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package messagepool
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package messagesigner
import (

@@ -1,4 +1,4 @@
//stm:#unit
// stm:#unit
package rand_test
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package state
import (

@@ -30,9 +30,9 @@ var ErrExpensiveFork = errors.New("refusing explicit call due to state fork at e
// tipset's parent. In the presence of null blocks, the height at which the message is invoked may
// be less than the specified tipset.
//
// - If no tipset is specified, the first tipset without an expensive migration is used.
// - If executing a message at a given tipset would trigger an expensive migration, the call will
// fail with ErrExpensiveFork.
// - If no tipset is specified, the first tipset without an expensive migration is used.
// - If executing a message at a given tipset would trigger an expensive migration, the call will
// fail with ErrExpensiveFork.
func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error) {
ctx, span := trace.StartSpan(ctx, "statemanager.Call")
defer span.End()
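
A hedged usage sketch for Call as documented above: passing a nil tipset lets the state manager pick the first tipset without an expensive migration, and callers should be ready for ErrExpensiveFork (assuming the sentinel is returned unwrapped or wrapped with %w):

package sketch

import (
	"context"
	"errors"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
)

func callAvoidingExpensiveForks(ctx context.Context, sm *stmgr.StateManager, msg *types.Message, fallback *types.TipSet) (*api.InvocResult, error) {
	res, err := sm.Call(ctx, msg, nil) // nil: first tipset without an expensive migration
	if errors.Is(err, stmgr.ErrExpensiveFork) {
		// Executing at the chosen height would trigger an expensive migration;
		// retry against an explicitly chosen (e.g. post-migration) tipset.
		return sm.Call(ctx, msg, fallback)
	}
	return res, err
}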

@@ -36,12 +36,12 @@ type MigrationCache interface {
// MigrationFunc is a migration function run at every upgrade.
//
// - The cache is a per-upgrade cache, pre-populated by pre-migrations.
// - The oldState is the state produced by the upgrade epoch.
// - The returned newState is the new state that will be used by the next epoch.
// - The height is the upgrade epoch height (already executed).
// - The tipset is the first non-null tipset after the upgrade height (the tipset in
// which the upgrade is executed). Do not assume that ts.Height() is the upgrade height.
// - The cache is a per-upgrade cache, pre-populated by pre-migrations.
// - The oldState is the state produced by the upgrade epoch.
// - The returned newState is the new state that will be used by the next epoch.
// - The height is the upgrade epoch height (already executed).
// - The tipset is the first non-null tipset after the upgrade height (the tipset in
// which the upgrade is executed). Do not assume that ts.Height() is the upgrade height.
//
// NOTE: In StateCompute and CallWithGas, the passed tipset is actually the tipset _before_ the
// upgrade. The tipset should really only be used for referencing the "current chain".

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package stmgr_test
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package stmgr_test
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package store_test
import (

@@ -9,12 +9,17 @@ import (
// WrapHeadChangeCoalescer wraps a ReorgNotifee with a head change coalescer.
// minDelay is the minimum coalesce delay; when a head change is first received, the coalescer will
// wait for that long to coalesce more head changes.
//
// wait for that long to coalesce more head changes.
//
// maxDelay is the maximum coalesce delay; the coalescer will not delay delivery of a head change
// more than that.
//
// more than that.
//
// mergeInterval is the interval that triggers additional coalesce delay; if the last head change was
// within the merge interval when the coalesce timer fires, then the coalesce time is extended
// by min delay and up to max delay total.
//
// within the merge interval when the coalesce timer fires, then the coalesce time is extended
// by min delay and up to max delay total.
func WrapHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) ReorgNotifee {
c := NewHeadChangeCoalescer(fn, minDelay, maxDelay, mergeInterval)
return c.HeadChange
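
A hedged wiring sketch for the coalescer described above; the WrapHeadChangeCoalescer signature is quoted in this diff, while the ReorgNotifee shape (revert/apply tipset slices) is an assumption:

package sketch

import (
	"time"

	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

func newCoalescedNotifee() store.ReorgNotifee {
	inner := store.ReorgNotifee(func(revert, apply []*types.TipSet) error {
		return nil // react to the (coalesced) head change here
	})
	return store.WrapHeadChangeCoalescer(inner,
		2*time.Second,  // minDelay: wait at least this long after the first head change
		10*time.Second, // maxDelay: never delay delivery longer than this
		1*time.Second,  // mergeInterval: changes this close together keep extending the delay
	)
}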

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package store
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package store_test
import (

@@ -93,8 +93,8 @@ type WeightFunc func(ctx context.Context, stateBs bstore.Blockstore, ts *types.T
// store).
//
// To alleviate disk access, the ChainStore has two ARC caches:
// 1. a tipset cache
// 2. a block => messages references cache.
// 1. a tipset cache
// 2. a block => messages references cache.
type ChainStore struct {
chainBlockstore bstore.Blockstore
stateBlockstore bstore.Blockstore
@@ -453,8 +453,9 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
// The "fast forward" case is covered in this logic as a valid fork of length 0.
//
// FIXME: We may want to replace some of the logic in `syncFork()` with this.
// `syncFork()` counts the length on both sides of the fork at the moment (we
// need to settle on that) but here we just enforce it on the `synced` side.
//
// `syncFork()` counts the length on both sides of the fork at the moment (we
// need to settle on that) but here we just enforce it on the `synced` side.
func (cs *ChainStore) exceedsForkLength(ctx context.Context, synced, external *types.TipSet) (bool, error) {
if synced == nil || external == nil {
// FIXME: If `cs.heaviest` is nil we should just bypass the entire

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package store_test
import (

@@ -159,9 +159,12 @@ func FetchSignedMessagesByCids(
}
// Fetch `cids` from the block service, apply `cb` on each of them. Used
// by the fetch message functions above.
//
// by the fetch message functions above.
//
// We check that each block is received only once and we do not received
// blocks we did not request.
//
// blocks we did not request.
func fetchCids(
ctx context.Context,
bserv bserv.BlockGetter,
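
A standalone sketch of the bookkeeping described above (illustrative, not the fetchCids implementation): every requested key must arrive exactly once, and anything not requested is rejected:

package sketch

import "fmt"

// newFetchTracker returns a callback that accepts each expected key once and
// rejects duplicates or keys that were never requested.
func newFetchTracker(requested []string) func(got string) error {
	pending := make(map[string]bool, len(requested))
	for _, k := range requested {
		pending[k] = true
	}
	return func(got string) error {
		if !pending[got] {
			return fmt.Errorf("block %s was not requested or was already received", got)
		}
		delete(pending, got)
		return nil
	}
}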

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package sub
import (

@@ -60,16 +60,16 @@ var (
// Syncer is in charge of running the chain synchronization logic. As such, it
// is tasked with these functions, amongst others:
//
// * Fast-forwards the chain as it learns of new TipSets from the network via
// the SyncManager.
// * Applies the fork choice rule to select the correct side when confronted
// with a fork in the network.
// * Requests block headers and messages from other peers when not available
// in our BlockStore.
// * Tracks blocks marked as bad in a cache.
// * Keeps the BlockStore and ChainStore consistent with our view of the world,
// the latter of which in turn informs other components when a reorg has been
// committed.
// - Fast-forwards the chain as it learns of new TipSets from the network via
// the SyncManager.
// - Applies the fork choice rule to select the correct side when confronted
// with a fork in the network.
// - Requests block headers and messages from other peers when not available
// in our BlockStore.
// - Tracks blocks marked as bad in a cache.
// - Keeps the BlockStore and ChainStore consistent with our view of the world,
// the latter of which in turn informs other components when a reorg has been
// committed.
//
// The Syncer does not run workers itself. It's mainly concerned with
// ensuring a consistent state of chain consensus. The reactive and network-
@@ -671,9 +671,9 @@ func extractSyncState(ctx context.Context) *SyncerState {
// 2. Check the consistency of beacon entries in the from tipset. We check
// total equality of the BeaconEntries in each block.
// 3. Traverse the chain backwards, for each tipset:
// 3a. Load it from the chainstore; if found, it move on to its parent.
// 3b. Query our peers via client in batches, requesting up to a
// maximum of 500 tipsets every time.
// 3a. Load it from the chainstore; if found, it move on to its parent.
// 3b. Query our peers via client in batches, requesting up to a
// maximum of 500 tipsets every time.
//
// Once we've concluded, if we find a mismatching tipset at the height where the
// anchor tipset should be, we are facing a fork, and we invoke Syncer#syncFork
@@ -1171,7 +1171,7 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.Co
// else we must drop part of our chain to connect to the peer's head
// (referred to as "forking").
//
// 2. StagePersistHeaders: now that we've collected the missing headers,
// 2. StagePersistHeaders: now that we've collected the missing headers,
// augmented by those on the other side of a fork, we persist them to the
// BlockStore.
//

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package chain
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package chain_test
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package types
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package types
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package types
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package types
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package types
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package types
import (

@@ -99,11 +99,11 @@ func tipsetSortFunc(blks []*BlockHeader) func(i, j int) bool {
}
// Checks:
// * A tipset is composed of at least one block. (Because of our variable
// number of blocks per tipset, determined by randomness, we do not impose
// an upper limit.)
// * All blocks have the same height.
// * All blocks have the same parents (same number of them and matching CIDs).
// - A tipset is composed of at least one block. (Because of our variable
// number of blocks per tipset, determined by randomness, we do not impose
// an upper limit.)
// - All blocks have the same height.
// - All blocks have the same parents (same number of them and matching CIDs).
func NewTipSet(blks []*BlockHeader) (*TipSet, error) {
if len(blks) == 0 {
return nil, xerrors.Errorf("NewTipSet called with zero length array of blocks")
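
The three checks listed above, sketched over a minimal illustrative header type (the real NewTipSet operates on *BlockHeader values):

package sketch

import (
	"fmt"
	"reflect"
)

type hdr struct {
	Height  int64
	Parents []string
}

func checkTipSetBlocks(blks []hdr) error {
	if len(blks) == 0 {
		return fmt.Errorf("a tipset must contain at least one block")
	}
	for _, b := range blks[1:] {
		if b.Height != blks[0].Height {
			return fmt.Errorf("all blocks in a tipset must have the same height")
		}
		if !reflect.DeepEqual(b.Parents, blks[0].Parents) {
			return fmt.Errorf("all blocks in a tipset must have the same parents")
		}
	}
	return nil
}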

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package types
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package types
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package chain
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package vectors
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package vm
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package vm
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package vm
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package vm
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package wallet
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package wallet
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package cli
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package cli
import (

@@ -1,5 +1,5 @@
//stm: ignore
//stm: #unit
// stm: ignore
// stm: #unit
package cli
import (

@@ -1,5 +1,5 @@
//stm: ignore
//stm: #unit
// stm: ignore
// stm: #unit
package cli
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package cli
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package cli
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package main
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package main
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package main
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package main
import (

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package main
import (

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package stages
import (

@@ -55,10 +55,10 @@ func (fs *FundingStage) Fund(bb *blockbuilder.BlockBuilder, target address.Addre
// sendAndFund "packs" the given message, funding the actor if necessary. It:
//
// 1. Tries to send the given message.
// 2. If that fails, it checks to see if the exit code was ErrInsufficientFunds.
// 3. If so, it sends 1K FIL from the "burnt funds actor" (because we need to send it from
// somewhere) and re-tries the message.0
// 1. Tries to send the given message.
// 2. If that fails, it checks to see if the exit code was ErrInsufficientFunds.
// 3. If so, it sends 1K FIL from the "burnt funds actor" (because we need to send it from
// somewhere) and re-tries the message.0
func (fs *FundingStage) SendAndFund(bb *blockbuilder.BlockBuilder, msg *types.Message) (res *types.MessageReceipt, err error) {
for i := 0; i < 10; i++ {
res, err = bb.PushMessage(msg)
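
The retry-with-funding pattern described above, as a standalone sketch (callbacks are illustrative, not the blockbuilder API):

package sketch

import "fmt"

// sendWithTopUp pushes a message, and on an insufficient-funds failure tops up
// the sender (e.g. with 1K FIL from a funding account) and tries again.
func sendWithTopUp(push func() error, fund func() error, isInsufficientFunds func(error) bool) error {
	for i := 0; i < 10; i++ {
		err := push()
		if err == nil || !isInsufficientFunds(err) {
			return err
		}
		if err := fund(); err != nil {
			return err
		}
	}
	return fmt.Errorf("still failing with insufficient funds after repeated top-ups")
}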

@@ -28,10 +28,10 @@ func (sim *Simulation) Step(ctx context.Context) (*types.TipSet, error) {
// popNextMessages generates/picks a set of messages to be included in the next block.
//
// - This function is destructive and should only be called once per epoch.
// - This function does not store anything in the repo.
// - This function handles all gas estimation. The returned messages should all fit in a single
// block.
// - This function is destructive and should only be called once per epoch.
// - This function does not store anything in the repo.
// - This function handles all gas estimation. The returned messages should all fit in a single
// block.
func (sim *Simulation) popNextMessages(ctx context.Context) ([]*types.Message, error) {
parentTs := sim.head

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package main
import (

@@ -146,10 +146,10 @@ type CallerValidationArgs struct {
// CallerValidation violates VM call validation constraints.
//
// CallerValidationBranchNone performs no validation.
// CallerValidationBranchTwice validates twice.
// CallerValidationBranchIsAddress validates caller against CallerValidationArgs.Addrs.
// CallerValidationBranchIsType validates caller against CallerValidationArgs.Types.
// CallerValidationBranchNone performs no validation.
// CallerValidationBranchTwice validates twice.
// CallerValidationBranchIsAddress validates caller against CallerValidationArgs.Addrs.
// CallerValidationBranchIsType validates caller against CallerValidationArgs.Types.
func (a Actor) CallerValidation(rt runtime2.Runtime, args *CallerValidationArgs) *abi.EmptyValue {
switch args.Branch {
case CallerValidationBranchNone:

@@ -1,4 +1,4 @@
//stm: #chaos
// stm: #chaos
package chaos
import (

@@ -1,4 +1,4 @@
//stm: ignore
// stm: ignore
// This file does not test any behaviors by itself; rather, it runs other test files
// Therefore, this file should not be annotated.
package conformance

@@ -276,7 +276,8 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP
// messages that originate from secp256k senders, leaving all
// others untouched.
// TODO: generate a signature in the DSL so that it's encoded in
// the test vector.
//
// the test vector.
func toChainMsg(msg *types.Message) (ret types.ChainMsg) {
ret = msg
if msg.From.Protocol() == address.SECP256K1 {

@@ -4397,26 +4397,26 @@ Response:
},
"seal/v0/datacid": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@@ -4433,8 +4433,8 @@ Response:
"MaxConcurrent": 0
},
"4": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@@ -4442,26 +4442,26 @@ Response:
"MaxConcurrent": 0
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@@ -4478,8 +4478,8 @@ Response:
"MaxConcurrent": 0
},
"9": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,

@@ -579,26 +579,26 @@ Response:
},
"seal/v0/datacid": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@@ -615,8 +615,8 @@ Response:
"MaxConcurrent": 0
},
"4": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@@ -624,26 +624,26 @@ Response:
"MaxConcurrent": 0
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608,
"BaseMinMemory": 1073741824,
"MaxConcurrent": 0
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
@@ -660,8 +660,8 @@ Response:
"MaxConcurrent": 0
},
"9": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,

@@ -1,4 +1,4 @@
//stm: #unit
// stm: #unit
package gateway
import (

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package itests
import (

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package itests
import (

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package itests
import (

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package itests
import (

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package itests
import (

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package itests
import (

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package itests
import (

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package itests
import (

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package itests
import (

@@ -1,4 +1,4 @@
//stm: #integration
// stm: #integration
package itests
import (

Some files were not shown because too many files have changed in this diff.