package kit

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"net"
	"net/http"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/google/uuid"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
	"github.com/stretchr/testify/require"
	"github.com/urfave/cli/v2"

	"github.com/filecoin-project/go-address"
	cborutil "github.com/filecoin-project/go-cbor-util"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/builtin"
	"github.com/filecoin-project/go-state-types/exitcode"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/go-statestore"
	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
	power3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/power"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
	"github.com/filecoin-project/lotus/chain/gen"
	genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
	"github.com/filecoin-project/lotus/chain/messagepool"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet/key"
	"github.com/filecoin-project/lotus/cmd/curio/deps"
	"github.com/filecoin-project/lotus/cmd/curio/rpc"
	"github.com/filecoin-project/lotus/cmd/curio/tasks"
	"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
	"github.com/filecoin-project/lotus/cmd/lotus-worker/sealworker"
	"github.com/filecoin-project/lotus/gateway"
	"github.com/filecoin-project/lotus/genesis"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/markets/idxprov"
	"github.com/filecoin-project/lotus/markets/idxprov/idxprov_test"
	lotusminer "github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/filecoin-project/lotus/node/modules"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	testing2 "github.com/filecoin-project/lotus/node/modules/testing"
	"github.com/filecoin-project/lotus/node/repo"
	"github.com/filecoin-project/lotus/storage/paths"
	pipeline "github.com/filecoin-project/lotus/storage/pipeline"
	sectorstorage "github.com/filecoin-project/lotus/storage/sealer"
	"github.com/filecoin-project/lotus/storage/sealer/mock"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

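// init tunes global chain and messagepool parameters so that test networks
// bootstrap from a single peer and head changes propagate almost immediately.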
func init() {
	chain.BootstrapPeerThreshold = 1
	messagepool.HeadChangeCoalesceMinDelay = time.Microsecond
	messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond
	messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond
}

// Ensemble is a collection of nodes instantiated within a test.
//
// Create a new ensemble with:
//
//	ens := kit.NewEnsemble()
//
// Create full nodes and miners:
//
//	var full TestFullNode
//	var miner TestMiner
//	ens.FullNode(&full, opts...) // populates a full node
//	ens.Miner(&miner, &full, opts...) // populates a miner, using the full node as its chain daemon
//
// It is possible to pass functional options to set initial balances,
// presealed sectors, owner keys, etc.
//
// After the initial nodes are added, call `ens.Start()` to forge genesis
// and start the network. Mining will NOT be started automatically. It needs
// to be started explicitly by calling `BeginMining`.
//
// Nodes also need to be connected with one another, either via `ens.Connect()`
// or `ens.InterconnectAll()`. A common incantation for simple tests is to do:
//
//	ens.InterconnectAll().BeginMining(blocktime)
//
// You can continue to add more nodes, but you must always follow with
// `ens.Start()` to activate the new nodes.
//
// The API is chainable, so it's possible to do a lot in a very succinct way:
//
//	kit.NewEnsemble().FullNode(&full).Miner(&miner, &full).Start().InterconnectAll().BeginMining()
//
// You can also find convenient fullnode:miner presets, such as 1:1, 1:2,
// and 2:1, e.g.:
//
//	kit.EnsembleMinimal()
//	kit.EnsembleOneTwo()
//	kit.EnsembleTwoOne()
type Ensemble struct {
	t            *testing.T
	bootstrapped bool
	genesisBlock bytes.Buffer
	mn           mocknet.Mocknet
	options      *ensembleOpts

	inactive struct {
		fullnodes     []*TestFullNode
		providernodes []*TestCurioNode
		miners        []*TestMiner
		workers       []*TestWorker
	}
	active struct {
		fullnodes     []*TestFullNode
		providernodes []*TestCurioNode
		miners        []*TestMiner
		workers       []*TestWorker
		bms           map[*TestMiner]*BlockMiner
	}
	genesis struct {
		version  network.Version
		miners   []genesis.Miner
		accounts []genesis.Actor
	}
}

// NewEnsemble instantiates a new blank Ensemble.
func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble {
	options := DefaultEnsembleOpts
	for _, o := range opts {
		err := o(&options)
		require.NoError(t, err)
	}

	n := &Ensemble{t: t, options: &options}
	n.active.bms = make(map[*TestMiner]*BlockMiner)

	for _, up := range options.upgradeSchedule {
		if up.Height < 0 {
			n.genesis.version = up.Network
		}
	}

	// add accounts from ensemble options to genesis.
	for _, acc := range options.accounts {
		n.genesis.accounts = append(n.genesis.accounts, genesis.Actor{
			Type:    genesis.TAccount,
			Balance: acc.initialBalance,
			Meta:    (&genesis.AccountMeta{Owner: acc.key.Address}).ActorMeta(),
		})
	}

	// Ensure we're using the right actors. This really shouldn't be some global thing, but it's
	// the best we can do for now.
	if n.options.mockProofs {
		require.NoError(t, build.UseNetworkBundle("testing-fake-proofs"))
	} else {
		require.NoError(t, build.UseNetworkBundle("testing"))
	}

	build.EquivocationDelaySecs = 0

	return n
}

// Mocknet returns the underlying mocknet.
func (n *Ensemble) Mocknet() mocknet.Mocknet {
	return n.mn
}

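// NewPrivKey generates a fresh ed25519 libp2p identity, returning the private
// key together with the peer ID derived from it.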
func (n *Ensemble) NewPrivKey() (libp2pcrypto.PrivKey, peer.ID) {
	privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
	require.NoError(n.t, err)

	peerId, err := peer.IDFromPrivateKey(privkey)
	require.NoError(n.t, err)

	return privkey, peerId
}

// FullNode enrolls a new full node.
func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
	options := DefaultNodeOpts
	for _, o := range opts {
		err := o(&options)
		require.NoError(n.t, err)
	}

	key, err := key.GenerateKey(types.KTBLS)
	require.NoError(n.t, err)

	if !n.bootstrapped && !options.balance.IsZero() {
		// if we still haven't forged genesis, create a key+address, and assign
		// it some FIL; this will be set as the default wallet when the node is
		// started.
		genacc := genesis.Actor{
			Type:    genesis.TAccount,
			Balance: options.balance,
			Meta:    (&genesis.AccountMeta{Owner: key.Address}).ActorMeta(),
		}

		n.genesis.accounts = append(n.genesis.accounts, genacc)
	}

	*full = TestFullNode{t: n.t, options: options, DefaultKey: key, EthSubRouter: gateway.NewEthSubHandler()}

	n.inactive.fullnodes = append(n.inactive.fullnodes, full)
	return n
}

// Curio enrolls a new Curio node.
func (n *Ensemble) Curio(cu *TestCurioNode, opts ...NodeOpt) *Ensemble {
	options := DefaultNodeOpts
	for _, o := range opts {
		err := o(&options)
		require.NoError(n.t, err)
	}

	*cu = TestCurioNode{t: n.t, options: options, Deps: &deps.Deps{}}

	n.inactive.providernodes = append(n.inactive.providernodes, cu)
	return n
}

// MinerEnroll enrolls a new miner, using the provided full node for chain
// interactions.
func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
	require.NotNil(n.t, full, "full node required when instantiating miner")

	options := DefaultNodeOpts
	for _, o := range opts {
		err := o(&options)
		require.NoError(n.t, err)
	}

	privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
	require.NoError(n.t, err)

	peerId, err := peer.IDFromPrivateKey(privkey)
	require.NoError(n.t, err)

	tdir, err := os.MkdirTemp("", "preseal-memgen")
	require.NoError(n.t, err)

	minerCnt := len(n.inactive.miners) + len(n.active.miners)

	actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt))
	require.NoError(n.t, err)

	if options.mainMiner != nil {
		actorAddr = options.mainMiner.ActorAddr
	}

	ownerKey := options.ownerKey
	var presealSectors int

	if !n.bootstrapped {
		presealSectors = options.sectors

		var (
			k    *types.KeyInfo
			genm *genesis.Miner
		)

		// Will use 2KiB sectors by default (default value of sectorSize).
		proofType, err := miner.SealProofTypeFromSectorSize(options.sectorSize, n.genesis.version, false)
		require.NoError(n.t, err)

		// Create the preseal commitment.
		if n.options.mockProofs {
			genm, k, err = mock.PreSeal(proofType, actorAddr, presealSectors)
		} else {
			genm, k, err = seed.PreSeal(actorAddr, proofType, 0, presealSectors, tdir, []byte("make genesis mem random"), nil, true)
		}
		require.NoError(n.t, err)

		genm.PeerId = peerId

		// create an owner key, and assign it some FIL.
		ownerKey, err = key.NewKey(*k)
		require.NoError(n.t, err)

		genacc := genesis.Actor{
			Type:    genesis.TAccount,
			Balance: options.balance,
			Meta:    (&genesis.AccountMeta{Owner: ownerKey.Address}).ActorMeta(),
		}

		n.genesis.miners = append(n.genesis.miners, *genm)
		n.genesis.accounts = append(n.genesis.accounts, genacc)
	} else {
		require.NotNil(n.t, ownerKey, "owner key can't be nil if initializing a miner after genesis")
	}

	rl, err := net.Listen("tcp", "127.0.0.1:")
	require.NoError(n.t, err)

	*minerNode = TestMiner{
		t:              n.t,
		ActorAddr:      actorAddr,
		OwnerKey:       ownerKey,
		FullNode:       full,
		PresealDir:     tdir,
		PresealSectors: presealSectors,
		options:        options,
		RemoteListener: rl,
	}

	minerNode.Libp2p.PeerID = peerId
	minerNode.Libp2p.PrivKey = privkey

	return n
}

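// AddInactiveMiner registers an already-enrolled miner to be started on the
// next call to Start.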
func (n *Ensemble) AddInactiveMiner(m *TestMiner) {
	n.inactive.miners = append(n.inactive.miners, m)
}

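// Miner enrolls a new miner, using the provided full node for chain
// interactions, and adds it to the inactive set so the next call to Start
// brings it up.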
func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
	n.MinerEnroll(minerNode, full, opts...)
	n.AddInactiveMiner(minerNode)
	return n
}

// Worker enrolls a new worker, using the provided miner node for chain
// interactions.
func (n *Ensemble) Worker(minerNode *TestMiner, worker *TestWorker, opts ...NodeOpt) *Ensemble {
	require.NotNil(n.t, minerNode, "miner node required when instantiating worker")

	options := DefaultNodeOpts
	for _, o := range opts {
		err := o(&options)
		require.NoError(n.t, err)
	}

	rl, err := net.Listen("tcp", "127.0.0.1:")
	require.NoError(n.t, err)

	*worker = TestWorker{
		t:              n.t,
		MinerNode:      minerNode,
		RemoteListener: rl,
		options:        options,

		Stop: func(ctx context.Context) error { return nil },
	}

	n.inactive.workers = append(n.inactive.workers, worker)

	return n
}

// Start starts all enrolled nodes.
func (n *Ensemble) Start() *Ensemble {
	ctx := context.Background()

	var gtempl *genesis.Template
	if !n.bootstrapped {
		// We haven't been bootstrapped yet, we need to generate genesis and
		// create the networking backbone.
		gtempl = n.generateGenesis()
		n.mn = mocknet.New()
	}

	sharedITestID := harmonydb.ITestNewID()

	// ---------------------
	// FULL NODES
	// ---------------------

	// Create all inactive full nodes.
	for i, full := range n.inactive.fullnodes {

		var r repo.Repo
		if !full.options.fsrepo {
			rmem := repo.NewMemory(nil)
			n.t.Cleanup(rmem.Cleanup)
			r = rmem
		} else {
			repoPath := n.t.TempDir()
			rfs, err := repo.NewFS(repoPath)
			require.NoError(n.t, err)
			require.NoError(n.t, rfs.Init(repo.FullNode))
			r = rfs
		}

		// setup config with options
		lr, err := r.Lock(repo.FullNode)
		require.NoError(n.t, err)

		ks, err := lr.KeyStore()
		require.NoError(n.t, err)

		if full.Pkey != nil {
			pk, err := libp2pcrypto.MarshalPrivateKey(full.Pkey.PrivKey)
			require.NoError(n.t, err)

			err = ks.Put("libp2p-host", types.KeyInfo{
				Type:       "libp2p-host",
				PrivateKey: pk,
			})
			require.NoError(n.t, err)
		}

		c, err := lr.Config()
		require.NoError(n.t, err)

		cfg, ok := c.(*config.FullNode)
		if !ok {
			n.t.Fatalf("invalid config from repo, got: %T", c)
		}
		for _, opt := range full.options.cfgOpts {
			require.NoError(n.t, opt(cfg))
		}
		err = lr.SetConfig(func(raw interface{}) {
			rcfg := raw.(*config.FullNode)
			*rcfg = *cfg
		})
		require.NoError(n.t, err)

		err = lr.Close()
		require.NoError(n.t, err)

		opts := []node.Option{
			node.FullAPI(&full.FullNode, node.Lite(full.options.lite)),
			node.Base(),
			node.Repo(r),
			node.If(full.options.disableLibp2p, node.MockHost(n.mn)),
			node.Test(),

			// so that we subscribe to pubsub topics immediately
			node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)),

			// upgrades
			node.Override(new(stmgr.UpgradeSchedule), n.options.upgradeSchedule),
		}

		// append any node builder options.
		opts = append(opts, full.options.extraNodeOpts...)

		// Either generate the genesis or inject it.
		if i == 0 && !n.bootstrapped {
			opts = append(opts, node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&n.genesisBlock, *gtempl)))
		} else {
			opts = append(opts, node.Override(new(modules.Genesis), modules.LoadGenesis(n.genesisBlock.Bytes())))
		}

		// Are we mocking proofs?
		if n.options.mockProofs {
			opts = append(opts,
				node.Override(new(storiface.Verifier), mock.MockVerifier),
				node.Override(new(storiface.Prover), mock.MockProver),
			)
		}

		// Call option builders, passing active nodes as the parameter
		for _, bopt := range full.options.optBuilders {
			opts = append(opts, bopt(n.active.fullnodes))
		}

		// Construct the full node.
		stop, err := node.New(ctx, opts...)
		full.Stop = stop

		require.NoError(n.t, err)

		addr, err := full.WalletImport(context.Background(), &full.DefaultKey.KeyInfo)
		require.NoError(n.t, err)

		err = full.WalletSetDefault(context.Background(), addr)
		require.NoError(n.t, err)

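		// Make shutdown idempotent: the node's stop function and the RPC
		// closer below must each run at most once, however many times Stop
		// is called by the test or by cleanup hooks.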
		var rpcShutdownOnce sync.Once
		var stopOnce sync.Once
		var stopErr error

		stopFunc := stop
		stop = func(ctx context.Context) error {
			stopOnce.Do(func() {
				stopErr = stopFunc(ctx)
			})
			return stopErr
		}

		// Are we hitting this node through its RPC?
		if full.options.rpc {
			withRPC, rpcCloser := fullRpc(n.t, full)
			n.inactive.fullnodes[i] = withRPC
			full.Stop = func(ctx2 context.Context) error {
				rpcShutdownOnce.Do(rpcCloser)
				return stop(ctx)
			}
			n.t.Cleanup(func() { rpcShutdownOnce.Do(rpcCloser) })
		}

		n.t.Cleanup(func() {
			_ = stop(context.Background())
		})

		n.active.fullnodes = append(n.active.fullnodes, full)
	}

	// If we are here, we have processed all inactive fullnodes and moved them
	// to active, so clear the slice.
	n.inactive.fullnodes = n.inactive.fullnodes[:0]

	// Link all the nodes.
	err := n.mn.LinkAll()
	require.NoError(n.t, err)

	// ---------------------
	// MINERS
	// ---------------------

	// Create all inactive miners.
	for i, m := range n.inactive.miners {
		if n.bootstrapped {
			if m.options.mainMiner == nil {
				// this is a miner created after genesis, so it won't have a preseal.
				// we need to create it on chain.

				proofType, err := miner.WindowPoStProofTypeFromSectorSize(m.options.sectorSize, n.genesis.version)
				require.NoError(n.t, err)

				params, aerr := actors.SerializeParams(&power3.CreateMinerParams{
					Owner:               m.OwnerKey.Address,
					Worker:              m.OwnerKey.Address,
					WindowPoStProofType: proofType,
					Peer:                abi.PeerID(m.Libp2p.PeerID),
				})
				require.NoError(n.t, aerr)

				createStorageMinerMsg := &types.Message{
					From:  m.OwnerKey.Address,
					To:    power.Address,
					Value: big.Zero(),

					Method: power.Methods.CreateMiner,
					Params: params,
				}
				signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, &api.MessageSendSpec{
					MsgUuid: uuid.New(),
				})
				require.NoError(n.t, err)

				mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
				require.NoError(n.t, err)
				require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)

				var retval power3.CreateMinerReturn
				err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
				require.NoError(n.t, err, "failed to create miner")

				m.ActorAddr = retval.IDAddress
			} else {
				params, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
				require.NoError(n.t, err)

				msg := &types.Message{
					To:     m.options.mainMiner.ActorAddr,
					From:   m.options.mainMiner.OwnerKey.Address,
					Method: builtin.MethodsMiner.ChangePeerID,
					Params: params,
					Value:  types.NewInt(0),
				}

				signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
					MsgUuid: uuid.New(),
				})
				require.NoError(n.t, err2)

				mw, err2 := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
				require.NoError(n.t, err2)
				require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)
			}
		}

		has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address)
		require.NoError(n.t, err)

		// Only import the owner's full key into our companion full node, if we
		// don't already have it.
		if !has {
			_, err = m.FullNode.WalletImport(ctx, &m.OwnerKey.KeyInfo)
			require.NoError(n.t, err)
		}

		// // Set it as the default address.
		// err = m.FullNode.WalletSetDefault(ctx, m.OwnerAddr.Address)
		// require.NoError(n.t, err)

		r := repo.NewMemory(nil)
		n.t.Cleanup(r.Cleanup)

		lr, err := r.Lock(repo.StorageMiner)
		require.NoError(n.t, err)

		c, err := lr.Config()
		require.NoError(n.t, err)

		cfg, ok := c.(*config.StorageMiner)
		if !ok {
			n.t.Fatalf("invalid config from repo, got: %T", c)
		}
		cfg.Common.API.RemoteListenAddress = m.RemoteListener.Addr().String()
		cfg.Subsystems.EnableMarkets = m.options.subsystems.Has(SMarkets)
		cfg.Subsystems.EnableMining = m.options.subsystems.Has(SMining)
		cfg.Subsystems.EnableSealing = m.options.subsystems.Has(SSealing)
		cfg.Subsystems.EnableSectorStorage = m.options.subsystems.Has(SSectorStorage)
		cfg.Subsystems.EnableSectorIndexDB = m.options.subsystems.Has(SHarmony)
		cfg.Dealmaking.MaxStagingDealsBytes = m.options.maxStagingDealsBytes

		if m.options.mainMiner != nil {
			token, err := m.options.mainMiner.FullNode.AuthNew(ctx, api.AllPermissions)
			require.NoError(n.t, err)

			cfg.Subsystems.SectorIndexApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr)
			cfg.Subsystems.SealerApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr)
		}

		err = lr.SetConfig(func(raw interface{}) {
			rcfg := raw.(*config.StorageMiner)
			*rcfg = *cfg
		})
		require.NoError(n.t, err)

		ks, err := lr.KeyStore()
		require.NoError(n.t, err)

		pk, err := libp2pcrypto.MarshalPrivateKey(m.Libp2p.PrivKey)
		require.NoError(n.t, err)

		err = ks.Put("libp2p-host", types.KeyInfo{
			Type:       "libp2p-host",
			PrivateKey: pk,
		})
		require.NoError(n.t, err)

		ds, err := lr.Datastore(context.TODO(), "/metadata")
		require.NoError(n.t, err)

		err = ds.Put(ctx, datastore.NewKey("miner-address"), m.ActorAddr.Bytes())
		require.NoError(n.t, err)

		if i < len(n.genesis.miners) && !n.bootstrapped {
			// if this is a genesis miner, import preseal metadata
			require.NoError(n.t, importPreSealMeta(ctx, n.genesis.miners[i], ds))
		}

		// using real proofs, therefore need real sectors.
		if !n.bootstrapped && !n.options.mockProofs {
			psd := m.PresealDir
			noPaths := m.options.noStorage

			err := lr.SetStorage(func(sc *storiface.StorageConfig) {
				if noPaths {
					sc.StoragePaths = []storiface.LocalPath{}
				}
				sc.StoragePaths = append(sc.StoragePaths, storiface.LocalPath{Path: psd})
			})

			require.NoError(n.t, err)
		}

		err = lr.Close()
		require.NoError(n.t, err)

		if m.options.mainMiner == nil {
			enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
			require.NoError(n.t, err)

			msg := &types.Message{
				From:   m.OwnerKey.Address,
				To:     m.ActorAddr,
				Method: builtin.MethodsMiner.ChangePeerID,
				Params: enc,
				Value:  types.NewInt(0),
			}

			_, err2 := m.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
				MsgUuid: uuid.New(),
			})
			require.NoError(n.t, err2)
		}

		noLocal := m.options.minerNoLocalSealing
		assigner := m.options.minerAssigner
		disallowRemoteFinalize := m.options.disallowRemoteFinalize

		var mineBlock = make(chan lotusminer.MineReq)

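		// Give the miner its own copy of the full node API, wrapped so that
		// pushed messages carry unique UUIDs (see modules.MakeUuidWrapper);
		// working on a copy avoids mutating the shared TestFullNode.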
		minerCopy := *m.FullNode
		minerCopy.FullNode = modules.MakeUuidWrapper(minerCopy.FullNode)
		m.FullNode = &minerCopy

		opts := []node.Option{
			node.StorageMiner(&m.StorageMiner, cfg.Subsystems),
			node.Base(),
			node.Repo(r),
			node.Test(),

			node.If(m.options.disableLibp2p, node.MockHost(n.mn)),
			node.Override(new(v1api.RawFullNodeAPI), m.FullNode),
			node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)),

			// disable resource filtering so that local worker gets assigned tasks
			// regardless of system pressure.
			node.Override(new(config.SealerConfig), func() config.SealerConfig {
				scfg := config.DefaultStorageMiner()

				if noLocal {
					scfg.Storage.AllowSectorDownload = false
					scfg.Storage.AllowAddPiece = false
					scfg.Storage.AllowPreCommit1 = false
					scfg.Storage.AllowPreCommit2 = false
					scfg.Storage.AllowCommit = false
					scfg.Storage.AllowUnseal = false
				}

				scfg.Storage.Assigner = assigner
				scfg.Storage.DisallowRemoteFinalize = disallowRemoteFinalize
				scfg.Storage.ResourceFiltering = config.ResourceFilteringDisabled
				return scfg.Storage
			}),

			// upgrades
			node.Override(new(stmgr.UpgradeSchedule), n.options.upgradeSchedule),

			node.Override(new(harmonydb.ITestID), sharedITestID),
			node.Override(new(config.HarmonyDB), func() config.HarmonyDB {
				return config.HarmonyDB{
					Hosts:    []string{envElse("LOTUS_HARMONYDB_HOSTS", "127.0.0.1")},
					Database: "yugabyte",
					Username: "yugabyte",
					Password: "yugabyte",
					Port:     "5433",
				}
			}),
		}

		if m.options.subsystems.Has(SMarkets) {
			opts = append(opts,
				node.Override(new(idxprov.MeshCreator), idxprov_test.NewNoopMeshCreator),
			)
		}

		// append any node builder options.
		opts = append(opts, m.options.extraNodeOpts...)

		idAddr, err := address.IDFromAddress(m.ActorAddr)
		require.NoError(n.t, err)

		// preload preseals if the network still hasn't bootstrapped.
		var presealSectors []abi.SectorID
		if !n.bootstrapped {
			sectors := n.genesis.miners[i].Sectors
			for _, sector := range sectors {
				presealSectors = append(presealSectors, abi.SectorID{
					Miner:  abi.ActorID(idAddr),
					Number: sector.SectorID,
				})
			}
		}

		if n.options.mockProofs {
			opts = append(opts,
				node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
					return mock.NewMockSectorMgr(presealSectors), nil
				}),
				node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))),
				node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))),
				node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))),

				node.Override(new(storiface.Verifier), mock.MockVerifier),
				node.Override(new(storiface.Prover), mock.MockProver),
				node.Unset(new(*sectorstorage.Manager)),
			)
		}

		// start node
		stop, err := node.New(ctx, opts...)
		require.NoError(n.t, err)

		n.t.Cleanup(func() { _ = stop(context.Background()) })
		mCopy := m
		n.t.Cleanup(func() {
			if mCopy.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB != nil {
				mCopy.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB.ITestDeleteAll()
			}
		})

		m.BaseAPI = m.StorageMiner

		// Are we hitting this node through its RPC?
		if m.options.rpc {
			withRPC := minerRpc(n.t, m)
			n.inactive.miners[i] = withRPC
		}

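		// mineOne feeds a single mining request into the test miner's block
		// production channel, giving tests precise control over when blocks
		// are produced; it aborts if the context is cancelled first.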
		mineOne := func(ctx context.Context, req lotusminer.MineReq) error {
			select {
			case mineBlock <- req:
				return nil
			case <-ctx.Done():
				return ctx.Err()
			}
		}

		m.MineOne = mineOne
		m.Stop = stop

		n.active.miners = append(n.active.miners, m)
	}

	// If we are here, we have processed all inactive miners and moved them
	// to active, so clear the slice.
	n.inactive.miners = n.inactive.miners[:0]

	// ---------------------
	// WORKERS
	// ---------------------

	// Create all inactive workers.
	for i, m := range n.inactive.workers {
		r := repo.NewMemory(nil)

		lr, err := r.Lock(repo.Worker)
		require.NoError(n.t, err)

		if m.options.noStorage {
			err := lr.SetStorage(func(sc *storiface.StorageConfig) {
				sc.StoragePaths = []storiface.LocalPath{}
			})
			require.NoError(n.t, err)
		}

		ds, err := lr.Datastore(context.Background(), "/metadata")
		require.NoError(n.t, err)

		addr := m.RemoteListener.Addr().String()

		localStore, err := paths.NewLocal(ctx, lr, m.MinerNode, []string{"http://" + addr + "/remote"})
		require.NoError(n.t, err)

		auth := http.Header(nil)

		// FUTURE: Use m.MinerNode.(BaseAPI).(impl.StorageMinerAPI).HarmonyDB to setup.

		remote := paths.NewRemote(localStore, m.MinerNode, auth, 20, &paths.DefaultPartialFileHandler{})
		store := m.options.workerStorageOpt(remote)

		fh := &paths.FetchHandler{Local: localStore, PfHandler: &paths.DefaultPartialFileHandler{}}
		m.FetchHandler = fh.ServeHTTP

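		// statestore tracking the sealing calls dispatched to this worker.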
		wsts := statestore.New(namespace.Wrap(ds, modules.WorkerCallsPrefix))

		workerApi := &sealworker.Worker{
			LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
				TaskTypes: m.options.workerTasks,
				NoSwap:    false,
				Name:      m.options.workerName,
			}, store, localStore, m.MinerNode, m.MinerNode, wsts),
			LocalStore: localStore,
			Storage:    lr,
		}

		m.Worker = workerApi

		require.True(n.t, m.options.rpc)

		withRPC := workerRpc(n.t, m)
		n.inactive.workers[i] = withRPC

		err = m.MinerNode.WorkerConnect(ctx, "http://"+addr+"/rpc/v0")
		require.NoError(n.t, err)

		n.active.workers = append(n.active.workers, m)
	}

	// If we are here, we have processed all inactive workers and moved them
	// to active, so clear the slice.
	n.inactive.workers = n.inactive.workers[:0]

	for _, p := range n.inactive.providernodes {

		// TODO setup config with options
		err := p.Deps.PopulateRemainingDeps(context.Background(), &cli.Context{}, false)
		require.NoError(n.t, err)

		shutdownChan := make(chan struct{})
		taskEngine, err := tasks.StartTasks(ctx, p.Deps)
		require.NoError(n.t, err)
		defer taskEngine.GracefullyTerminate()

		err = rpc.ListenAndServe(ctx, p.Deps, shutdownChan) // Monitor for shutdown.
		require.NoError(n.t, err)
		finishCh := node.MonitorShutdown(shutdownChan) //node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
		//node.ShutdownHandler{Component: "provider", StopFunc: stop},

		<-finishCh

		n.active.providernodes = append(n.active.providernodes, p)
	}
	// ---------------------
	// MISC
	// ---------------------

	// Link all the nodes.
	err = n.mn.LinkAll()
	require.NoError(n.t, err)

	if !n.bootstrapped && len(n.active.miners) > 0 {
		// We have *just* bootstrapped, so mine 2 blocks to setup some CE stuff in some actors
		var wait sync.Mutex
		wait.Lock()

		observer := n.active.fullnodes[0]

		bm := NewBlockMiner(n.t, n.active.miners[0])
		n.t.Cleanup(bm.Stop)

		bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) {
			wait.Unlock()
		})
		wait.Lock()
		bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) {
			wait.Unlock()
		})
		wait.Lock()
		n.bootstrapped = true
	}

	return n
}

// InterconnectAll connects all miners and full nodes to one another.
func (n *Ensemble) InterconnectAll() *Ensemble {
	// connect full nodes to miners.
	for _, from := range n.active.fullnodes {
		for _, to := range n.active.miners {
			// []*TestMiner to []api.CommonAPI type coercion not possible
			// so cannot use variadic form.
			n.Connect(from, to)
		}
	}

	// connect full nodes between each other, skipping ourselves.
	last := len(n.active.fullnodes) - 1
	for i, from := range n.active.fullnodes {
		if i == last {
			continue
		}
		for _, to := range n.active.fullnodes[i+1:] {
			n.Connect(from, to)
		}
	}
	return n
}

// Connect connects one node to the other provided nodes.
func (n *Ensemble) Connect(from api.Net, to ...api.Net) *Ensemble {
	addr, err := from.NetAddrsListen(context.Background())
	require.NoError(n.t, err)

	for _, other := range to {
		err = other.NetConnect(context.Background(), addr)
		require.NoError(n.t, err)
	}
	return n
}

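// BeginMiningMustPost is like BeginMining, except that it requires a single
// miner and mines blocks while asserting that the miner submits its window
// posts on time (see BlockMiner.MineBlocksMustPost).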
func (n *Ensemble) BeginMiningMustPost(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner {
	ctx := context.Background()

	// wait one second to make sure that nodes are connected and have handshaken.
	// TODO make this deterministic by listening to identify events on the
	// libp2p eventbus instead (or something else).
	time.Sleep(1 * time.Second)

	var bms []*BlockMiner
	if len(miners) == 0 {
		// no miners have been provided explicitly, instantiate block miners
		// for all active miners that aren't still mining.
		for _, m := range n.active.miners {
			if _, ok := n.active.bms[m]; ok {
				continue // skip, already have a block miner
			}
			miners = append(miners, m)
		}
	}

	if len(miners) > 1 {
		n.t.Fatalf("Only one active miner for MustPost, but have %d", len(miners))
	}

	for _, m := range miners {
		bm := NewBlockMiner(n.t, m)
		bm.MineBlocksMustPost(ctx, blocktime)
		n.t.Cleanup(bm.Stop)

		bms = append(bms, bm)

		n.active.bms[m] = bm
	}

	return bms
}

// BeginMining kicks off mining for the specified miners. If nil or 0-length,
// it will kick off mining for all enrolled and active miners. It also adds a
// cleanup function to stop all mining operations on test teardown.
func (n *Ensemble) BeginMining(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner {
	ctx := context.Background()

	// wait one second to make sure that nodes are connected and have handshaken.
	// TODO make this deterministic by listening to identify events on the
	// libp2p eventbus instead (or something else).
	time.Sleep(1 * time.Second)

	var bms []*BlockMiner
	if len(miners) == 0 {
		// no miners have been provided explicitly, instantiate block miners
		// for all active miners that aren't still mining.
		for _, m := range n.active.miners {
			if _, ok := n.active.bms[m]; ok {
				continue // skip, already have a block miner
			}
			miners = append(miners, m)
		}
	}

	for _, m := range miners {
		bm := NewBlockMiner(n.t, m)
		bm.MineBlocks(ctx, blocktime)
		n.t.Cleanup(bm.Stop)

		bms = append(bms, bm)

		n.active.bms[m] = bm
	}

	return bms
}

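// generateGenesis assembles a genesis template from the accounts and miners
// accumulated so far; the timestamp is set slightly in the past (by
// pastOffset) so the new network can start mining immediately.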
func (n *Ensemble) generateGenesis() *genesis.Template {
	var verifRoot = gen.DefaultVerifregRootkeyActor
	if k := n.options.verifiedRoot.key; k != nil {
		verifRoot = genesis.Actor{
			Type:    genesis.TAccount,
			Balance: n.options.verifiedRoot.initialBalance,
			Meta:    (&genesis.AccountMeta{Owner: k.Address}).ActorMeta(),
		}
	}

	templ := &genesis.Template{
		NetworkVersion:   n.genesis.version,
		Accounts:         n.genesis.accounts,
		Miners:           n.genesis.miners,
		NetworkName:      "test",
		Timestamp:        uint64(time.Now().Unix() - int64(n.options.pastOffset.Seconds())),
		VerifregRootKey:  verifRoot,
		RemainderAccount: gen.DefaultRemainderAccountActor,
	}

	return templ
}

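// importPreSealMeta writes sealing-pipeline metadata for a genesis miner's
// presealed sectors into the miner's metadata datastore, marking each sector
// as Proving and advancing the sector number counter past the preseals.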
func importPreSealMeta(ctx context.Context, meta genesis.Miner, mds dtypes.MetadataDS) error {
|
|
|
|
maxSectorID := abi.SectorNumber(0)
|
|
|
|
for _, sector := range meta.Sectors {
|
|
|
|
sectorKey := datastore.NewKey(pipeline.SectorStorePrefix).ChildString(fmt.Sprint(sector.SectorID))
|
|
|
|
|
|
|
|
commD := sector.CommD
|
|
|
|
commR := sector.CommR
|
|
|
|
|
|
|
|
info := &pipeline.SectorInfo{
|
|
|
|
State: pipeline.Proving,
|
|
|
|
SectorNumber: sector.SectorID,
|
chore: Merge nv22 into master (#11699)
* [WIP] feat: Add nv22 skeleton
Addition of Network Version 22 skeleton
* update FFI
* feat: drand: refactor round verification
* feat: sealing: Support nv22 DDO features in the sealing pipeline (#11226)
* Initial work supporting DDO pieces in lotus-miner
* sealing: Update pipeline input to operate on UniversalPiece
* sealing: Update pipeline checks/sealing states to operate on UniversalPiece
* sealing: Make pipeline build with UniversalPiece
* move PieceDealInfo out of api
* make gen
* make sealing pipeline unit tests pass
* fix itest ensemble build
* don't panic in SectorsStatus with deals
* stop linter from complaining about checkPieces
* fix sector import tests
* mod tidy
* sealing: Add logic for (pre)committing DDO sectors
* sealing: state-types with method defs
* DDO non-snap pipeline works(?), DDO Itests
* DDO support in snapdeals pipeline
* make gen
* update actor bundles
* update the gst market fix
* fix: chain: use PreCommitSectorsBatch2 when setting up genesis
* some bug fixes
* integration working changes
* update actor bundles
* Make TestOnboardRawPieceSnap pass
* Appease the linter
* Make deadlines test pass with v12 actors
* Update go-state-types, abstract market DealState
* make gen
* mod tidy, lint fixes
* Fix some more tests
* Bump version in master
Bump version in master
* Make gen
Make gen
* fix sender
* fix: lotus-provider: Fix winning PoSt
* fix: sql Scan cannot write to an object
* Actually show miner-addrs in info-log
Actually show miner-addrs in lotus-provider info-log
* [WIP] feat: Add nv22 skeleton
Addition of Network Version 22 skeleton
* update FFI
* ddo is now nv22
* make gen
* temp actor bundle with ddo
* use working go-state-types
* gst with v13 market migration
* update bundle, builtin.MethodsMiner.ProveCommitSectors2 -> 3
* actually working v13 migration, v13 migration itest
* Address review
* sealing: Correct DDO snap pledge math
* itests: Mixed ddo itest
* pipeline: Fix sectorWeight
* sealing: convert market deals into PAMs in mixed sectors
* sealing: make market to ddo conversion work
* fix lint
* update gst
* Update actors and GST to lastest integ branch
* commit batcher: Update ProveCommitSectors3Params builder logic
* make gen
* use builtin-actors master
* ddo: address review
* itests: Add commd assertions to ddo tests
* make gen
* gst with fixed types
* config knobs for RequireActivationSuccess
* storage: Drop obsolete flaky tasts
---------
Co-authored-by: Jennifer Wang <jiayingw703@gmail.com>
Co-authored-by: Aayush <arajasek94@gmail.com>
Co-authored-by: Shrenuj Bansal <shrenuj.bansal@protocol.ai>
Co-authored-by: Phi <orjan.roren@gmail.com>
Co-authored-by: Andrew Jackson (Ajax) <snadrus@gmail.com>
Co-authored-by: TippyFlits <james.bluett@protocol.ai>
* feat: implement FIP-0063
* chore: deps: update to go-multiaddr v0.12.2 (#11602)
* feat: fvm: update the FVM/FFI to v4.1 (#11608) (#11612)
This:
1. Adds nv22 support.
2. Updates the message tracing format.
Co-authored-by: Steven Allen <steven@stebalien.com>
* AggregateProofType nil when doing batch updates
Use latest nv22 go-state-types version with matching update
* Update to v13.0.0-rc.2 bundle
* chore: Upgrade heights and codename
Update upgrade heights
Co-Authored-By: Steven Allen <steven@stebalien.com>
* Update epoch after nv22 DRAND switch
Update epoch after nv22 DRAND switch
* Update Mango codename to Phoneix
Make the codename for the Drand-change inline with Dragon style.
* Add UpgradePhoenixHeight to API params
* set UpgradePhoenixHeight to be one hour after Dragon
* Make gen
Make gen and UpgradePhoenixHeight in butterfly and local devnet to be in line with Calibration and Mainnet
* Update epoch heights (#11637)
Update epoch heights
* new: add forest bootstrap nodes (#11636)
Signed-off-by: samuelarogbonlo <sbayo971@gmail.com>
* Merge pull request #11491 from filecoin-project/fix/remove-decommissioned-pl-bootstrap-nodes
Remove PL operated bootstrap nodes from mainnet.pi
* feat: api: new verified registry methods to get all allocations and claims (#11631)
* new verireg methods
* update changelog and add itest
* update itest and cli
* update new method's support till v9
* remove gateway APIs
* fix cli internal var names
* chore:: backport #11609 to the feat/nv22 branch (#11644)
* feat: api: improve the correctness of Eth's trace_block (#11609)
* Improve the correctness of Eth's trace_block
Pieces: []pipeline.SafeSectorPiece{
	pipeline.SafePiece(api.SectorPiece{
		Piece: abi.PieceInfo{
			Size:     abi.PaddedPieceSize(meta.SectorSize),
			PieceCID: commD,
		},
		DealInfo: nil, // todo: likely possible to get, but not really that useful
chore: Merge nv22 into master (#11699)
* [WIP] feat: Add nv22 skeleton
Addition of Network Version 22 skeleton
* update FFI
* feat: drand: refactor round verification
* feat: sealing: Support nv22 DDO features in the sealing pipeline (#11226)
* Initial work supporting DDO pieces in lotus-miner
* sealing: Update pipeline input to operate on UniversalPiece
* sealing: Update pipeline checks/sealing states to operate on UniversalPiece
* sealing: Make pipeline build with UniversalPiece
* move PieceDealInfo out of api
* make gen
* make sealing pipeline unit tests pass
* fix itest ensemble build
* don't panic in SectorsStatus with deals
* stop linter from complaining about checkPieces
* fix sector import tests
* mod tidy
* sealing: Add logic for (pre)committing DDO sectors
* sealing: state-types with method defs
* DDO non-snap pipeline works(?), DDO Itests
* DDO support in snapdeals pipeline
* make gen
* update actor bundles
* update the gst market fix
* fix: chain: use PreCommitSectorsBatch2 when setting up genesis
* some bug fixes
* integration working changes
* update actor bundles
* Make TestOnboardRawPieceSnap pass
* Appease the linter
* Make deadlines test pass with v12 actors
* Update go-state-types, abstract market DealState
* make gen
* mod tidy, lint fixes
* Fix some more tests
* Bump version in master
Bump version in master
* Make gen
Make gen
* fix sender
* fix: lotus-provider: Fix winning PoSt
* fix: sql Scan cannot write to an object
* Actually show miner-addrs in info-log
Actually show miner-addrs in lotus-provider info-log
* [WIP] feat: Add nv22 skeleton
Addition of Network Version 22 skeleton
* update FFI
* ddo is now nv22
* make gen
* temp actor bundle with ddo
* use working go-state-types
* gst with v13 market migration
* update bundle, builtin.MethodsMiner.ProveCommitSectors2 -> 3
* actually working v13 migration, v13 migration itest
* Address review
* sealing: Correct DDO snap pledge math
* itests: Mixed ddo itest
* pipeline: Fix sectorWeight
* sealing: convert market deals into PAMs in mixed sectors
* sealing: make market to ddo conversion work
* fix lint
* update gst
* Update actors and GST to latest integ branch
* commit batcher: Update ProveCommitSectors3Params builder logic
* make gen
* use builtin-actors master
* ddo: address review
* itests: Add commd assertions to ddo tests
* make gen
* gst with fixed types
* config knobs for RequireActivationSuccess
* storage: Drop obsolete flaky tests
---------
Co-authored-by: Jennifer Wang <jiayingw703@gmail.com>
Co-authored-by: Aayush <arajasek94@gmail.com>
Co-authored-by: Shrenuj Bansal <shrenuj.bansal@protocol.ai>
Co-authored-by: Phi <orjan.roren@gmail.com>
Co-authored-by: Andrew Jackson (Ajax) <snadrus@gmail.com>
Co-authored-by: TippyFlits <james.bluett@protocol.ai>
* feat: implement FIP-0063
* chore: deps: update to go-multiaddr v0.12.2 (#11602)
* feat: fvm: update the FVM/FFI to v4.1 (#11608) (#11612)
This:
1. Adds nv22 support.
2. Updates the message tracing format.
Co-authored-by: Steven Allen <steven@stebalien.com>
* AggregateProofType nil when doing batch updates
Use latest nv22 go-state-types version with matching update
* Update to v13.0.0-rc.2 bundle
* chore: Upgrade heights and codename
Update upgrade heights
Co-Authored-By: Steven Allen <steven@stebalien.com>
* Update epoch after nv22 DRAND switch
Update epoch after nv22 DRAND switch
* Update Mango codename to Phoenix
Make the codename for the Drand change in line with the Dragon style.
* Add UpgradePhoenixHeight to API params
* set UpgradePhoenixHeight to be one hour after Dragon
* Make gen
Make gen, and set UpgradePhoenixHeight in butterfly and local devnet to be in line with Calibration and Mainnet
* Update epoch heights (#11637)
Update epoch heights
* new: add forest bootstrap nodes (#11636)
Signed-off-by: samuelarogbonlo <sbayo971@gmail.com>
* Merge pull request #11491 from filecoin-project/fix/remove-decommissioned-pl-bootstrap-nodes
Remove PL-operated bootstrap nodes from mainnet.pi
* feat: api: new verified registry methods to get all allocations and claims (#11631)
* new verifreg methods
* update changelog and add itest
* update itest and cli
* update new methods' support down to v9
* remove gateway APIs
* fix cli internal var names
* chore: backport #11609 to the feat/nv22 branch (#11644)
* feat: api: improve the correctness of Eth's trace_block (#11609)
* Improve the correctness of Eth's trace_block
- Improve encoding/decoding of parameters and return values:
- Encode "native" parameters and return values with Solidity ABI.
- Correctly decode parameters to "create" calls.
- Use the correct (ish) output for "create" calls.
- Handle all forms of "create".
- Make robust with respect to reverts:
- Use the actor ID/address from the trace instead of looking it up in
the state-tree (may not exist in the state-tree due to a revert).
- Gracefully handle failed actor/contract creation.
- Improve performance:
- Avoid looking anything up in the state-tree when translating the
trace, which should significantly improve performance.
- Improve code readability:
- Remove all "backtracking" logic.
- Use an "environment" struct to store temporary state instead of
attaching it to the trace.
- Fix random bugs:
- Fix an allocation bug in the "address" logic (need to set the
capacity before modifying the slice).
- Improve error checking/handling.
- Use correct types for `trace_block` action/results (create, call, etc.).
- And use the correct types for Result/Action structs instead of reusing the same "Call" action every time.
- Improve error messages.
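To ground the encoding point above: a minimal, hedged sketch (in Go) of what packing a native Filecoin method invocation into Solidity ABI can look like, following the handle_filecoin_method(uint64,uint64,bytes) convention. The helper names and the sample codec/params are illustrative assumptions, not Lotus's actual implementation.

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// abiWord left-pads a uint64 into a 32-byte ABI word.
func abiWord(v uint64) []byte {
	w := make([]byte, 32)
	binary.BigEndian.PutUint64(w[24:], v)
	return w
}

// encodeNativeCall sketches Solidity-ABI encoding of a native method call as
// handle_filecoin_method(uint64 method, uint64 codec, bytes params).
func encodeNativeCall(method, codec uint64, params []byte) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte("handle_filecoin_method(uint64,uint64,bytes)"))
	out := h.Sum(nil)[:4] // 4-byte function selector

	out = append(out, abiWord(method)...) // arg 0: method number
	out = append(out, abiWord(codec)...)  // arg 1: params codec (e.g. CBOR)
	out = append(out, abiWord(0x60)...)   // arg 2: offset of the dynamic bytes

	out = append(out, abiWord(uint64(len(params)))...) // bytes length
	out = append(out, params...)                       // bytes data...
	if pad := len(params) % 32; pad != 0 {
		out = append(out, make([]byte, 32-pad)...) // ...padded to a 32-byte word
	}
	return out
}

func main() {
	// Codec 0x51 (CBOR) and the params are made-up sample values.
	fmt.Printf("%x\n", encodeNativeCall(2, 0x51, []byte{0x82, 0x01, 0x02}))
}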
* Make gen
Make gen
---------
Co-authored-by: Steven Allen <steven@stebalien.com>
* fix: add UpgradePhoenixHeight to StateGetNetworkParams (#11648)
* chore: deps: update to go-state-types v13.0.0-rc.1
* do NOT update the cache when running the real migration
* Merge pull request #11632 from hanabi1224/hm/drand-test
feat: drand quicknet: allow scheduling drand quicknet upgrade before nv22 on 2k devnet
* chore: deps: update to go-state-types v13.0.0-rc.2
chore: deps: update to go-state-types v13.0.0-rc.2
* feat: set migration config UpgradeEpoch for v13 actors upgrade
* Built-in actor events first draft
* itest for DDO non-market verified data w/ builtin actor events
* Tests for builtin actor events API
* Clean up DDO+Events tests, add lots of explainer comments
* Minor tweaks to events types
* Avoid duplicate messages when looking for receipts
* Rename internal events modules for clarity
* Adjust actor event API after review
* s/ActorEvents/Events/g in global config
* Manage event sending rate for SubscribeActorEvents
* Terminate SubscribeActorEvents chan when at max height
* Document future API changes
* More clarity in actor event API docs
* More post-review changes, lots of tests for SubscribeActorEvents
Use BlockDelay as the window for receiving events on the SubscribeActorEvents
channel. We expect the user to have received the initial batch of historical
events (if any) within one block's time. For real-time events we expect the
consumer not to fall behind by more than roughly one block's time.
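A hedged sketch of that windowing idea, with stand-in types (event and sendLoop are illustrative, not the actual Lotus code): buffer roughly one block's worth of events, and close the subscription if the consumer falls further behind rather than buffering unboundedly.

package main

import (
	"context"
	"fmt"
	"time"
)

type event struct{ height int } // stand-in for the actor event type

// sendLoop forwards events to a subscriber, allowing it to lag by at most
// roughly one block's time (blockDelay). A consumer that falls further
// behind gets its channel closed instead of forcing unbounded buffering.
func sendLoop(ctx context.Context, in <-chan event, out chan<- event, blockDelay time.Duration) {
	defer close(out)
	for ev := range in {
		select {
		case out <- ev: // consumer is keeping up
		case <-time.After(blockDelay):
			return // consumer fell ~one block behind: terminate the subscription
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	in, out := make(chan event), make(chan event, 8)
	go sendLoop(context.Background(), in, out, 30*time.Second)
	in <- event{height: 1}
	close(in)
	fmt.Println(<-out)
}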
* Remove duplicate code from actor event type marshalling tests
Reduce verbosity and remove duplicate test logic from actor event types
JSON marshalling tests.
* Rename actor events test to follow go convention
Add missing `s` to `actor_events` test file to follow the Go naming
convention used across the repo.
* Run actor events table tests in deterministic order
Refactor `map` usage for actor event table tests to ensure deterministic
test execution order, making debugging potential issues easier. If
non-deterministic ordering is actually desired, leverage Go's built-in
parallel testing capabilities instead.
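A small sketch of the pattern described above: collect and sort the map keys, then iterate in that order.

package main

import (
	"fmt"
	"sort"
)

func main() {
	cases := map[string]int{"b": 2, "a": 1, "c": 3} // stand-in test table
	names := make([]string, 0, len(cases))
	for name := range cases {
		names = append(names, name)
	}
	sort.Strings(names) // fixed iteration order, independent of map hashing
	for _, name := range names {
		fmt.Println(name, cases[name]) // run the subtest for cases[name] here
	}
}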
* Reduce scope for filter removal failure when getting actor events
Use a fresh context to remove the temporary filter installed solely to
get the actor events. This should reduce chances of failure in a case
where the original context may be expired/cancelled.
Refactor removal into a `defer` statement for a more readable, concise
return statement.
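A sketch of that shape, with hypothetical installFilter/removeFilter/collect helpers standing in for the real API:

package main

import (
	"context"
	"fmt"
)

type filter struct{ id int }

func installFilter(ctx context.Context) (*filter, error)       { return &filter{id: 1}, nil }
func removeFilter(ctx context.Context, f *filter) error        { return nil }
func collect(ctx context.Context, f *filter) ([]string, error) { return []string{"ev"}, nil }

// getEvents installs a temporary filter and removes it in a defer. The
// removal deliberately uses a fresh context, so it still succeeds even if
// the caller's ctx was cancelled mid-request.
func getEvents(ctx context.Context) (events []string, err error) {
	fi, err := installFilter(ctx)
	if err != nil {
		return nil, err
	}
	defer func() {
		if rmErr := removeFilter(context.Background(), fi); rmErr != nil && err == nil {
			err = rmErr
		}
	}()
	return collect(ctx, fi)
}

func main() {
	evs, err := getEvents(context.Background())
	fmt.Println(evs, err)
}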
* Use fixed RNG seed for actor event tests
Improve determinism in actor event tests by using a fixed RNG seed. This
makes for a more reproducible test suite.
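For example (math/rand; the seed value itself is arbitrary):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	rng := rand.New(rand.NewSource(42)) // fixed seed: same sequence on every run
	fmt.Println(rng.Uint64(), rng.Uint64())
}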
* Use provided libraries to assert eventual conditions
Use the functionalities already provided by `testify` to assert eventual
conditions, and remove the use of `time.Sleep`.
Remove duplicate code in utility functions that are already defined.
Refactor assertion helper functions to use consistent terminology:
"require" implies fatal error, whereas "assert" implies error where the
test may proceed executing.
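A sketch of the testify pattern this refers to; currentHeight is a hypothetical helper standing in for the real chain query:

package kit_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func currentHeight() int { return 10 } // hypothetical stand-in

func TestHeightReached(t *testing.T) {
	// Poll every 100ms for up to 10s; fail with the message if never true.
	require.Eventually(t, func() bool {
		return currentHeight() >= 10
	}, 10*time.Second, 100*time.Millisecond, "chain never reached height 10")
}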
* Update changelog for actor events APIs
* Fix concerns and docs identified by review
* Update actor bundle to v13.0.0-rc3
Update actor bundle to v13.0.0-rc3
* Prep Lotus v1.26.0-rc1
- For sanity, revert the mainnet upgrade epoch to 99999999, and only set it when cutting the final release
- Update Calibnet CIDs to v13.0.0-rc3
- Add GetActorEvents, SubscribeActorEvents, GetAllClaims and GetAllAllocations methods to the changelog
Co-Authored-By: Jiaying Wang <42981373+jennijuju@users.noreply.github.com>
* Update CHANGELOG.md
Co-authored-by: Masih H. Derkani <m@derkani.org>
* Make gen
Make gen
* fix: beacon: validate drand change at nv16 correctly
* bump to v1.26.0-rc2
* test: cleanup ddo verified itest, extract steps to functions
also add allocation-removed event case
* test: extract verified DDO test to separate file, add more checks
* test: add additional actor events checks
* Add verification for "deal-activated" actor event
* docs(drand): document the meaning of "IsChained" (#11692)
* Resolve conflicts
I encountered multiple issues when trying to run make gen; these changes fix a couple of them:
- go mod tidy
- Remove RaftState/RaftLeader
- Revert `if ts.Height() > claim.TermMax+claim.TermStart || !cctx.IsSet("expired")` to what is in release/v1.26.0: `if tsHeight > val.TermMax || !expired`
* fixup imports, make gen
* Update version
Update version in master to v1.27.0-dev
* Update node/impl/full/dummy.go
Co-authored-by: Łukasz Magiera <magik6k@users.noreply.github.com>
* Adjust ListClaimsCmd
Adjust ListClaimsCmd according to review
---------
Signed-off-by: samuelarogbonlo <sbayo971@gmail.com>
Co-authored-by: TippyFlits <james.bluett@protocol.ai>
Co-authored-by: Aayush <arajasek94@gmail.com>
Co-authored-by: Łukasz Magiera <magik6k@users.noreply.github.com>
Co-authored-by: Jennifer Wang <jiayingw703@gmail.com>
Co-authored-by: Shrenuj Bansal <shrenuj.bansal@protocol.ai>
Co-authored-by: Andrew Jackson (Ajax) <snadrus@gmail.com>
Co-authored-by: Steven Allen <steven@stebalien.com>
Co-authored-by: Rod Vagg <rod@vagg.org>
Co-authored-by: Samuel Arogbonlo <47984109+samuelarogbonlo@users.noreply.github.com>
Co-authored-by: LexLuthr <88259624+LexLuthr@users.noreply.github.com>
Co-authored-by: tom123222 <160735201+tom123222@users.noreply.github.com>
Co-authored-by: Aarsh Shah <aarshkshah1992@gmail.com>
Co-authored-by: Masih H. Derkani <m@derkani.org>
Co-authored-by: Jiaying Wang <42981373+jennijuju@users.noreply.github.com>
2024-03-12 09:33:58 +00:00
	}),
},
CommD: &commD,
CommR: &commR,
}

// Serialize the sector metadata and persist it under the sector's key.
b, err := cborutil.Dump(info)
if err != nil {
	return err
}

if err := mds.Put(ctx, sectorKey, b); err != nil {
	return err
}

// Track the highest sector number seen so far.
if sector.SectorID > maxSectorID {
	maxSectorID = sector.SectorID
}
}

// Persist the highest sector number as the storage counter, so newly
// allocated sector numbers don't collide with the imported ones.
buf := make([]byte, binary.MaxVarintLen64)
size := binary.PutUvarint(buf, uint64(maxSectorID))
return mds.Put(ctx, datastore.NewKey(pipeline.StorageCounterDSPrefix), buf[:size])
}
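For reference, a hedged sketch of reading that counter back (assuming the same mds, ctx and imports as above, plus fmt):

b, err := mds.Get(ctx, datastore.NewKey(pipeline.StorageCounterDSPrefix))
if err != nil {
	return err
}
last, n := binary.Uvarint(b)
if n <= 0 {
	return fmt.Errorf("corrupt sector counter")
}
_ = last // highest allocated sector number; allocate from last+1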
// envElse returns the value of the environment variable env, or els if it
// is unset or empty.
func envElse(env, els string) string {
	if v := os.Getenv(env); v != "" {
		return v
	}
	return els
}
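For illustration, a hypothetical call (the env var name is an assumption):

dbHost := envElse("LOTUS_HARMONYDB_HOSTS", "127.0.0.1") // hypothetical env override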