package kit

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"net"
	"net/http"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/google/uuid"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	cborutil "github.com/filecoin-project/go-cbor-util"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/builtin"
	"github.com/filecoin-project/go-state-types/exitcode"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/go-statestore"
	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
	power3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/power"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
	"github.com/filecoin-project/lotus/chain/gen"
	genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
	"github.com/filecoin-project/lotus/chain/messagepool"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet/key"
	"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
	"github.com/filecoin-project/lotus/cmd/lotus-worker/sealworker"
	"github.com/filecoin-project/lotus/gateway"
	"github.com/filecoin-project/lotus/genesis"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	"github.com/filecoin-project/lotus/markets/idxprov"
	"github.com/filecoin-project/lotus/markets/idxprov/idxprov_test"
	lotusminer "github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/filecoin-project/lotus/node/modules"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	testing2 "github.com/filecoin-project/lotus/node/modules/testing"
	"github.com/filecoin-project/lotus/node/repo"
	"github.com/filecoin-project/lotus/storage/paths"
	pipeline "github.com/filecoin-project/lotus/storage/pipeline"
	sectorstorage "github.com/filecoin-project/lotus/storage/sealer"
	"github.com/filecoin-project/lotus/storage/sealer/mock"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

func init() {
	chain.BootstrapPeerThreshold = 1
	messagepool.HeadChangeCoalesceMinDelay = time.Microsecond
	messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond
	messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond
}

// Ensemble is a collection of nodes instantiated within a test.
//
// Create a new ensemble with:
//
//	ens := kit.NewEnsemble()
//
// Create full nodes and miners:
//
//	var full TestFullNode
//	var miner TestMiner
//	ens.FullNode(&full, opts...) // populates a full node
//	ens.Miner(&miner, &full, opts...) // populates a miner, using the full node as its chain daemon
//
// It is possible to pass functional options to set initial balances,
// presealed sectors, owner keys, etc.
//
// After the initial nodes are added, call `ens.Start()` to forge genesis
// and start the network. Mining will NOT be started automatically. It needs
// to be started explicitly by calling `BeginMining`.
//
// Nodes also need to be connected with one another, either via `ens.Connect()`
// or `ens.InterconnectAll()`. A common incantation for simple tests is to do:
//
//	ens.InterconnectAll().BeginMining(blocktime)
//
// You can continue to add more nodes, but you must always follow with
// `ens.Start()` to activate the new nodes.
//
// The API is chainable, so it's possible to do a lot in a very succinct way:
//
//	kit.NewEnsemble().FullNode(&full).Miner(&miner, &full).Start().InterconnectAll().BeginMining()
//
// You can also find convenient fullnode:miner presets, such as 1:1, 1:2,
// and 2:1, e.g.:
//
//	kit.EnsembleMinimal()
//	kit.EnsembleOneTwo()
//	kit.EnsembleTwoOne()
type Ensemble struct {
	t            *testing.T
	bootstrapped bool
	genesisBlock bytes.Buffer
	mn           mocknet.Mocknet
	options      *ensembleOpts

	inactive struct {
		fullnodes []*TestFullNode
		miners    []*TestMiner
		workers   []*TestWorker
	}
	active struct {
		fullnodes []*TestFullNode
		miners    []*TestMiner
		workers   []*TestWorker
		bms       map[*TestMiner]*BlockMiner
	}
	genesis struct {
		version  network.Version
		miners   []genesis.Miner
		accounts []genesis.Actor
	}
}
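
// For orientation, a complete flow reads roughly like the sketch below. It is
// illustrative only: it assumes the MockProofs ensemble option defined in this
// package's options, and a real test would go on to make assertions once
// mining has begun.
//
//	func TestEnsembleSketch(t *testing.T) {
//		var (
//			full  TestFullNode
//			miner TestMiner
//		)
//		ens := kit.NewEnsemble(t, kit.MockProofs())
//		ens.FullNode(&full).Miner(&miner, &full).Start()
//		ens.InterconnectAll().BeginMining(50 * time.Millisecond)
//	}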

// NewEnsemble instantiates a new blank Ensemble.
func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble {
	options := DefaultEnsembleOpts
	for _, o := range opts {
		err := o(&options)
		require.NoError(t, err)
	}

	n := &Ensemble{t: t, options: &options}
	n.active.bms = make(map[*TestMiner]*BlockMiner)

	for _, up := range options.upgradeSchedule {
		if up.Height < 0 {
			n.genesis.version = up.Network
		}
	}

	// add accounts from ensemble options to genesis.
	for _, acc := range options.accounts {
		n.genesis.accounts = append(n.genesis.accounts, genesis.Actor{
			Type:    genesis.TAccount,
			Balance: acc.initialBalance,
			Meta:    (&genesis.AccountMeta{Owner: acc.key.Address}).ActorMeta(),
		})
	}

	// Ensure we're using the right actors. This really shouldn't be some global thing, but it's
	// the best we can do for now.
	if n.options.mockProofs {
		require.NoError(t, build.UseNetworkBundle("testing-fake-proofs"))
	} else {
		require.NoError(t, build.UseNetworkBundle("testing"))
	}

	return n
}
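
// As a usage sketch, a test that needs mock proofs and a specific genesis
// network version can combine ensemble options like this (kit.MockProofs and
// kit.UpgradeSchedule are assumed from this package's options; an upgrade
// with a negative height sets the genesis network version, per the loop
// above):
//
//	ens := kit.NewEnsemble(t, kit.MockProofs(), kit.UpgradeSchedule(stmgr.Upgrade{
//		Network: network.Version17,
//		Height:  -1,
//	}))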

// Mocknet returns the underlying mocknet.
func (n *Ensemble) Mocknet() mocknet.Mocknet {
	return n.mn
}

// NewPrivKey generates a fresh libp2p Ed25519 private key and derives the
// corresponding peer ID, failing the test on error.
func (n *Ensemble) NewPrivKey() (libp2pcrypto.PrivKey, peer.ID) {
	privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
	require.NoError(n.t, err)

	peerId, err := peer.IDFromPrivateKey(privkey)
	require.NoError(n.t, err)

	return privkey, peerId
}

// FullNode enrolls a new full node.
func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
	options := DefaultNodeOpts
	for _, o := range opts {
		err := o(&options)
		require.NoError(n.t, err)
	}

	key, err := key.GenerateKey(types.KTBLS)
	require.NoError(n.t, err)

	if !n.bootstrapped && !options.balance.IsZero() {
		// if we still haven't forged genesis, create a key+address, and assign
		// it some FIL; this will be set as the default wallet when the node is
		// started.
		genacc := genesis.Actor{
			Type:    genesis.TAccount,
			Balance: options.balance,
			Meta:    (&genesis.AccountMeta{Owner: key.Address}).ActorMeta(),
		}

		n.genesis.accounts = append(n.genesis.accounts, genacc)
	}

	*full = TestFullNode{t: n.t, options: options, DefaultKey: key, EthSubRouter: gateway.NewEthSubHandler()}

	n.inactive.fullnodes = append(n.inactive.fullnodes, full)
	return n
}

// MinerEnroll enrolls a new miner, using the provided full node for chain
// interactions.
func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
	require.NotNil(n.t, full, "full node required when instantiating miner")

	options := DefaultNodeOpts
	for _, o := range opts {
		err := o(&options)
		require.NoError(n.t, err)
	}

	privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
	require.NoError(n.t, err)

	peerId, err := peer.IDFromPrivateKey(privkey)
	require.NoError(n.t, err)

	tdir, err := os.MkdirTemp("", "preseal-memgen")
	require.NoError(n.t, err)

	minerCnt := len(n.inactive.miners) + len(n.active.miners)

	actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt))
	require.NoError(n.t, err)

	if options.mainMiner != nil {
		actorAddr = options.mainMiner.ActorAddr
	}

	ownerKey := options.ownerKey
	var presealSectors int

	if !n.bootstrapped {
		presealSectors = options.sectors

		var (
			k    *types.KeyInfo
			genm *genesis.Miner
		)

		// Will use 2KiB sectors by default (default value of sectorSize).
		proofType, err := miner.SealProofTypeFromSectorSize(options.sectorSize, n.genesis.version)
		require.NoError(n.t, err)

		// Create the preseal commitment.
		if n.options.mockProofs {
			genm, k, err = mock.PreSeal(proofType, actorAddr, presealSectors)
		} else {
			genm, k, err = seed.PreSeal(actorAddr, proofType, 0, presealSectors, tdir, []byte("make genesis mem random"), nil, true)
		}
		require.NoError(n.t, err)

		genm.PeerId = peerId

		// create an owner key, and assign it some FIL.
		ownerKey, err = key.NewKey(*k)
		require.NoError(n.t, err)

		genacc := genesis.Actor{
			Type:    genesis.TAccount,
			Balance: options.balance,
			Meta:    (&genesis.AccountMeta{Owner: ownerKey.Address}).ActorMeta(),
		}

		n.genesis.miners = append(n.genesis.miners, *genm)
		n.genesis.accounts = append(n.genesis.accounts, genacc)
	} else {
		require.NotNil(n.t, ownerKey, "owner key can't be nil if initializing a miner after genesis")
	}

	rl, err := net.Listen("tcp", "127.0.0.1:")
	require.NoError(n.t, err)

	*minerNode = TestMiner{
		t:              n.t,
		ActorAddr:      actorAddr,
		OwnerKey:       ownerKey,
		FullNode:       full,
		PresealDir:     tdir,
		PresealSectors: presealSectors,
		options:        options,
		RemoteListener: rl,
	}

	minerNode.Libp2p.PeerID = peerId
	minerNode.Libp2p.PrivKey = privkey

	return n
}

// AddInactiveMiner enrolls a pre-constructed miner so that it is activated on
// the next call to Start.
func (n *Ensemble) AddInactiveMiner(m *TestMiner) {
	n.inactive.miners = append(n.inactive.miners, m)
}

// Miner enrolls a new miner, using the provided full node for chain
// interactions, and adds it to the inactive set pending the next Start.
func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
	n.MinerEnroll(minerNode, full, opts...)
	n.AddInactiveMiner(minerNode)
	return n
}
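
// Enrolling an additional miner against the same chain daemon is a one-liner;
// a hedged sketch (WithAllSubsystems is assumed from this package's node
// options):
//
//	var miner2 TestMiner
//	ens.Miner(&miner2, &full, kit.WithAllSubsystems()).Start()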

// Worker enrolls a new worker, using the provided miner node for chain and
// sector-storage interactions.
func (n *Ensemble) Worker(minerNode *TestMiner, worker *TestWorker, opts ...NodeOpt) *Ensemble {
	require.NotNil(n.t, minerNode, "miner node required when instantiating worker")

	options := DefaultNodeOpts
	for _, o := range opts {
		err := o(&options)
		require.NoError(n.t, err)
	}

	rl, err := net.Listen("tcp", "127.0.0.1:")
	require.NoError(n.t, err)

	*worker = TestWorker{
		t:              n.t,
		MinerNode:      minerNode,
		RemoteListener: rl,
		options:        options,

		Stop: func(ctx context.Context) error { return nil },
	}

	n.inactive.workers = append(n.inactive.workers, worker)

	return n
}
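
// A worker is enrolled against an already-enrolled miner and comes up on the
// next Start; a minimal sketch:
//
//	var worker TestWorker
//	ens.Worker(&miner, &worker).Start()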

// Start starts all enrolled nodes.
func (n *Ensemble) Start() *Ensemble {
	ctx := context.Background()

	var gtempl *genesis.Template
	if !n.bootstrapped {
		// We haven't been bootstrapped yet, we need to generate genesis and
		// create the networking backbone.
		gtempl = n.generateGenesis()
		n.mn = mocknet.New()
	}

	sharedITestID := harmonydb.ITestNewID()

	// ---------------------
	// FULL NODES
	// ---------------------

	// Create all inactive full nodes.
	for i, full := range n.inactive.fullnodes {

		var r repo.Repo
		if !full.options.fsrepo {
			rmem := repo.NewMemory(nil)
			n.t.Cleanup(rmem.Cleanup)
			r = rmem
		} else {
			repoPath := n.t.TempDir()
			rfs, err := repo.NewFS(repoPath)
			require.NoError(n.t, err)
			require.NoError(n.t, rfs.Init(repo.FullNode))
			r = rfs
		}

		// setup config with options
		lr, err := r.Lock(repo.FullNode)
		require.NoError(n.t, err)

		ks, err := lr.KeyStore()
		require.NoError(n.t, err)

		if full.Pkey != nil {
			pk, err := libp2pcrypto.MarshalPrivateKey(full.Pkey.PrivKey)
			require.NoError(n.t, err)

			err = ks.Put("libp2p-host", types.KeyInfo{
				Type:       "libp2p-host",
				PrivateKey: pk,
			})
			require.NoError(n.t, err)
		}

		c, err := lr.Config()
		require.NoError(n.t, err)

		cfg, ok := c.(*config.FullNode)
		if !ok {
			n.t.Fatalf("invalid config from repo, got: %T", c)
		}
		for _, opt := range full.options.cfgOpts {
			require.NoError(n.t, opt(cfg))
		}
		err = lr.SetConfig(func(raw interface{}) {
			rcfg := raw.(*config.FullNode)
			*rcfg = *cfg
		})
		require.NoError(n.t, err)

		err = lr.Close()
		require.NoError(n.t, err)

		opts := []node.Option{
			node.FullAPI(&full.FullNode, node.Lite(full.options.lite)),
			node.Base(),
			node.Repo(r),
			node.If(full.options.disableLibp2p, node.MockHost(n.mn)),
			node.Test(),

			// so that we subscribe to pubsub topics immediately
			node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)),

			// upgrades
			node.Override(new(stmgr.UpgradeSchedule), n.options.upgradeSchedule),
		}

		// append any node builder options.
		opts = append(opts, full.options.extraNodeOpts...)

		// Either generate the genesis or inject it.
		if i == 0 && !n.bootstrapped {
			opts = append(opts, node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&n.genesisBlock, *gtempl)))
		} else {
			opts = append(opts, node.Override(new(modules.Genesis), modules.LoadGenesis(n.genesisBlock.Bytes())))
		}

		// Are we mocking proofs?
		if n.options.mockProofs {
			opts = append(opts,
				node.Override(new(storiface.Verifier), mock.MockVerifier),
				node.Override(new(storiface.Prover), mock.MockProver),
			)
		}

		// Call option builders, passing active nodes as the parameter
		for _, bopt := range full.options.optBuilders {
			opts = append(opts, bopt(n.active.fullnodes))
		}

		// Construct the full node.
		stop, err := node.New(ctx, opts...)
		full.Stop = stop

		require.NoError(n.t, err)

		addr, err := full.WalletImport(context.Background(), &full.DefaultKey.KeyInfo)
		require.NoError(n.t, err)

		err = full.WalletSetDefault(context.Background(), addr)
		require.NoError(n.t, err)

		var rpcShutdownOnce sync.Once
		var stopOnce sync.Once
		var stopErr error

		stopFunc := stop
		stop = func(ctx context.Context) error {
			stopOnce.Do(func() {
				stopErr = stopFunc(ctx)
			})
			return stopErr
		}

		// Are we hitting this node through its RPC?
		if full.options.rpc {
			withRPC, rpcCloser := fullRpc(n.t, full)
			n.inactive.fullnodes[i] = withRPC
			full.Stop = func(ctx2 context.Context) error {
				rpcShutdownOnce.Do(rpcCloser)
				return stop(ctx2)
			}
			n.t.Cleanup(func() { rpcShutdownOnce.Do(rpcCloser) })
		}

		n.t.Cleanup(func() {
			_ = stop(context.Background())
		})

		n.active.fullnodes = append(n.active.fullnodes, full)
	}

	// If we are here, we have processed all inactive fullnodes and moved them
	// to active, so clear the slice.
	n.inactive.fullnodes = n.inactive.fullnodes[:0]

	// Link all the nodes.
	err := n.mn.LinkAll()
	require.NoError(n.t, err)

	// ---------------------
	// MINERS
	// ---------------------

	// Create all inactive miners.
	for i, m := range n.inactive.miners {
		if n.bootstrapped {
			if m.options.mainMiner == nil {
				// this is a miner created after genesis, so it won't have a preseal.
				// we need to create it on chain.

				proofType, err := miner.WindowPoStProofTypeFromSectorSize(m.options.sectorSize, n.genesis.version)
				require.NoError(n.t, err)

				params, aerr := actors.SerializeParams(&power3.CreateMinerParams{
					Owner:               m.OwnerKey.Address,
					Worker:              m.OwnerKey.Address,
					WindowPoStProofType: proofType,
					Peer:                abi.PeerID(m.Libp2p.PeerID),
				})
				require.NoError(n.t, aerr)

				createStorageMinerMsg := &types.Message{
					From:  m.OwnerKey.Address,
					To:    power.Address,
					Value: big.Zero(),

					Method: power.Methods.CreateMiner,
					Params: params,
				}
				signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, &api.MessageSendSpec{
					MsgUuid: uuid.New(),
				})
				require.NoError(n.t, err)

				mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
				require.NoError(n.t, err)
				require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)

				var retval power3.CreateMinerReturn
				err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
				require.NoError(n.t, err, "failed to create miner")

				m.ActorAddr = retval.IDAddress
			} else {
				params, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
				require.NoError(n.t, err)

				msg := &types.Message{
					To:     m.options.mainMiner.ActorAddr,
					From:   m.options.mainMiner.OwnerKey.Address,
					Method: builtin.MethodsMiner.ChangePeerID,
					Params: params,
					Value:  types.NewInt(0),
				}

				signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
					MsgUuid: uuid.New(),
				})
				require.NoError(n.t, err2)

				mw, err2 := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
				require.NoError(n.t, err2)
				require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)
			}
		}

		has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address)
		require.NoError(n.t, err)

		// Only import the owner's full key into our companion full node, if we
		// don't have it still.
		if !has {
			_, err = m.FullNode.WalletImport(ctx, &m.OwnerKey.KeyInfo)
			require.NoError(n.t, err)
		}

		// // Set it as the default address.
		// err = m.FullNode.WalletSetDefault(ctx, m.OwnerAddr.Address)
		// require.NoError(n.t, err)

		r := repo.NewMemory(nil)
		n.t.Cleanup(r.Cleanup)

		lr, err := r.Lock(repo.StorageMiner)
		require.NoError(n.t, err)

		c, err := lr.Config()
		require.NoError(n.t, err)

		cfg, ok := c.(*config.StorageMiner)
		if !ok {
			n.t.Fatalf("invalid config from repo, got: %T", c)
		}
		cfg.Common.API.RemoteListenAddress = m.RemoteListener.Addr().String()
		cfg.Subsystems.EnableMarkets = m.options.subsystems.Has(SMarkets)
		cfg.Subsystems.EnableMining = m.options.subsystems.Has(SMining)
		cfg.Subsystems.EnableSealing = m.options.subsystems.Has(SSealing)
		cfg.Subsystems.EnableSectorStorage = m.options.subsystems.Has(SSectorStorage)
		cfg.Dealmaking.MaxStagingDealsBytes = m.options.maxStagingDealsBytes

		if m.options.mainMiner != nil {
			token, err := m.options.mainMiner.FullNode.AuthNew(ctx, api.AllPermissions)
			require.NoError(n.t, err)

			cfg.Subsystems.SectorIndexApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr)
			cfg.Subsystems.SealerApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr)
		}

		err = lr.SetConfig(func(raw interface{}) {
			rcfg := raw.(*config.StorageMiner)
			*rcfg = *cfg
		})
		require.NoError(n.t, err)

		ks, err := lr.KeyStore()
		require.NoError(n.t, err)

		pk, err := libp2pcrypto.MarshalPrivateKey(m.Libp2p.PrivKey)
		require.NoError(n.t, err)

		err = ks.Put("libp2p-host", types.KeyInfo{
			Type:       "libp2p-host",
			PrivateKey: pk,
		})
		require.NoError(n.t, err)

		ds, err := lr.Datastore(context.TODO(), "/metadata")
		require.NoError(n.t, err)

		err = ds.Put(ctx, datastore.NewKey("miner-address"), m.ActorAddr.Bytes())
		require.NoError(n.t, err)

		if i < len(n.genesis.miners) && !n.bootstrapped {
			// if this is a genesis miner, import preseal metadata
			require.NoError(n.t, importPreSealMeta(ctx, n.genesis.miners[i], ds))
		}

		// using real proofs, therefore need real sectors.
		if !n.bootstrapped && !n.options.mockProofs {
			psd := m.PresealDir
			noPaths := m.options.noStorage

			err := lr.SetStorage(func(sc *storiface.StorageConfig) {
				if noPaths {
					sc.StoragePaths = []storiface.LocalPath{}
				}
				sc.StoragePaths = append(sc.StoragePaths, storiface.LocalPath{Path: psd})
			})

			require.NoError(n.t, err)
		}

		err = lr.Close()
		require.NoError(n.t, err)

		if m.options.mainMiner == nil {
			enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
			require.NoError(n.t, err)

			msg := &types.Message{
				From:   m.OwnerKey.Address,
				To:     m.ActorAddr,
				Method: builtin.MethodsMiner.ChangePeerID,
				Params: enc,
				Value:  types.NewInt(0),
			}

			_, err2 := m.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
				MsgUuid: uuid.New(),
			})
			require.NoError(n.t, err2)
		}

		noLocal := m.options.minerNoLocalSealing
		assigner := m.options.minerAssigner
		disallowRemoteFinalize := m.options.disallowRemoteFinalize

		var mineBlock = make(chan lotusminer.MineReq)

		copy := *m.FullNode
		copy.FullNode = modules.MakeUuidWrapper(copy.FullNode)
		m.FullNode = &copy

		//m.FullNode.FullNode = modules.MakeUuidWrapper(fn.FullNode)

		opts := []node.Option{
			node.StorageMiner(&m.StorageMiner, cfg.Subsystems),
			node.Base(),
			node.Repo(r),
			node.Test(),

			node.If(m.options.disableLibp2p, node.MockHost(n.mn)),
			//node.Override(new(v1api.RawFullNodeAPI), func() api.FullNode { return modules.MakeUuidWrapper(m.FullNode) }),
			//node.Override(new(v1api.RawFullNodeAPI), modules.MakeUuidWrapper),
			node.Override(new(v1api.RawFullNodeAPI), m.FullNode),
			node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)),

			// disable resource filtering so that local worker gets assigned tasks
			// regardless of system pressure.
			node.Override(new(config.SealerConfig), func() config.SealerConfig {
				scfg := config.DefaultStorageMiner()

				if noLocal {
					scfg.Storage.AllowSectorDownload = false
					scfg.Storage.AllowAddPiece = false
					scfg.Storage.AllowPreCommit1 = false
					scfg.Storage.AllowPreCommit2 = false
					scfg.Storage.AllowCommit = false
					scfg.Storage.AllowUnseal = false
				}

				scfg.Storage.Assigner = assigner
				scfg.Storage.DisallowRemoteFinalize = disallowRemoteFinalize
				scfg.Storage.ResourceFiltering = config.ResourceFilteringDisabled
				return scfg.Storage
			}),

			// upgrades
			node.Override(new(stmgr.UpgradeSchedule), n.options.upgradeSchedule),

			node.Override(new(harmonydb.ITestID), sharedITestID),
			node.Override(new(config.HarmonyDB), func() config.HarmonyDB {
				return config.HarmonyDB{
					Hosts:    []string{envElse("LOTUS_HARMONYDB_HOSTS", "127.0.0.1")},
					Database: "yugabyte",
					Username: "yugabyte",
					Password: "yugabyte",
					Port:     "5433",
				}
			}),
		}

		if m.options.subsystems.Has(SMarkets) {
			opts = append(opts,
				node.Override(new(idxprov.MeshCreator), idxprov_test.NewNoopMeshCreator),
			)
		}

		// append any node builder options.
		opts = append(opts, m.options.extraNodeOpts...)

		idAddr, err := address.IDFromAddress(m.ActorAddr)
		require.NoError(n.t, err)

		// preload preseals if the network still hasn't bootstrapped.
		var presealSectors []abi.SectorID
		if !n.bootstrapped {
			sectors := n.genesis.miners[i].Sectors
			for _, sector := range sectors {
				presealSectors = append(presealSectors, abi.SectorID{
					Miner:  abi.ActorID(idAddr),
					Number: sector.SectorID,
				})
			}
		}

		if n.options.mockProofs {
			opts = append(opts,
				node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
					return mock.NewMockSectorMgr(presealSectors), nil
				}),
				node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))),
				node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))),
				node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))),

				node.Override(new(storiface.Verifier), mock.MockVerifier),
				node.Override(new(storiface.Prover), mock.MockProver),
				node.Unset(new(*sectorstorage.Manager)),
			)
		}

		// start node
		stop, err := node.New(ctx, opts...)
		require.NoError(n.t, err)

		n.t.Cleanup(func() { _ = stop(context.Background()) })
		mCopy := m
		n.t.Cleanup(func() {
			mCopy.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB.ITestDeleteAll()
		})

		m.BaseAPI = m.StorageMiner

		// Are we hitting this node through its RPC?
		if m.options.rpc {
			withRPC := minerRpc(n.t, m)
			n.inactive.miners[i] = withRPC
		}

		mineOne := func(ctx context.Context, req lotusminer.MineReq) error {
			select {
			case mineBlock <- req:
				return nil
			case <-ctx.Done():
				return ctx.Err()
			}
		}

		m.MineOne = mineOne
		m.Stop = stop

		n.active.miners = append(n.active.miners, m)
	}

	// If we are here, we have processed all inactive miners and moved them
	// to active, so clear the slice.
	n.inactive.miners = n.inactive.miners[:0]

	// ---------------------
	// WORKERS
	// ---------------------

	// Create all inactive workers.
	for i, m := range n.inactive.workers {
		r := repo.NewMemory(nil)

		lr, err := r.Lock(repo.Worker)
		require.NoError(n.t, err)

		if m.options.noStorage {
			err := lr.SetStorage(func(sc *storiface.StorageConfig) {
				sc.StoragePaths = []storiface.LocalPath{}
			})
			require.NoError(n.t, err)
		}

		ds, err := lr.Datastore(context.Background(), "/metadata")
		require.NoError(n.t, err)

		addr := m.RemoteListener.Addr().String()

		localStore, err := paths.NewLocal(ctx, lr, m.MinerNode, []string{"http://" + addr + "/remote"})
		require.NoError(n.t, err)

		auth := http.Header(nil)

		// FUTURE: Use m.MinerNode.(BaseAPI).(impl.StorageMinerAPI).HarmonyDB to setup.

		remote := paths.NewRemote(localStore, m.MinerNode, auth, 20, &paths.DefaultPartialFileHandler{})
		store := m.options.workerStorageOpt(remote)

		fh := &paths.FetchHandler{Local: localStore, PfHandler: &paths.DefaultPartialFileHandler{}}
		m.FetchHandler = fh.ServeHTTP

		wsts := statestore.New(namespace.Wrap(ds, modules.WorkerCallsPrefix))

		workerApi := &sealworker.Worker{
			LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
				TaskTypes: m.options.workerTasks,
				NoSwap:    false,
				Name:      m.options.workerName,
			}, store, localStore, m.MinerNode, m.MinerNode, wsts),
			LocalStore: localStore,
			Storage:    lr,
		}

		m.Worker = workerApi

		require.True(n.t, m.options.rpc)

		withRPC := workerRpc(n.t, m)
		n.inactive.workers[i] = withRPC

		err = m.MinerNode.WorkerConnect(ctx, "http://"+addr+"/rpc/v0")
		require.NoError(n.t, err)

		n.active.workers = append(n.active.workers, m)
	}

	// If we are here, we have processed all inactive workers and moved them
	// to active, so clear the slice.
	n.inactive.workers = n.inactive.workers[:0]

	// ---------------------
	// MISC
	// ---------------------

	// Link all the nodes.
	err = n.mn.LinkAll()
	require.NoError(n.t, err)

	if !n.bootstrapped && len(n.active.miners) > 0 {
		// We have *just* bootstrapped, so mine 2 blocks to setup some CE stuff in some actors
		var wait sync.Mutex
		wait.Lock()

		observer := n.active.fullnodes[0]

		bm := NewBlockMiner(n.t, n.active.miners[0])
		n.t.Cleanup(bm.Stop)

		bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) {
			wait.Unlock()
		})
		wait.Lock()
		bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) {
			wait.Unlock()
		})
		wait.Lock()
		n.bootstrapped = true
	}

	return n
}

// InterconnectAll connects all miners and full nodes to one another.
func (n *Ensemble) InterconnectAll() *Ensemble {
	// connect full nodes to miners.
	for _, from := range n.active.fullnodes {
		for _, to := range n.active.miners {
			// []*TestMiner to []api.CommonAPI type coercion not possible
			// so cannot use variadic form.
			n.Connect(from, to)
		}
	}

	// connect full nodes between each other, skipping ourselves.
	last := len(n.active.fullnodes) - 1
	for i, from := range n.active.fullnodes {
		if i == last {
			continue
		}
		for _, to := range n.active.fullnodes[i+1:] {
			n.Connect(from, to)
		}
	}
	return n
}

// Connect connects one full node to the provided full nodes.
func (n *Ensemble) Connect(from api.Net, to ...api.Net) *Ensemble {
	addr, err := from.NetAddrsListen(context.Background())
	require.NoError(n.t, err)

	for _, other := range to {
		err = other.NetConnect(context.Background(), addr)
		require.NoError(n.t, err)
	}
	return n
}

// BeginMiningMustPost kicks off mining while requiring that window posts are
// submitted on time. It supports at most one miner; if none is provided
// explicitly, it selects the single active miner that doesn't yet have a
// block miner. It also adds a cleanup function to stop mining on test
// teardown.
func (n *Ensemble) BeginMiningMustPost(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner {
	ctx := context.Background()

	// wait one second to make sure that nodes are connected and have handshaken.
	// TODO make this deterministic by listening to identify events on the
	// libp2p eventbus instead (or something else).
	time.Sleep(1 * time.Second)

	var bms []*BlockMiner
	if len(miners) == 0 {
		// no miners have been provided explicitly, instantiate block miners
		// for all active miners that aren't already mining.
		for _, m := range n.active.miners {
			if _, ok := n.active.bms[m]; ok {
				continue // skip, already have a block miner
			}
			miners = append(miners, m)
		}
	}

	if len(miners) > 1 {
		n.t.Fatalf("Only one active miner for MustPost, but have %d", len(miners))
	}

	for _, m := range miners {
		bm := NewBlockMiner(n.t, m)
		bm.MineBlocksMustPost(ctx, blocktime)
		n.t.Cleanup(bm.Stop)

		bms = append(bms, bm)

		n.active.bms[m] = bm
	}

	return bms
}
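
// BeginMiningMustPost is the variant WindowPoSt tests reach for when every
// due deadline must actually be proven; a minimal usage sketch:
//
//	ens.InterconnectAll()
//	ens.BeginMiningMustPost(blocktime)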

// BeginMining kicks off mining for the specified miners. If nil or 0-length,
// it will kick off mining for all enrolled and active miners. It also adds a
// cleanup function to stop all mining operations on test teardown.
func (n *Ensemble) BeginMining(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner {
	ctx := context.Background()

	// wait one second to make sure that nodes are connected and have handshaken.
	// TODO make this deterministic by listening to identify events on the
	// libp2p eventbus instead (or something else).
	time.Sleep(1 * time.Second)

	var bms []*BlockMiner
	if len(miners) == 0 {
		// no miners have been provided explicitly, instantiate block miners
		// for all active miners that aren't already mining.
		for _, m := range n.active.miners {
			if _, ok := n.active.bms[m]; ok {
				continue // skip, already have a block miner
			}
			miners = append(miners, m)
		}
	}

	for _, m := range miners {
		bm := NewBlockMiner(n.t, m)
		bm.MineBlocks(ctx, blocktime)
		n.t.Cleanup(bm.Stop)

		bms = append(bms, bm)

		n.active.bms[m] = bm
	}

	return bms
}

// generateGenesis assembles the genesis template from the enrolled genesis
// miners and accounts, applying the ensemble's verified-registry root and
// timestamp options.
func (n *Ensemble) generateGenesis() *genesis.Template {
	var verifRoot = gen.DefaultVerifregRootkeyActor
	if k := n.options.verifiedRoot.key; k != nil {
		verifRoot = genesis.Actor{
			Type:    genesis.TAccount,
			Balance: n.options.verifiedRoot.initialBalance,
			Meta:    (&genesis.AccountMeta{Owner: k.Address}).ActorMeta(),
		}
	}

	templ := &genesis.Template{
		NetworkVersion:   n.genesis.version,
		Accounts:         n.genesis.accounts,
		Miners:           n.genesis.miners,
		NetworkName:      "test",
		Timestamp:        uint64(time.Now().Unix() - int64(n.options.pastOffset.Seconds())),
		VerifregRootKey:  verifRoot,
		RemainderAccount: gen.DefaultRemainderAccountActor,
	}

	return templ
}

// importPreSealMeta writes preseal sector metadata for a genesis miner into
// its metadata datastore, and stores the highest allocated sector number
// under the storage counter key.
func importPreSealMeta(ctx context.Context, meta genesis.Miner, mds dtypes.MetadataDS) error {
	maxSectorID := abi.SectorNumber(0)
	for _, sector := range meta.Sectors {
		sectorKey := datastore.NewKey(pipeline.SectorStorePrefix).ChildString(fmt.Sprint(sector.SectorID))

		commD := sector.CommD
		commR := sector.CommR

		info := &pipeline.SectorInfo{
			State:        pipeline.Proving,
			SectorNumber: sector.SectorID,
			Pieces: []api.SectorPiece{
				{
					Piece: abi.PieceInfo{
						Size:     abi.PaddedPieceSize(meta.SectorSize),
						PieceCID: commD,
					},
					DealInfo: nil, // todo: likely possible to get, but not really that useful
				},
			},
			CommD: &commD,
			CommR: &commR,
		}

		b, err := cborutil.Dump(info)
		if err != nil {
			return err
		}

		if err := mds.Put(ctx, sectorKey, b); err != nil {
			return err
		}

		if sector.SectorID > maxSectorID {
			maxSectorID = sector.SectorID
		}
	}

	buf := make([]byte, binary.MaxVarintLen64)
	size := binary.PutUvarint(buf, uint64(maxSectorID))
	return mds.Put(ctx, datastore.NewKey(pipeline.StorageCounterDSPrefix), buf[:size])
}

// envElse returns the value of the environment variable env if it is set and
// non-empty, or els otherwise.
func envElse(env, els string) string {
	if v := os.Getenv(env); v != "" {
		return v
	}
	return els
}