commit ea6dbc912b
merge origin/master into plan/drand-halting
@@ -4,7 +4,7 @@

 [global]
 plan = "lotus-soup"
-case = "lotus-baseline"
+case = "deals-e2e"
 total_instances = 14
 builder = "docker:go"
 runner = "cluster:k8s"
@@ -4,7 +4,7 @@

 [global]
 plan = "lotus-soup"
-case = "lotus-baseline"
+case = "deals-e2e"
 total_instances = 5
 builder = "docker:go"
 runner = "cluster:k8s"
@@ -4,7 +4,7 @@

 [global]
 plan = "lotus-soup"
-case = "lotus-baseline"
+case = "deals-e2e"
 total_instances = 6
 builder = "docker:go"
 runner = "cluster:k8s"
@@ -4,7 +4,7 @@

 [global]
 plan = "lotus-soup"
-case = "lotus-baseline"
+case = "deals-e2e"
 total_instances = 3
 builder = "docker:go"
 runner = "cluster:k8s"
@@ -4,7 +4,7 @@

 [global]
 plan = "lotus-soup"
-case = "lotus-baseline"
+case = "deals-e2e"
 total_instances = 6
 builder = "docker:go"
 runner = "local:docker"
@@ -4,7 +4,7 @@

 [global]
 plan = "lotus-soup"
-case = "lotus-baseline"
+case = "deals-e2e"
 total_instances = 6
 builder = "docker:go"
 runner = "local:docker"
@@ -4,7 +4,7 @@

 [global]
 plan = "lotus-soup"
-case = "lotus-baseline"
+case = "deals-e2e"
 total_instances = 7
 builder = "docker:go"
 runner = "local:docker"
@@ -4,8 +4,8 @@

 [global]
 plan = "lotus-soup"
-case = "lotus-baseline"
-total_instances = 3
+case = "deals-e2e"
+total_instances = 6
 builder = "docker:go"
 runner = "local:docker"

@@ -35,7 +35,7 @@
 [[groups]]
 id = "miners"
 [groups.instances]
-count = 1
+count = 2
 percentage = 0.0
 [groups.run]
 [groups.run.test_params]
@@ -44,7 +44,7 @@
 [[groups]]
 id = "clients"
 [groups.instances]
-count = 1
+count = 3
 percentage = 0.0
 [groups.run]
 [groups.run.test_params]
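Note: the composition changes above only retarget these files at the "deals-e2e" case and adjust the group sizes. For orientation, the sketch below (not part of this commit) shows how a test case written against the testground Go SDK can read back the group ID and instance counts declared by the [[groups]] sections; the logGroupInfo helper and its package name are illustrative assumptions.

package compositions

import "github.com/testground/sdk-go/runtime"

// logGroupInfo reports which composition group the current instance belongs to.
// TestGroupID and the instance counts are populated by testground from the
// composition file's [global] and [[groups]] sections.
func logGroupInfo(runenv *runtime.RunEnv) error {
    runenv.RecordMessage("group %q: %d instance(s) of %d total",
        runenv.TestGroupID, runenv.TestGroupInstanceCount, runenv.TestInstanceCount)

    switch runenv.TestGroupID {
    case "miners":
        // miner-side setup would go here
    case "clients":
        // storage/retrieval client logic would go here
    }
    return nil
}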
@@ -264,6 +264,7 @@ github.com/filecoin-project/sector-storage v0.0.0-20200623224636-de544b531601/go
 github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA=
 github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y=
 github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
+github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
 github.com/filecoin-project/specs-actors v0.6.2-0.20200617175406-de392ca14121 h1:oRA+b4iN4H86xXDXbU3TOyvmBZp7//c5VqTc0oJ6nLg=
 github.com/filecoin-project/specs-actors v0.6.2-0.20200617175406-de392ca14121/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
 github.com/filecoin-project/specs-storage v0.1.0/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
@@ -4,6 +4,7 @@ import (
     "bytes"
     "context"
     "encoding/hex"
+    "encoding/json"
     "fmt"
     "io/ioutil"
     "net"
@@ -12,164 +13,37 @@ import (
     "time"

     "github.com/drand/drand/chain"
-    "github.com/drand/drand/client"
     hclient "github.com/drand/drand/client/http"
-    "github.com/drand/drand/core"
+    "github.com/drand/drand/demo/node"
-    "github.com/drand/drand/key"
     "github.com/drand/drand/log"
     "github.com/drand/drand/lp2p"
-    dnet "github.com/drand/drand/net"
-    "github.com/drand/drand/protobuf/drand"
-    dtest "github.com/drand/drand/test"
     "github.com/filecoin-project/lotus/node/modules/dtypes"
     "github.com/libp2p/go-libp2p-core/peer"
     ma "github.com/multiformats/go-multiaddr"
     "github.com/testground/sdk-go/sync"

-    "github.com/filecoin-project/oni/lotus-soup/statemachine"
 )

-var (
-    PrepareDrandTimeout = time.Minute
-    secretDKG           = "dkgsecret"
-)
+var PrepareDrandTimeout = time.Minute

 type DrandInstance struct {
-    daemon      *core.Drand
-    httpClient  client.Client
-    ctrlClient  *dnet.ControlClient
-    gossipRelay *lp2p.GossipRelayNode
-
     t *TestEnvironment

+    Node        node.Node
+    GossipRelay *lp2p.GossipRelayNode
+
     stateDir string
-    priv     *key.Pair
-    pubAddr  string
-    privAddr string
-    ctrlAddr string
 }

-func (dr *DrandInstance) Start() error {
-    opts := []core.ConfigOption{
-        core.WithLogLevel(getLogLevel(dr.t)),
-        core.WithConfigFolder(dr.stateDir),
-        core.WithPublicListenAddress(dr.pubAddr),
-        core.WithPrivateListenAddress(dr.privAddr),
-        core.WithControlPort(dr.ctrlAddr),
-        core.WithInsecure(),
-    }
-    conf := core.NewConfig(opts...)
-    fs := key.NewFileStore(conf.ConfigFolder())
-    fs.SaveKeyPair(dr.priv)
-    key.Save(path.Join(dr.stateDir, "public.toml"), dr.priv.Public, false)
-    if dr.daemon == nil {
-        drand, err := core.NewDrand(fs, conf)
-        if err != nil {
-            return err
-        }
-        dr.daemon = drand
-    } else {
-        drand, err := core.LoadDrand(fs, conf)
-        if err != nil {
-            return err
-        }
-        drand.StartBeacon(true)
-        dr.daemon = drand
-    }
-    return nil
+func (d *DrandInstance) Cleanup() error {
+    return os.RemoveAll(d.stateDir)
 }

-func (dr *DrandInstance) Ping() bool {
-    cl := dr.ctrl()
-    if err := cl.Ping(); err != nil {
-        return false
-    }
-    return true
-}
-
-func (dr *DrandInstance) Close() error {
-    dr.gossipRelay.Shutdown()
-    dr.daemon.Stop(context.Background())
-    return os.RemoveAll(dr.stateDir)
-}
-
-func (dr *DrandInstance) ctrl() *dnet.ControlClient {
-    if dr.ctrlClient != nil {
-        return dr.ctrlClient
-    }
-    cl, err := dnet.NewControlClient(dr.ctrlAddr)
-    if err != nil {
-        dr.t.RecordMessage("drand can't instantiate control client: %w", err)
-        return nil
-    }
-    dr.ctrlClient = cl
-    return cl
-}
-
-func (dr *DrandInstance) RunDKG(nodes, thr int, period time.Duration, timeout string, leader bool, leaderAddr string, beaconOffset int) *key.Group {
-    cl := dr.ctrl()
-    t, _ := time.ParseDuration(timeout)
-    var grp *drand.GroupPacket
-    var err error
-    if leader {
-        grp, err = cl.InitDKGLeader(nodes, thr, period, t, nil, secretDKG, beaconOffset)
-    } else {
-        leader := dnet.CreatePeer(leaderAddr, false)
-        grp, err = cl.InitDKG(leader, nil, secretDKG)
-    }
-    if err != nil {
-        dr.t.RecordMessage("drand dkg run failed: %w", err)
-        return nil
-    }
-    kg, _ := key.GroupFromProto(grp)
-    return kg
-}
-
-func (dr *DrandInstance) Halt() {
-    dr.t.RecordMessage("drand node #%d halting", dr.t.GroupSeq)
-    dr.daemon.StopBeacon()
-}
-
-func (dr *DrandInstance) Resume() {
-    dr.t.RecordMessage("drand node #%d resuming", dr.t.GroupSeq)
-    dr.daemon.StartBeacon(true)
-    // block until we can fetch the round corresponding to the current time
-    startTime := time.Now()
-    round := dr.httpClient.RoundAt(startTime)
-    timeout := 30 * time.Second
-    ctx, cancel := context.WithTimeout(context.Background(), timeout)
-    defer cancel()
-
-    done := make(chan struct{}, 1)
-    go func() {
-        for {
-            res, err := dr.httpClient.Get(ctx, round)
-            if err == nil {
-                dr.t.RecordMessage("drand chain caught up to round %d", res.Round())
-                done <- struct{}{}
-                return
-            }
-        }
-    }()
-
-    select {
-    case <-ctx.Done():
-        dr.t.RecordMessage("drand chain failed to catch up after %s", timeout.String())
-    case <-done:
-        dr.t.RecordMessage("drand chain resumed after %s catchup time", time.Since(startTime))
-    }
-}
-
-func (dr *DrandInstance) RunDefault() error {
-    dr.t.RecordMessage("running drand node")
-    defer dr.Close()
-
-    if dr.t.IsParamSet("suspend_events") {
-        suspender := statemachine.NewSuspender(dr, dr.t.RecordMessage)
-        suspender.RunEvents(dr.t.StringParam("suspend_events"))
-    }
-
-    dr.t.WaitUntilAllDone()
+func (d *DrandInstance) RunDefault() error {
+    d.t.RecordMessage("running drand node")
+    defer d.Cleanup()
+
+    // TODO add ability to halt / recover on demand
+    d.t.WaitUntilAllDone()
     return nil
 }

@@ -196,20 +70,13 @@ func PrepareDrandInstance(t *TestEnvironment) (*DrandInstance, error) {
     ctx, cancel := context.WithTimeout(context.Background(), PrepareDrandTimeout)
     defer cancel()

-    stateDir, err := ioutil.TempDir("/tmp", fmt.Sprintf("drand-%d", t.GroupSeq))
+    stateDir, err := ioutil.TempDir("", fmt.Sprintf("drand-%d", t.GroupSeq))

     if err != nil {
         return nil, err
     }

-    dr := DrandInstance{
-        t:        t,
-        stateDir: stateDir,
-        pubAddr:  dtest.FreeBind(myAddr.String()),
-        privAddr: dtest.FreeBind(myAddr.String()),
-        ctrlAddr: dtest.FreeBind("localhost"),
-    }
-    dr.priv = key.NewKeyPair(dr.privAddr)
+    // TODO(maybe): use TLS?
+    n := node.NewLocalNode(int(seq), period.String(), stateDir, false, myAddr.String())

     // share the node addresses with other nodes
     // TODO: if we implement TLS, this is where we'd share public TLS keys
@@ -224,8 +91,8 @@ func PrepareDrandInstance(t *TestEnvironment) (*DrandInstance, error) {
     var leaderAddr string
     ch := make(chan *NodeAddr)
     _, sub := t.SyncClient.MustPublishSubscribe(ctx, addrTopic, &NodeAddr{
-        PrivateAddr: dr.privAddr,
-        PublicAddr:  dr.pubAddr,
+        PrivateAddr: n.PrivateAddr(),
+        PublicAddr:  n.PublicAddr(),
         IsLeader:    isLeader,
     }, ch)

@@ -247,14 +114,14 @@ func PrepareDrandInstance(t *TestEnvironment) (*DrandInstance, error) {

     t.SyncClient.MustSignalAndWait(ctx, "drand-start", nNodes)
     t.RecordMessage("Starting drand sharing ceremony")
-    if err := dr.Start(); err != nil {
+    if err := n.Start(stateDir); err != nil {
         return nil, err
     }

     alive := false
     waitSecs := 10
     for i := 0; i < waitSecs; i++ {
-        if !dr.Ping() {
+        if !n.Ping() {
             time.Sleep(time.Second)
             continue
         }
@@ -271,7 +138,7 @@ func PrepareDrandInstance(t *TestEnvironment) (*DrandInstance, error) {
     if !isLeader {
         time.Sleep(time.Second)
     }
-    grp := dr.RunDKG(nNodes, threshold, period, "10s", isLeader, leaderAddr, beaconOffset)
+    grp := n.RunDKG(nNodes, threshold, period.String(), isLeader, leaderAddr, beaconOffset)
     if grp == nil {
         return nil, fmt.Errorf("drand dkg failed")
     }
@@ -285,18 +152,19 @@ func PrepareDrandInstance(t *TestEnvironment) (*DrandInstance, error) {
     t.RecordMessage("drand beacon chain started, fetching initial round via http")
     // verify that we can get a round of randomness from the chain using an http client
     info := chain.NewChainInfo(grp)
-    myPublicAddr := fmt.Sprintf("http://%s", dr.pubAddr)
-    dr.httpClient, err = hclient.NewWithInfo(myPublicAddr, info, nil)
+    myPublicAddr := fmt.Sprintf("http://%s", n.PublicAddr())
+    client, err := hclient.NewWithInfo(myPublicAddr, info, nil)
     if err != nil {
         return nil, fmt.Errorf("unable to create drand http client: %w", err)
     }

-    _, err = dr.httpClient.Get(ctx, 1)
+    _, err = client.Get(ctx, 1)
     if err != nil {
         return nil, fmt.Errorf("unable to get initial drand round: %w", err)
     }

     // start gossip relay (unless disabled via testplan parameter)
+    var gossipRelay *lp2p.GossipRelayNode
     var relayAddrs []peer.AddrInfo

     if runGossipRelay {
@@ -308,17 +176,17 @@ func PrepareDrandInstance(t *TestEnvironment) (*DrandInstance, error) {
             DataDir:      gossipDir,
             IdentityPath: path.Join(gossipDir, "identity.key"),
             Insecure:     true,
-            Client:       dr.httpClient,
+            Client:       client,
         }
         t.RecordMessage("starting drand gossip relay")
-        dr.gossipRelay, err = lp2p.NewGossipRelayNode(log.NewLogger(getLogLevel(t)), &relayCfg)
+        gossipRelay, err = lp2p.NewGossipRelayNode(log.DefaultLogger, &relayCfg)
         if err != nil {
             return nil, fmt.Errorf("failed to construct drand gossip relay: %w", err)
         }

         t.RecordMessage("sharing gossip relay addrs")
         // share the gossip relay addrs so we can publish them in DrandRuntimeInfo
-        relayInfo, err := relayAddrInfo(dr.gossipRelay.Multiaddrs(), myAddr)
+        relayInfo, err := relayAddrInfo(gossipRelay.Multiaddrs(), myAddr)
         if err != nil {
             return nil, err
         }
@@ -349,13 +217,17 @@ func PrepareDrandInstance(t *TestEnvironment) (*DrandInstance, error) {
            },
            GossipBootstrap: relayAddrs,
        }
-        t.DebugSpew("publishing drand config on sync topic: %#v", cfg)
+        dump, _ := json.Marshal(cfg)
+        t.RecordMessage("publishing drand config on sync topic: %s", string(dump))
         t.SyncClient.MustPublish(ctx, DrandConfigTopic, &cfg)
     }

-    // signal that we're ready to start the test
-    t.SyncClient.MustSignalAndWait(ctx, StateReady, t.TestInstanceCount)
-    return &dr, nil
+    return &DrandInstance{
+        t:           t,
+        Node:        n,
+        GossipRelay: gossipRelay,
+        stateDir:    stateDir,
+    }, nil
 }

 // waitForDrandConfig should be called by filecoin instances before constructing the lotus Node
@@ -380,14 +252,3 @@ func relayAddrInfo(addrs []ma.Multiaddr, dataIP net.IP) (*peer.AddrInfo, error)
     }
     return nil, fmt.Errorf("no addr found with data ip %s in addrs: %v", dataIP, addrs)
 }
-
-func getLogLevel(t *TestEnvironment) int {
-    switch t.StringParam("drand_log_level") {
-    case "info":
-        return log.LogInfo
-    case "debug":
-        return log.LogDebug
-    default:
-        return log.LogNone
-    }
-}
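Note: after this refactor the drand node is driven through drand's own demo/node package, and a plan-side case only needs the two exported entry points kept here. A minimal usage sketch follows, under the assumption that this file lives in the lotus-soup testkit package; the runDrandNode wrapper itself is illustrative, not code from this commit.

package cases

import "github.com/filecoin-project/oni/lotus-soup/testkit"

// runDrandNode boots a drand node for the lifetime of the test run.
// PrepareDrandInstance runs the key/DKG ceremony and optionally starts the
// gossip relay; RunDefault then blocks until all instances signal completion
// and removes the node's temporary state directory via the deferred Cleanup.
func runDrandNode(t *testkit.TestEnvironment) error {
    dr, err := testkit.PrepareDrandInstance(t)
    if err != nil {
        return err
    }
    return dr.RunDefault()
}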
@@ -7,6 +7,7 @@ import (
     "time"

     "github.com/davecgh/go-spew/spew"
+
     "github.com/testground/sdk-go/run"
     "github.com/testground/sdk-go/runtime"
 )