package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"

	"github.com/filecoin-project/lotus/build"

	"github.com/testground/sdk-go/sync"
)
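
// runBootstrapper prepares the bootstrapper node and then waits for all test
// instances to reach the done state.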
func runBootstrapper(t *TestEnvironment) error {
	t.RecordMessage("running bootstrapper")
	_, err := prepareBootstrapper(t)
	if err != nil {
		return err
	}

	ctx := context.Background()
	t.SyncClient.MustSignalAndWait(ctx, stateDone, t.TestInstanceCount)
	return nil
}
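
// runMiner prepares a miner node and mines blocks in lockstep with the other
// miners until every client signals that mining should stop; it then waits
// for all test instances to reach the done state.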
func runMiner(t *TestEnvironment) error {
	t.RecordMessage("running miner")
	miner, err := prepareMiner(t)
	if err != nil {
		return err
	}

	t.RecordMessage("block delay: %v", build.BlockDelay)
	t.D().Gauge("miner.block-delay").Update(build.BlockDelay)

	ctx := context.Background()

	clients := t.IntParam("clients")
	miners := t.IntParam("miners")

	// mine / stop mining
	mine := true
	done := make(chan struct{})
	go func() {
		defer close(done)
		var i int
		for i = 0; mine; i++ {
			// synchronize all miners to mine the next block
			t.RecordMessage("synchronizing all miners to mine next block [%d]", i)
			stateMineNext := sync.State(fmt.Sprintf("mine-block-%d", i))
			t.SyncClient.MustSignalAndWait(ctx, stateMineNext, miners)

			// add some random delay to encourage a different miner winning each round
			time.Sleep(time.Duration(100 + rand.Intn(int(100*time.Millisecond))))

			err := miner.MineOne(ctx, func(bool) {
				t.D().Counter("miner.mine").Inc(1)
				// after a block is mined
			})
			if err != nil {
				panic(err)
			}
		}

		// signal the last block to make sure no miners are left stuck waiting for the next block signal
		// while the others have stopped
		stateMineLast := sync.State(fmt.Sprintf("mine-block-%d", i))
		t.SyncClient.MustSignalEntry(ctx, stateMineLast)
	}()

	// wait for a signal from all clients to stop mining
	err = <-t.SyncClient.MustBarrier(ctx, stateStopMining, clients).C
	if err != nil {
		return err
	}

	mine = false
	t.RecordMessage("shutting down mining")
	<-done

	t.SyncClient.MustSignalAndWait(ctx, stateDone, t.TestInstanceCount)
	return nil
}
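
// runDrandNode prepares a drand node, waits for all test instances to reach
// the done state, and cleans up on exit.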
func runDrandNode(t *TestEnvironment) error {
	t.RecordMessage("running drand node")
	dr, err := prepareDrandNode(t)
	if err != nil {
		return err
	}
	defer dr.Cleanup()

	// TODO add ability to halt / recover on demand

	ctx := context.Background()
	t.SyncClient.MustSignalAndWait(ctx, stateDone, t.TestInstanceCount)
	return nil
}