package main

import (
	"context"
	"crypto/rand"
	"fmt"
	"io"
	goruntime "runtime"
	"strings"
	"time"

	"github.com/dustin/go-humanize"
	allselector "github.com/hannahhoward/all-selector"
	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-cid"
	ds "github.com/ipfs/go-datastore"
	dss "github.com/ipfs/go-datastore/sync"
	"github.com/ipfs/go-graphsync/storeutil"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	chunk "github.com/ipfs/go-ipfs-chunker"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	files "github.com/ipfs/go-ipfs-files"
	format "github.com/ipfs/go-ipld-format"
	"github.com/ipfs/go-merkledag"
	"github.com/ipfs/go-unixfs/importer/balanced"
	ihelper "github.com/ipfs/go-unixfs/importer/helpers"
	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
	"github.com/libp2p/go-libp2p-core/metrics"
	"github.com/testground/sdk-go/network"
	"golang.org/x/sync/errgroup"

	gs "github.com/ipfs/go-graphsync"
	gsi "github.com/ipfs/go-graphsync/impl"
	gsnet "github.com/ipfs/go-graphsync/network"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/peer"
	noise "github.com/libp2p/go-libp2p-noise"
	secio "github.com/libp2p/go-libp2p-secio"
	tls "github.com/libp2p/go-libp2p-tls"

	"github.com/testground/sdk-go/run"
	"github.com/testground/sdk-go/runtime"
	"github.com/testground/sdk-go/sync"
)

var testcases = map[string]interface{}{
	"stress": run.InitializedTestCaseFn(runStress),
}

func main() {
	run.InvokeMap(testcases)
}
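
// networkParams holds the link shape applied for a single test round: the
// latency and bandwidth that the provider asks the sidecar to impose on the
// data network.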
type networkParams struct {
	latency   time.Duration
	bandwidth uint64
}

func (p networkParams) String() string {
	return fmt.Sprintf("<lat: %s, bandwidth: %d>", p.latency, p.bandwidth)
}
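
// runStress is the only test case in this plan. Instances are split into two
// groups: a single "providers" instance, which seeds random UnixFS DAGs, and
// one or more "requestors", which fetch those DAGs over graphsync. Both sides
// step through the same matrix of network parameters in lockstep.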
func runStress(runenv *runtime.RunEnv, initCtx *run.InitContext) error {
	var (
		size        = runenv.SizeParam("size")
		concurrency = runenv.IntParam("concurrency")

		networkParams = parseNetworkConfig(runenv)
	)
	runenv.RecordMessage("started test instance")
	runenv.RecordMessage("network params: %v", networkParams)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
	defer cancel()

	initCtx.MustWaitAllInstancesInitialized(ctx)

	host, peers, _ := makeHost(ctx, runenv, initCtx)
	defer host.Close()

	var (
		// make datastore, blockstore, dag service, graphsync
		bs     = blockstore.NewBlockstore(dss.MutexWrap(ds.NewMapDatastore()))
		dagsrv = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
		gsync  = gsi.New(ctx,
			gsnet.NewFromLibp2pHost(host),
			storeutil.LoaderForBlockstore(bs),
			storeutil.StorerForBlockstore(bs),
		)
	)
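
	// Signal "done" on exit and wait until every instance has done the same,
	// so no instance tears the run down while others are still transferring.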
	defer initCtx.SyncClient.MustSignalAndWait(ctx, "done", runenv.TestInstanceCount)

	switch runenv.TestGroupID {
	case "providers":
		if runenv.TestGroupInstanceCount > 1 {
			panic("test case only supports one provider")
		}

		runenv.RecordMessage("we are the provider")
		defer runenv.RecordMessage("done provider")

		gsync.RegisterIncomingRequestHook(func(p peer.ID, request gs.RequestData, hookActions gs.IncomingRequestHookActions) {
			hookActions.ValidateRequest()
		})

		return runProvider(ctx, runenv, initCtx, dagsrv, size, networkParams, concurrency)

	case "requestors":
		runenv.RecordMessage("we are the requestor")
		defer runenv.RecordMessage("done requestor")

		p := *peers[0]
		if err := host.Connect(ctx, p); err != nil {
			return err
		}
		runenv.RecordMessage("done dialling provider")
		return runRequestor(ctx, runenv, initCtx, gsync, p, dagsrv, networkParams, concurrency, size)

	default:
		panic("unsupported group ID")
	}
}
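
// parseNetworkConfig expands the "latencies" and "bandwidths" test parameters
// into the matrix of rounds to run. A zero value is prepended to each list so
// that round 0 is a control round with no traffic shaping. For example
// (illustrative values), latencies=["100ms"] and bandwidths=["1MiB"] expand to
// 2x2 = 4 rounds: (0,0), (0,100ms), (1MiB,0) and (1MiB,100ms).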
func parseNetworkConfig(runenv *runtime.RunEnv) []networkParams {
	var (
		bandwidths = runenv.SizeArrayParam("bandwidths")
		latencies  []time.Duration
	)

	lats := runenv.StringArrayParam("latencies")
	for _, l := range lats {
		d, err := time.ParseDuration(l)
		if err != nil {
			panic(err)
		}
		latencies = append(latencies, d)
	}

	// prepend bandwidth=0 and latency=0 zero values; the first iteration will
	// be a control iteration. The sidecar interprets zero values as no
	// limitation on that attribute.
	bandwidths = append([]uint64{0}, bandwidths...)
	latencies = append([]time.Duration{0}, latencies...)

	var ret []networkParams
	for _, bandwidth := range bandwidths {
		for _, latency := range latencies {
			ret = append(ret, networkParams{
				latency:   latency,
				bandwidth: bandwidth,
			})
		}
	}
	return ret
}
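
// runRequestor drives the requestor side of every round: it waits for the
// provider to publish the round's root CIDs, then fetches each CID over
// graphsync in a separate goroutine and records the observed duration of each
// transfer as a result point.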
func runRequestor(ctx context.Context, runenv *runtime.RunEnv, initCtx *run.InitContext, gsync gs.GraphExchange, p peer.AddrInfo, dagsrv format.DAGService, networkParams []networkParams, concurrency int, size uint64) error {
	var (
		cids []cid.Cid
		// create a selector for the whole UnixFS dag
		sel = allselector.AllSelector
	)

	for round, np := range networkParams {
		var (
			topicCid  = sync.NewTopic(fmt.Sprintf("cid-%d", round), []cid.Cid{})
			stateNext = sync.State(fmt.Sprintf("next-%d", round))
			stateNet  = sync.State(fmt.Sprintf("network-configured-%d", round))
		)

		// wait for all instances to be ready for the next state.
		initCtx.SyncClient.MustSignalAndWait(ctx, stateNext, runenv.TestInstanceCount)

		// clean up previous CIDs to attempt to free memory
		// TODO does this work?
		_ = dagsrv.RemoveMany(ctx, cids)

		runenv.RecordMessage("===== ROUND %d: latency=%s, bandwidth=%d =====", round, np.latency, np.bandwidth)

		sctx, scancel := context.WithCancel(ctx)
		cidCh := make(chan []cid.Cid, 1)
		initCtx.SyncClient.MustSubscribe(sctx, topicCid, cidCh)
		cids = <-cidCh
		scancel()

		// run GC to get accurate-ish stats.
		goruntime.GC()
		goruntime.GC()

		<-initCtx.SyncClient.MustBarrier(ctx, stateNet, 1).C

		errgrp, grpctx := errgroup.WithContext(ctx)
		for _, c := range cids {
			c := c   // capture
			np := np // capture

			errgrp.Go(func() error {
				// make a go-ipld-prime link for the root UnixFS node
				clink := cidlink.Link{Cid: c}

				// execute the traversal.
				runenv.RecordMessage("\t>>> requesting CID %s", c)

				start := time.Now()
				_, errCh := gsync.Request(grpctx, p.ID, clink, sel)
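				// the request is finished once graphsync closes the error
				// channel; any error received aborts this traversal.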
				for err := range errCh {
					return err
				}
				dur := time.Since(start)

				runenv.RecordMessage("\t<<< request complete with no errors")
				runenv.RecordMessage("***** ROUND %d observed duration (lat=%s,bw=%d): %s", round, np.latency, np.bandwidth, dur)
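
				// record the duration under a metric name that encodes the round
				// parameters, e.g. "duration.sec,lat=100ms,bw=1.0MiB,concurrency=4,size=10MB"
				// (values illustrative); spaces are stripped so the name stays one token.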
				measurement := fmt.Sprintf("duration.sec,lat=%s,bw=%s,concurrency=%d,size=%s", np.latency, humanize.IBytes(np.bandwidth), concurrency, humanize.Bytes(size))
				measurement = strings.Replace(measurement, " ", "", -1)
				runenv.R().RecordPoint(measurement, float64(dur)/float64(time.Second))

				// verify that we have the CID now.
				if node, err := dagsrv.Get(grpctx, c); err != nil {
					return err
				} else if node == nil {
					return fmt.Errorf("finished graphsync request, but CID not in store")
				}

				return nil
			})
		}

		if err := errgrp.Wait(); err != nil {
			return err
		}
	}

	return nil
}
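
// runProvider drives the provider side of every round: it imports `concurrency`
// random files of `size` bytes each as UnixFS DAGs, publishes their root CIDs
// on the round's sync topic, and then asks the sidecar to shape the data
// network with the round's latency and bandwidth.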
func runProvider(ctx context.Context, runenv *runtime.RunEnv, initCtx *run.InitContext, dagsrv format.DAGService, size uint64, networkParams []networkParams, concurrency int) error {
	var (
		cids       []cid.Cid
		bufferedDS = format.NewBufferedDAG(ctx, dagsrv)
	)

	for round, np := range networkParams {
		var (
			topicCid  = sync.NewTopic(fmt.Sprintf("cid-%d", round), []cid.Cid{})
			stateNext = sync.State(fmt.Sprintf("next-%d", round))
			stateNet  = sync.State(fmt.Sprintf("network-configured-%d", round))
		)

		// wait for all instances to be ready for the next state.
		initCtx.SyncClient.MustSignalAndWait(ctx, stateNext, runenv.TestInstanceCount)

		// remove the previous CIDs from the dag service; hopefully this
		// will delete them from the store and free up memory.
		for _, c := range cids {
			_ = dagsrv.Remove(ctx, c)
		}
		cids = cids[:0]

		runenv.RecordMessage("===== ROUND %d: latency=%s, bandwidth=%d =====", round, np.latency, np.bandwidth)

		// generate as many random files as the concurrency level.
		for i := 0; i < concurrency; i++ {
			// file with random data
			file := files.NewReaderFile(io.LimitReader(rand.Reader, int64(size)))
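
			// import the file as a balanced UnixFS DAG: 1 MiB raw-leaf chunks,
			// up to 1024 links per internal node.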
			const unixfsChunkSize uint64 = 1 << 20
			const unixfsLinksPerLevel = 1024

			params := ihelper.DagBuilderParams{
				Maxlinks:   unixfsLinksPerLevel,
				RawLeaves:  true,
				CidBuilder: nil,
				Dagserv:    bufferedDS,
			}

			db, err := params.New(chunk.NewSizeSplitter(file, int64(unixfsChunkSize)))
			if err != nil {
				return fmt.Errorf("unable to setup dag builder: %w", err)
			}

			node, err := balanced.Layout(db)
			if err != nil {
				return fmt.Errorf("unable to create unix fs node: %w", err)
			}

			cids = append(cids, node.Cid())
		}

		if err := bufferedDS.Commit(); err != nil {
			return fmt.Errorf("unable to commit unix fs node: %w", err)
		}

		// run GC to get accurate-ish stats.
		goruntime.GC()
		goruntime.GC()

		runenv.RecordMessage("\tCIDs are: %v", cids)
		initCtx.SyncClient.MustPublish(ctx, topicCid, cids)

		runenv.RecordMessage("\tconfiguring network for round %d", round)
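		// np.bandwidth comes from a size parameter and is in bytes/s, while the
		// link shape expects bits/s, hence the *8 below.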
		initCtx.NetClient.MustConfigureNetwork(ctx, &network.Config{
			Network: "default",
			Enable:  true,
			Default: network.LinkShape{
				Latency:   np.latency,
				Bandwidth: np.bandwidth * 8, // bps
			},
			CallbackState:  stateNet,
			CallbackTarget: 1,
		})
		runenv.RecordMessage("\tnetwork configured for round %d", round)
	}

	return nil
}
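
// makeHost constructs the libp2p host for this instance, publishes its
// AddrInfo on the "peers" sync topic, and collects the AddrInfos of every
// other instance in the run, returning them along with a bandwidth counter.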
func makeHost(ctx context.Context, runenv *runtime.RunEnv, initCtx *run.InitContext) (host.Host, []*peer.AddrInfo, *metrics.BandwidthCounter) {
	secureChannel := runenv.StringParam("secure_channel")

	var security libp2p.Option
	switch secureChannel {
	case "noise":
		security = libp2p.Security(noise.ID, noise.New)
	case "secio":
		security = libp2p.Security(secio.ID, secio.New)
	case "tls":
		security = libp2p.Security(tls.ID, tls.New)
	default:
		// fail fast instead of passing a nil security option to libp2p.New.
		panic(fmt.Sprintf("unsupported secure_channel: %s", secureChannel))
	}

	// ☎️ Let's construct the libp2p node.
	ip := initCtx.NetClient.MustGetDataNetworkIP()
	listenAddr := fmt.Sprintf("/ip4/%s/tcp/0", ip)
	bwcounter := metrics.NewBandwidthCounter()
	host, err := libp2p.New(ctx,
		security,
		libp2p.ListenAddrStrings(listenAddr),
		libp2p.BandwidthReporter(bwcounter),
	)
	if err != nil {
		panic(fmt.Sprintf("failed to instantiate libp2p instance: %s", err))
	}

	// Record our listen addrs.
	runenv.RecordMessage("my listen addrs: %v", host.Addrs())

	// Obtain our own address info, and use the sync service to publish it to a
	// 'peersTopic' topic, where others will read from.
	var (
		id = host.ID()
		ai = &peer.AddrInfo{ID: id, Addrs: host.Addrs()}

		// the peers topic where all instances will advertise their AddrInfo.
		peersTopic = sync.NewTopic("peers", new(peer.AddrInfo))

		// initialize a slice to store the AddrInfos of all other peers in the run.
		peers = make([]*peer.AddrInfo, 0, runenv.TestInstanceCount-1)
	)

	// Publish our own.
	initCtx.SyncClient.MustPublish(ctx, peersTopic, ai)

	// Now subscribe to the peers topic and consume all addresses, storing them
	// in the peers slice.
	peersCh := make(chan *peer.AddrInfo)
	sctx, scancel := context.WithCancel(ctx)
	defer scancel()

	sub := initCtx.SyncClient.MustSubscribe(sctx, peersTopic, peersCh)

	// Receive the expected number of AddrInfos.
	for len(peers) < cap(peers) {
		select {
		case ai := <-peersCh:
			if ai.ID == id {
				continue // skip over ourselves.
			}
			peers = append(peers, ai)
		case err := <-sub.Done():
			panic(err)
		}
	}

	return host, peers, bwcounter
}