// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"context"
	"crypto/ecdsa"
	"flag"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sync"
	"syscall"
	"testing"
	"time"

	"github.com/docker/docker/pkg/reexec"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/internal/cmdtest"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
)

var loglevel = flag.Int("loglevel", 3, "verbosity of logs")

func init() {
	// Run the app if we've been exec'd as "swarm-test" in runSwarm.
	reexec.Register("swarm-test", func() {
		if err := app.Run(os.Args); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		os.Exit(0)
	})
}
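
// serverFunc adapts swarmhttp.NewServer to the swarmhttp.TestServer shape so
// tests in this package can pass it where a test server constructor is needed.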
func serverFunc(api *api.API) swarmhttp.TestServer {
	return swarmhttp.NewServer(api, "")
}
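
// TestMain lets the test binary double as the swarm command: if the process
// has been re-exec'd (see init), reexec.Init runs the registered app and the
// tests are skipped; otherwise the tests run normally.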
func TestMain(m *testing.M) {
	// check if we have been reexec'd
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}
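
// runSwarm spawns the test binary as a "swarm-test" command with the given
// arguments and returns a handle for interacting with the running process.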
func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd {
	tt := cmdtest.NewTestCmd(t, nil)

	// Boot "swarm". This actually runs the test binary but the TestMain
	// function will prevent any tests from running.
	tt.Run("swarm-test", args...)

	return tt
}
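
// testCluster is a group of swarm test nodes sharing a single temporary
// directory that holds their data directories.
//
// A minimal usage sketch (assuming a *testing.T named t):
//
//	cluster := newTestCluster(t, 2)
//	defer cluster.Shutdown()
//	// interact with cluster.Nodes[0].URL or cluster.Nodes[0].Client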
type testCluster struct {
	Nodes  []*testNode
	TmpDir string
}

// newTestCluster starts a test swarm cluster of the given size.
//
// A temporary directory is created and each node gets a data directory inside
// it.
//
// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p
// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports
// as flags).
//
// When starting more than one node, they are connected together using the
// admin AddPeer RPC method.
func newTestCluster(t *testing.T, size int) *testCluster {
	cluster := &testCluster{}
	defer func() {
		if t.Failed() {
			cluster.Shutdown()
		}
	}()

	tmpdir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	cluster.TmpDir = tmpdir

	// start the nodes
	cluster.StartNewNodes(t, size)

	if size == 1 {
		return cluster
	}

	// connect the nodes together
	for _, node := range cluster.Nodes {
		if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil {
			t.Fatal(err)
		}
	}

	// wait until all nodes have the correct number of peers
outer:
	for _, node := range cluster.Nodes {
		var peers []*p2p.PeerInfo
		for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) {
			if err := node.Client.Call(&peers, "admin_peers"); err != nil {
				t.Fatal(err)
			}
			if len(peers) == len(cluster.Nodes)-1 {
				continue outer
			}
		}
		t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1)
	}

	return cluster
}
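
// Shutdown stops all cluster nodes and removes the cluster's temporary
// directory.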
func (c *testCluster) Shutdown() {
	for _, node := range c.Nodes {
		node.Shutdown()
	}
	os.RemoveAll(c.TmpDir)
}
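
// Stop terminates all node processes without removing their data directories,
// so the nodes can be started again later.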
func (c *testCluster) Stop() {
	for _, node := range c.Nodes {
		node.Shutdown()
	}
}
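
// StartNewNodes creates a fresh data directory for each of size nodes inside
// the cluster's TmpDir and starts them.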
func (c *testCluster) StartNewNodes(t *testing.T, size int) {
	c.Nodes = make([]*testNode, 0, size)
	for i := 0; i < size; i++ {
		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
		if err := os.Mkdir(dir, 0700); err != nil {
			t.Fatal(err)
		}

		node := newTestNode(t, dir)
		node.Name = fmt.Sprintf("swarm%02d", i)

		c.Nodes = append(c.Nodes, node)
	}
}
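
// StartExistingNodes restarts size nodes from the data directories already
// present in the cluster's TmpDir, unlocking the given bzzaccount.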
func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
	c.Nodes = make([]*testNode, 0, size)
	for i := 0; i < size; i++ {
		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
		node := existingTestNode(t, dir, bzzaccount)
		node.Name = fmt.Sprintf("swarm%02d", i)

		c.Nodes = append(c.Nodes, node)
	}
}
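
// Cleanup removes the cluster's temporary directory.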
func (c *testCluster) Cleanup() {
	os.RemoveAll(c.TmpDir)
}
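
// testNode holds the state of a single running swarm node: its process, its
// RPC client, and the addresses and paths needed to reach it.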
type testNode struct {
	Name       string
	Addr       string
	URL        string
	Enode      string
	Dir        string
	IpcPath    string
	PrivateKey *ecdsa.PrivateKey
	Client     *rpc.Client
	Cmd        *cmdtest.TestCmd
}

const testPassphrase = "swarm-test-passphrase"
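
// getTestAccount creates a node configuration rooted in dir and a new
// keystore account protected by testPassphrase.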
func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) {
	// create key
	conf = &node.Config{
		DataDir: dir,
		IPCPath: "bzzd.ipc",
		NoUSB:   true,
	}
	n, err := node.New(conf)
	if err != nil {
		t.Fatal(err)
	}
	account, err = n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
	if err != nil {
		t.Fatal(err)
	}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String())
	}

	return conf, account
}
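
// existingTestNode starts a swarm node that reuses the data directory in dir
// and the already existing bzzaccount.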
func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
	conf, _ := getTestAccount(t, dir)
	node := &testNode{Dir: dir}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
	}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--nodiscover",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", bzzaccount,
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same ones
	// when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}
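
// newTestNode creates a new account in dir and starts a fresh swarm node
// using it.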
func newTestNode(t *testing.T, dir string) *testNode {
	conf, account := getTestAccount(t, dir)
	ks := keystore.NewKeyStore(path.Join(dir, "keystore"), 1<<18, 1)

	pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase})

	node := &testNode{Dir: dir, PrivateKey: pk}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--nodiscover",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", account.Address.String(),
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same ones
	// when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}
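
// Shutdown kills the node's process if it is still running.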
func (n *testNode) Shutdown() {
	if n.Cmd != nil {
		n.Cmd.Kill()
	}
}

// getAvailableTCPPorts returns a set of ports that
// nothing is listening on at the time.
//
// Function assignTCPPort cannot be called in sequence
// and guarantee that different ports are returned across
// calls, as its listener is closed within the function
// rather than after all listeners are started and unique
// available ports selected.
func getAvailableTCPPorts(count int) (ports []string, err error) {
	for i := 0; i < count; i++ {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			return nil, err
		}
		// defer close in the loop to be sure the same port will not
		// be selected in the next iteration
		defer l.Close()

		_, port, err := net.SplitHostPort(l.Addr().String())
		if err != nil {
			return nil, err
		}
		ports = append(ports, port)
	}
	return ports, nil
}

// waitTCPPorts blocks until tcp connections can be
// established on all provided ports. It runs all
// port dialers in parallel, and returns the first
// encountered error.
// See waitTCPPort also.
func waitTCPPorts(ctx context.Context, ports ...string) error {
	var err error
	// mu locks the err variable that is assigned in
	// other goroutines
	var mu sync.Mutex

	// cancel cancels all goroutines
	// when the first error is returned
	// to prevent unnecessary waiting
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	for _, port := range ports {
		wg.Add(1)
		go func(port string) {
			defer wg.Done()

			e := waitTCPPort(ctx, port)

			mu.Lock()
			defer mu.Unlock()
			if e != nil && err == nil {
				err = e
				cancel()
			}
		}(port)
	}
	wg.Wait()

	return err
}

// waitTCPPort blocks until a tcp connection can be established
// on a provided port. It has a maximum timeout of 3 minutes
// to prevent long waiting, but it can be shortened with
// a provided context instance. The dialer has a 10 second timeout
// in every iteration, and a connection refused error will be
// retried in 100 millisecond periods.
func waitTCPPort(ctx context.Context, port string) error {
	ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
	defer cancel()

	for {
		c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port)
		if err != nil {
			if operr, ok := err.(*net.OpError); ok {
				if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED {
					time.Sleep(100 * time.Millisecond)
					continue
				}
			}
			return err
		}
		return c.Close()
	}
}