cmd/swarm, swarm/api/http, swarm/bmt, swarm/fuse, swarm/network/stream, swarm/storage, swarm/storage/encryption, swarm/testutil: use pseudo-random instead of crypto-random for test file content generation (#18083)
- Replace "crypto/rand" with "math/rand" for test file content generation
- Remove swarm/network_test.go.Shuffle and swarm/bmt/bmt_test.go.Shuffle, because go 1.9 support was dropped (see https://github.com/ethereum/go-ethereum/pull/17807 and the comments on swarm/network_test.go.Shuffle)
This commit is contained in:
parent cff97119a7
commit eb8fa3cc89
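The hunks below repeatedly swap per-test crypto/rand boilerplate for two shared helpers in swarm/testutil, testutil.RandomBytes and testutil.TempFileWithContent. As a rough sketch of the shape of those helpers: the call signatures below match how they are used in this diff, but the bodies (a seeded math/rand source, ioutil.TempFile) are illustrative assumptions, not the actual go-ethereum implementation.

package testutil

import (
	"io/ioutil"
	"math/rand"
	"testing"
)

// RandomBytes returns deterministic pseudo-random data: the same seed always
// yields the same bytes, so a failing test can be reproduced without crypto/rand.
// (Sketch only; the real swarm/testutil implementation may differ.)
func RandomBytes(seed, length int) []byte {
	b := make([]byte, length)
	r := rand.New(rand.NewSource(int64(seed)))
	if _, err := r.Read(b); err != nil {
		panic(err)
	}
	return b
}

// TempFileWithContent writes content to a fresh temporary file and returns its
// name; callers remove the file themselves (defer os.Remove(name)), as the tests
// in this diff do. (Sketch only.)
func TempFileWithContent(t *testing.T, content string) string {
	t.Helper()
	f, err := ioutil.TempFile("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	if _, err := f.WriteString(content); err != nil {
		t.Fatal(err)
	}
	return f.Name()
}

Seeding math/rand per call keeps the generated test content cheap and reproducible across runs, which is the point of moving away from crypto/rand here.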
@@ -38,6 +38,7 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/api"
 	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
@@ -54,7 +55,7 @@ var DefaultCurve = crypto.S256()
 // is then fetched through 2nd node. since the tested code is not key-aware - we can just
 // fetch from the 2nd node using HTTP BasicAuth
 func TestAccessPassword(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	dataFilename := testutil.TempFileWithContent(t, data)
@@ -19,9 +19,7 @@ package main
 import (
 	"bytes"
 	"crypto/md5"
-	"crypto/rand"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"runtime"
@@ -29,6 +27,7 @@ import (
 	"testing"
 
 	"github.com/ethereum/go-ethereum/swarm"
+	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
 // TestCLISwarmExportImport perform the following test:
@@ -45,11 +44,12 @@ func TestCLISwarmExportImport(t *testing.T) {
 	cluster := newTestCluster(t, 1)
 
 	// generate random 10mb file
-	f, cleanup := generateRandomFile(t, 10000000)
-	defer cleanup()
+	content := testutil.RandomBytes(1, 10000000)
+	fileName := testutil.TempFileWithContent(t, string(content))
+	defer os.Remove(fileName)
 
 	// upload the file with 'swarm up' and expect a hash
-	up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
+	up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", fileName)
 	_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
 	up.ExpectExit()
 	hash := matches[0]
@@ -96,7 +96,7 @@ func TestCLISwarmExportImport(t *testing.T) {
 	}
 
 	// compare downloaded file with the generated random file
-	mustEqualFiles(t, f, res.Body)
+	mustEqualFiles(t, bytes.NewReader(content), res.Body)
 }
 
 func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
@@ -117,27 +117,3 @@ func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
 		t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one mp5=%x (length %v)", downHash, downLen, upHash, upLen)
 	}
 }
-
-func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
-	// create a tmp file
-	tmp, err := ioutil.TempFile("", "swarm-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// callback for tmp file cleanup
-	teardown = func() {
-		tmp.Close()
-		os.Remove(tmp.Name())
-	}
-
-	// write 10mb random data to file
-	buf := make([]byte, 10000000)
-	_, err = rand.Read(buf)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ioutil.WriteFile(tmp.Name(), buf, 0755)
-
-	return tmp, teardown
-}
@@ -20,49 +20,37 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
-	"io"
 	"io/ioutil"
 	"os"
 	"testing"
 
-	"github.com/ethereum/go-ethereum/swarm/api"
-	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-	"github.com/ethereum/go-ethereum/swarm/testutil"
-
-	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/swarm/storage/feed"
-
 	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/swarm/api"
 	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
 	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
+	"github.com/ethereum/go-ethereum/swarm/storage/feed"
+	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
+	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
 func TestCLIFeedUpdate(t *testing.T) {
 
-	srv := testutil.NewTestSwarmServer(t, func(api *api.API) testutil.TestServer {
+	srv := swarmhttp.NewTestSwarmServer(t, func(api *api.API) swarmhttp.TestServer {
 		return swarmhttp.NewServer(api, "")
 	}, nil)
 	log.Info("starting a test swarm server")
 	defer srv.Close()
 
 	// create a private key file for signing
-	pkfile, err := ioutil.TempFile("", "swarm-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer pkfile.Close()
-	defer os.Remove(pkfile.Name())
 
 	privkeyHex := "0000000000000000000000000000000000000000000000000000000000001979"
 	privKey, _ := crypto.HexToECDSA(privkeyHex)
 	address := crypto.PubkeyToAddress(privKey.PublicKey)
 
-	// save the private key to a file
-	_, err = io.WriteString(pkfile, privkeyHex)
-	if err != nil {
-		t.Fatal(err)
-	}
+	pkFileName := testutil.TempFileWithContent(t, privkeyHex)
+	defer os.Remove(pkFileName)
 
 	// compose a topic. We'll be doing quotes about Miguel de Cervantes
 	var topic feed.Topic
@@ -76,7 +64,7 @@ func TestCLIFeedUpdate(t *testing.T) {
 
 	flags := []string{
 		"--bzzapi", srv.URL,
-		"--bzzaccount", pkfile.Name(),
+		"--bzzaccount", pkFileName,
 		"feed", "update",
 		"--topic", topic.Hex(),
 		"--name", name,
@@ -89,13 +77,10 @@ func TestCLIFeedUpdate(t *testing.T) {
 
 	// now try to get the update using the client
 	client := swarm.NewClient(srv.URL)
-	if err != nil {
-		t.Fatal(err)
-	}
 
 	// build the same topic as before, this time
 	// we use NewTopic to create a topic automatically.
-	topic, err = feed.NewTopic(name, subject)
+	topic, err := feed.NewTopic(name, subject)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -153,7 +138,7 @@ func TestCLIFeedUpdate(t *testing.T) {
 	// test publishing a manifest
 	flags = []string{
 		"--bzzapi", srv.URL,
-		"--bzzaccount", pkfile.Name(),
+		"--bzzaccount", pkFileName,
 		"feed", "create",
 		"--topic", topic.Hex(),
 	}
@@ -26,7 +26,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/swarm/api"
 	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
-	"github.com/ethereum/go-ethereum/swarm/testutil"
+	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
 )
 
 // TestManifestChange tests manifest add, update and remove
@@ -58,7 +58,7 @@ func TestManifestChangeEncrypted(t *testing.T) {
 // Argument encrypt controls whether to use encryption or not.
 func testManifestChange(t *testing.T, encrypt bool) {
 	t.Parallel()
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	tmp, err := ioutil.TempDir("", "swarm-manifest-test")
@@ -430,7 +430,7 @@ func TestNestedDefaultEntryUpdateEncrypted(t *testing.T) {
 
 func testNestedDefaultEntryUpdate(t *testing.T, encrypt bool) {
 	t.Parallel()
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	tmp, err := ioutil.TempDir("", "swarm-manifest-test")
@@ -42,7 +42,6 @@ import (
 	"github.com/ethereum/go-ethereum/swarm"
 	"github.com/ethereum/go-ethereum/swarm/api"
 	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
-	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
 var loglevel = flag.Int("loglevel", 3, "verbosity of logs")
@@ -58,7 +57,7 @@ func init() {
 	})
 }
 
-func serverFunc(api *api.API) testutil.TestServer {
+func serverFunc(api *api.API) swarmhttp.TestServer {
 	return swarmhttp.NewServer(api, "")
 }
 func TestMain(m *testing.M) {
@@ -32,6 +32,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/log"
 	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
 	"github.com/mattn/go-colorable"
 )
@@ -77,33 +78,22 @@ func testCLISwarmUp(toEncrypt bool, t *testing.T) {
 	cluster := newTestCluster(t, 3)
 	defer cluster.Shutdown()
 
-	// create a tmp file
-	tmp, err := ioutil.TempFile("", "swarm-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer tmp.Close()
-	defer os.Remove(tmp.Name())
+	tmpFileName := testutil.TempFileWithContent(t, data)
+	defer os.Remove(tmpFileName)
 
 	// write data to file
-	data := "notsorandomdata"
-	_, err = io.WriteString(tmp, data)
-	if err != nil {
-		t.Fatal(err)
-	}
-
 	hashRegexp := `[a-f\d]{64}`
 	flags := []string{
 		"--bzzapi", cluster.Nodes[0].URL,
 		"up",
-		tmp.Name()}
+		tmpFileName}
 	if toEncrypt {
 		hashRegexp = `[a-f\d]{128}`
 		flags = []string{
 			"--bzzapi", cluster.Nodes[0].URL,
 			"up",
 			"--encrypt",
-			tmp.Name()}
+			tmpFileName}
 	}
 	// upload the file with 'swarm up' and expect a hash
 	log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
@@ -203,7 +193,6 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
 	}
 	defer os.RemoveAll(tmpUploadDir)
 	// create tmp files
-	data := "notsorandomdata"
 	for _, path := range []string{"tmp1", "tmp2"} {
 		if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil {
 			t.Fatal(err)
@@ -298,7 +287,7 @@ func TestCLISwarmUpDefaultPath(t *testing.T) {
 }
 
 func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	tmp, err := ioutil.TempDir("", "swarm-defaultpath-test")
@@ -33,10 +33,9 @@ import (
 	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
 	"github.com/ethereum/go-ethereum/swarm/multihash"
 	"github.com/ethereum/go-ethereum/swarm/storage/feed"
-	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
-func serverFunc(api *api.API) testutil.TestServer {
+func serverFunc(api *api.API) swarmhttp.TestServer {
 	return swarmhttp.NewServer(api, "")
 }
 
@@ -49,7 +48,7 @@ func TestClientUploadDownloadRawEncrypted(t *testing.T) {
 }
 
 func testClientUploadDownloadRaw(toEncrypt bool, t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	client := NewClient(srv.URL)
@@ -90,7 +89,7 @@ func TestClientUploadDownloadFilesEncrypted(t *testing.T) {
 }
 
 func testClientUploadDownloadFiles(toEncrypt bool, t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	client := NewClient(srv.URL)
@@ -188,7 +187,7 @@ func newTestDirectory(t *testing.T) string {
 // TestClientUploadDownloadDirectory tests uploading and downloading a
 // directory of files to a swarm manifest
 func TestClientUploadDownloadDirectory(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	dir := newTestDirectory(t)
@@ -254,7 +253,7 @@ func TestClientFileListEncrypted(t *testing.T) {
 }
 
 func testClientFileList(toEncrypt bool, t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	dir := newTestDirectory(t)
@@ -312,7 +311,7 @@ func testClientFileList(toEncrypt bool, t *testing.T) {
 // TestClientMultipartUpload tests uploading files to swarm using a multipart
 // upload
 func TestClientMultipartUpload(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	// define an uploader which uploads testDirFiles with some data
@@ -378,7 +377,7 @@ func TestClientCreateFeedMultihash(t *testing.T) {
 
 	signer, _ := newTestSigner()
 
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
 	client := NewClient(srv.URL)
 	defer srv.Close()
 
@@ -440,7 +439,7 @@ func TestClientCreateUpdateFeed(t *testing.T) {
 
 	signer, _ := newTestSigner()
 
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
 	client := NewClient(srv.URL)
 	defer srv.Close()
 
@@ -24,12 +24,10 @@ import (
 	"testing"
 
 	"golang.org/x/net/html"
-
-	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
 func TestError(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	var resp *http.Response
@@ -55,7 +53,7 @@ func TestError(t *testing.T) {
 }
 
 func Test404Page(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	var resp *http.Response
@@ -81,7 +79,7 @@ func Test404Page(t *testing.T) {
 }
 
 func Test500Page(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	var resp *http.Response
@@ -106,7 +104,7 @@ func Test500Page(t *testing.T) {
 	}
 }
 func Test500PageWith0xHashPrefix(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	var resp *http.Response
@@ -136,7 +134,7 @@ func Test500PageWith0xHashPrefix(t *testing.T) {
 }
 
 func TestJsonResponse(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	var resp *http.Response
@@ -20,7 +20,6 @@ import (
 	"archive/tar"
 	"bytes"
 	"context"
-	"crypto/rand"
 	"encoding/json"
 	"errors"
 	"flag"
@@ -58,7 +57,7 @@ func init() {
 	log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
 }
 
-func serverFunc(api *api.API) testutil.TestServer {
+func serverFunc(api *api.API) TestServer {
 	return NewServer(api, "")
 }
 
@@ -79,7 +78,7 @@ func TestBzzFeedMultihash(t *testing.T) {
 
 	signer, _ := newTestSigner()
 
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	// add the data our multihash aliased manifest will point to
@@ -167,26 +166,19 @@ func TestBzzFeedMultihash(t *testing.T) {
 
 // Test Swarm feeds using the raw update methods
 func TestBzzFeed(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	signer, _ := newTestSigner()
 
 	defer srv.Close()
 
 	// data of update 1
-	update1Data := make([]byte, 666)
+	update1Data := testutil.RandomBytes(1, 666)
 	update1Timestamp := srv.CurrentTime
-	_, err := rand.Read(update1Data)
-	if err != nil {
-		t.Fatal(err)
-	}
 	//data for update 2
 	update2Data := []byte("foo")
 
 	topic, _ := feed.NewTopic("foo.eth", nil)
 	updateRequest := feed.NewFirstRequest(topic)
-	if err != nil {
-		t.Fatal(err)
-	}
 	updateRequest.SetData(update1Data)
 
 	if err := updateRequest.Sign(signer); err != nil {
@@ -450,7 +442,7 @@ func testBzzGetPath(encrypted bool, t *testing.T) {
 
 	addr := [3]storage.Address{}
 
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	for i, mf := range testmanifest {
@@ -688,7 +680,7 @@ func TestBzzTar(t *testing.T) {
 }
 
 func testBzzTar(encrypted bool, t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 	fileNames := []string{"tmp1.txt", "tmp2.lock", "tmp3.rtf"}
 	fileContents := []string{"tmp1textfilevalue", "tmp2lockfilelocked", "tmp3isjustaplaintextfile"}
@@ -823,7 +815,7 @@ func TestBzzRootRedirectEncrypted(t *testing.T) {
 }
 
 func testBzzRootRedirect(toEncrypt bool, t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	// create a manifest with some data at the root path
@@ -878,7 +870,7 @@ func testBzzRootRedirect(toEncrypt bool, t *testing.T) {
 }
 
 func TestMethodsNotAllowed(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 	databytes := "bar"
 	for _, c := range []struct {
@@ -937,7 +929,7 @@ func httpDo(httpMethod string, url string, reqBody io.Reader, headers map[string
 }
 
 func TestGet(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	for _, testCase := range []struct {
@@ -1020,7 +1012,7 @@ func TestGet(t *testing.T) {
 }
 
 func TestModify(t *testing.T) {
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	swarmClient := swarm.NewClient(srv.URL)
@@ -1121,7 +1113,7 @@ func TestMultiPartUpload(t *testing.T) {
 	// POST /bzz:/ Content-Type: multipart/form-data
 	verbose := false
 	// Setup Swarm
-	srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
+	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
 
 	url := fmt.Sprintf("%s/bzz:/", srv.URL)
@@ -1152,7 +1144,7 @@ func TestMultiPartUpload(t *testing.T) {
 // TestBzzGetFileWithResolver tests fetching a file using a mocked ENS resolver
 func TestBzzGetFileWithResolver(t *testing.T) {
 	resolver := newTestResolveValidator("")
-	srv := testutil.NewTestSwarmServer(t, serverFunc, resolver)
+	srv := NewTestSwarmServer(t, serverFunc, resolver)
 	defer srv.Close()
 	fileNames := []string{"dir1/tmp1.txt", "dir2/tmp2.lock", "dir3/tmp3.rtf"}
 	fileContents := []string{"tmp1textfilevalue", "tmp2lockfilelocked", "tmp3isjustaplaintextfile"}
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package testutil
+package http
 
 import (
 	"io/ioutil"
@@ -18,10 +18,8 @@ package bmt
 
 import (
 	"bytes"
-	crand "crypto/rand"
 	"encoding/binary"
 	"fmt"
-	"io"
 	"math/rand"
 	"sync"
 	"sync/atomic"
@@ -29,6 +27,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/crypto/sha3"
+	"github.com/ethereum/go-ethereum/swarm/testutil"
 )
 
 // the actual data length generated (could be longer than max datalength of the BMT)
@@ -116,14 +115,11 @@ func TestRefHasher(t *testing.T) {
 	})
 
 	// run the tests
-	for _, x := range tests {
+	for i, x := range tests {
 		for segmentCount := x.from; segmentCount <= x.to; segmentCount++ {
 			for length := 1; length <= segmentCount*32; length++ {
 				t.Run(fmt.Sprintf("%d_segments_%d_bytes", segmentCount, length), func(t *testing.T) {
-					data := make([]byte, length)
-					if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
-						t.Fatal(err)
-					}
+					data := testutil.RandomBytes(i, length)
 					expected := x.expected(data)
 					actual := NewRefHasher(sha3.NewKeccak256, segmentCount).Hash(data)
 					if !bytes.Equal(actual, expected) {
@@ -156,7 +152,7 @@ func TestHasherEmptyData(t *testing.T) {
 
 // tests sequential write with entire max size written in one go
 func TestSyncHasherCorrectness(t *testing.T) {
-	data := newData(BufferSize)
+	data := testutil.RandomBytes(1, BufferSize)
 	hasher := sha3.NewKeccak256
 	size := hasher().Size()
 
@@ -182,7 +178,7 @@ func TestSyncHasherCorrectness(t *testing.T) {
 
 // tests order-neutral concurrent writes with entire max size written in one go
 func TestAsyncCorrectness(t *testing.T) {
-	data := newData(BufferSize)
+	data := testutil.RandomBytes(1, BufferSize)
 	hasher := sha3.NewKeccak256
 	size := hasher().Size()
 	whs := []whenHash{first, last, random}
@@ -236,7 +232,7 @@ func testHasherReuse(poolsize int, t *testing.T) {
 	bmt := New(pool)
 
 	for i := 0; i < 100; i++ {
-		data := newData(BufferSize)
+		data := testutil.RandomBytes(1, BufferSize)
 		n := rand.Intn(bmt.Size())
 		err := testHasherCorrectness(bmt, hasher, data, n, segmentCount)
 		if err != nil {
@@ -256,7 +252,7 @@ func TestBMTConcurrentUse(t *testing.T) {
 	for i := 0; i < cycles; i++ {
 		go func() {
 			bmt := New(pool)
			data := testutil.RandomBytes(1, BufferSize)
 			n := rand.Intn(bmt.Size())
 			errc <- testHasherCorrectness(bmt, hasher, data, n, 128)
 		}()
@@ -290,7 +286,7 @@ func TestBMTWriterBuffers(t *testing.T) {
 		defer pool.Drain(0)
 		n := count * 32
 		bmt := New(pool)
-		data := newData(n)
+		data := testutil.RandomBytes(1, n)
 		rbmt := NewRefHasher(hasher, count)
 		refHash := rbmt.Hash(data)
 		expHash := syncHash(bmt, nil, data)
@@ -413,7 +409,7 @@ func BenchmarkPool(t *testing.B) {
 
 // benchmarks simple sha3 hash on chunks
 func benchmarkSHA3(t *testing.B, n int) {
-	data := newData(n)
+	data := testutil.RandomBytes(1, n)
 	hasher := sha3.NewKeccak256
 	h := hasher()
 
@@ -432,7 +428,7 @@ func benchmarkSHA3(t *testing.B, n int) {
 func benchmarkBMTBaseline(t *testing.B, n int) {
 	hasher := sha3.NewKeccak256
 	hashSize := hasher().Size()
-	data := newData(hashSize)
+	data := testutil.RandomBytes(1, hashSize)
 
 	t.ReportAllocs()
 	t.ResetTimer()
@@ -456,7 +452,7 @@ func benchmarkBMTBaseline(t *testing.B, n int) {
 
 // benchmarks BMT Hasher
 func benchmarkBMT(t *testing.B, n int) {
-	data := newData(n)
+	data := testutil.RandomBytes(1, n)
 	hasher := sha3.NewKeccak256
 	pool := NewTreePool(hasher, segmentCount, PoolSize)
 	bmt := New(pool)
@@ -470,12 +466,12 @@ func benchmarkBMT(t *testing.B, n int) {
 
 // benchmarks BMT hasher with asynchronous concurrent segment/section writes
 func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
-	data := newData(n)
+	data := testutil.RandomBytes(1, n)
 	hasher := sha3.NewKeccak256
 	pool := NewTreePool(hasher, segmentCount, PoolSize)
 	bmt := New(pool).NewAsyncWriter(double)
 	idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
-	shuffle(len(idxs), func(i int, j int) {
+	rand.Shuffle(len(idxs), func(i int, j int) {
 		idxs[i], idxs[j] = idxs[j], idxs[i]
 	})
 
@@ -488,7 +484,7 @@ func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
 
 // benchmarks 100 concurrent bmt hashes with pool capacity
 func benchmarkPool(t *testing.B, poolsize, n int) {
-	data := newData(n)
+	data := testutil.RandomBytes(1, n)
 	hasher := sha3.NewKeccak256
 	pool := NewTreePool(hasher, segmentCount, poolsize)
 	cycles := 100
@@ -511,7 +507,7 @@ func benchmarkPool(t *testing.B, poolsize, n int) {
 
 // benchmarks the reference hasher
 func benchmarkRefHasher(t *testing.B, n int) {
-	data := newData(n)
+	data := testutil.RandomBytes(1, n)
 	hasher := sha3.NewKeccak256
 	rbmt := NewRefHasher(hasher, 128)
 
@@ -522,15 +518,6 @@ func benchmarkRefHasher(t *testing.B, n int) {
 	}
 }
 
-func newData(bufferSize int) []byte {
-	data := make([]byte, bufferSize)
-	_, err := io.ReadFull(crand.Reader, data)
-	if err != nil {
-		panic(err.Error())
-	}
-	return data
-}
-
 // Hash hashes the data and the span using the bmt hasher
 func syncHash(h *Hasher, span, data []byte) []byte {
 	h.ResetWithLength(span)
@@ -553,7 +540,7 @@ func splitAndShuffle(secsize int, data []byte) (idxs []int, segments [][]byte) {
 		section := data[i*secsize : end]
 		segments = append(segments, section)
 	}
-	shuffle(n, func(i int, j int) {
+	rand.Shuffle(n, func(i int, j int) {
 		idxs[i], idxs[j] = idxs[j], idxs[i]
 	})
 	return idxs, segments
@@ -594,29 +581,3 @@ func asyncHash(bmt SectionWriter, span []byte, l int, wh whenHash, idxs []int, s
 	}
 	return <-c
 }
-
-// this is also in swarm/network_test.go
-// shuffle pseudo-randomizes the order of elements.
-// n is the number of elements. Shuffle panics if n < 0.
-// swap swaps the elements with indexes i and j.
-func shuffle(n int, swap func(i, j int)) {
-	if n < 0 {
-		panic("invalid argument to Shuffle")
-	}
-
-	// Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
-	// Shuffle really ought not be called with n that doesn't fit in 32 bits.
-	// Not only will it take a very long time, but with 2³¹! possible permutations,
-	// there's no way that any PRNG can have a big enough internal state to
-	// generate even a minuscule percentage of the possible permutations.
-	// Nevertheless, the right API signature accepts an int n, so handle it as best we can.
-	i := n - 1
-	for ; i > 1<<31-1-1; i-- {
-		j := int(rand.Int63n(int64(i + 1)))
-		swap(i, j)
-	}
-	for ; i > 0; i-- {
-		j := int(rand.Int31n(int32(i + 1)))
-		swap(i, j)
-	}
-}
@@ -20,20 +20,19 @@ package fuse
 
 import (
 	"bytes"
-	"crypto/rand"
 	"flag"
 	"fmt"
 	"io"
 	"io/ioutil"
+	"math/rand"
 	"os"
 	"path/filepath"
 	"testing"
 
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/api"
 	"github.com/ethereum/go-ethereum/swarm/storage"
-
-	"github.com/ethereum/go-ethereum/log"
-
+	"github.com/ethereum/go-ethereum/swarm/testutil"
 	colorable "github.com/mattn/go-colorable"
 )
 
@@ -229,12 +228,6 @@ func checkFile(t *testing.T, testMountDir, fname string, contents []byte) {
 	}
 }
 
-func getRandomBytes(size int) []byte {
-	contents := make([]byte, size)
-	rand.Read(contents)
-	return contents
-}
-
 func isDirEmpty(name string) bool {
 	f, err := os.Open(name)
 	if err != nil {
@@ -328,22 +321,22 @@ func (ta *testAPI) mountListAndUnmount(t *testing.T, toEncrypt bool) {
 	dat.testMountDir = filepath.Join(dat.testDir, "testMountDir")
 	dat.files = make(map[string]fileInfo)
 
-	dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
-	dat.files["2.txt"] = fileInfo{0711, 333, 444, getRandomBytes(10)}
-	dat.files["3.txt"] = fileInfo{0622, 333, 444, getRandomBytes(100)}
-	dat.files["4.txt"] = fileInfo{0533, 333, 444, getRandomBytes(1024)}
-	dat.files["5.txt"] = fileInfo{0544, 333, 444, getRandomBytes(10)}
-	dat.files["6.txt"] = fileInfo{0555, 333, 444, getRandomBytes(10)}
-	dat.files["7.txt"] = fileInfo{0666, 333, 444, getRandomBytes(10)}
-	dat.files["8.txt"] = fileInfo{0777, 333, 333, getRandomBytes(10)}
-	dat.files["11.txt"] = fileInfo{0777, 333, 444, getRandomBytes(10)}
-	dat.files["111.txt"] = fileInfo{0777, 333, 444, getRandomBytes(10)}
-	dat.files["two/2.txt"] = fileInfo{0777, 333, 444, getRandomBytes(10)}
-	dat.files["two/2/2.txt"] = fileInfo{0777, 333, 444, getRandomBytes(10)}
-	dat.files["two/2./2.txt"] = fileInfo{0777, 444, 444, getRandomBytes(10)}
-	dat.files["twice/2.txt"] = fileInfo{0777, 444, 333, getRandomBytes(200)}
-	dat.files["one/two/three/four/five/six/seven/eight/nine/10.txt"] = fileInfo{0777, 333, 444, getRandomBytes(10240)}
-	dat.files["one/two/three/four/five/six/six"] = fileInfo{0777, 333, 444, getRandomBytes(10)}
+	dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
+	dat.files["2.txt"] = fileInfo{0711, 333, 444, testutil.RandomBytes(2, 10)}
+	dat.files["3.txt"] = fileInfo{0622, 333, 444, testutil.RandomBytes(3, 100)}
+	dat.files["4.txt"] = fileInfo{0533, 333, 444, testutil.RandomBytes(4, 1024)}
+	dat.files["5.txt"] = fileInfo{0544, 333, 444, testutil.RandomBytes(5, 10)}
+	dat.files["6.txt"] = fileInfo{0555, 333, 444, testutil.RandomBytes(6, 10)}
+	dat.files["7.txt"] = fileInfo{0666, 333, 444, testutil.RandomBytes(7, 10)}
+	dat.files["8.txt"] = fileInfo{0777, 333, 333, testutil.RandomBytes(8, 10)}
+	dat.files["11.txt"] = fileInfo{0777, 333, 444, testutil.RandomBytes(9, 10)}
+	dat.files["111.txt"] = fileInfo{0777, 333, 444, testutil.RandomBytes(10, 10)}
+	dat.files["two/2.txt"] = fileInfo{0777, 333, 444, testutil.RandomBytes(11, 10)}
+	dat.files["two/2/2.txt"] = fileInfo{0777, 333, 444, testutil.RandomBytes(12, 10)}
+	dat.files["two/2./2.txt"] = fileInfo{0777, 444, 444, testutil.RandomBytes(13, 10)}
+	dat.files["twice/2.txt"] = fileInfo{0777, 444, 333, testutil.RandomBytes(14, 200)}
+	dat.files["one/two/three/four/five/six/seven/eight/nine/10.txt"] = fileInfo{0777, 333, 444, testutil.RandomBytes(15, 10240)}
+	dat.files["one/two/three/four/five/six/six"] = fileInfo{0777, 333, 444, testutil.RandomBytes(16, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -386,7 +379,7 @@ func (ta *testAPI) runMaxMounts(t *testing.T, toEncrypt bool) {
 	dat.testUploadDir = filepath.Join(dat.testDir, "max-upload1")
 	dat.testMountDir = filepath.Join(dat.testDir, "max-mount1")
 	dat.files = make(map[string]fileInfo)
-	dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -396,7 +389,7 @@ func (ta *testAPI) runMaxMounts(t *testing.T, toEncrypt bool) {
 
 	dat.testUploadDir = filepath.Join(dat.testDir, "max-upload2")
 	dat.testMountDir = filepath.Join(dat.testDir, "max-mount2")
-	dat.files["2.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["2.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -405,7 +398,7 @@ func (ta *testAPI) runMaxMounts(t *testing.T, toEncrypt bool) {
 
 	dat.testUploadDir = filepath.Join(dat.testDir, "max-upload3")
 	dat.testMountDir = filepath.Join(dat.testDir, "max-mount3")
-	dat.files["3.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["3.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -414,7 +407,7 @@ func (ta *testAPI) runMaxMounts(t *testing.T, toEncrypt bool) {
 
 	dat.testUploadDir = filepath.Join(dat.testDir, "max-upload4")
 	dat.testMountDir = filepath.Join(dat.testDir, "max-mount4")
-	dat.files["4.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["4.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -423,7 +416,7 @@ func (ta *testAPI) runMaxMounts(t *testing.T, toEncrypt bool) {
 
 	dat.testUploadDir = filepath.Join(dat.testDir, "max-upload5")
 	dat.testMountDir = filepath.Join(dat.testDir, "max-mount5")
-	dat.files["5.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["5.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -436,7 +429,7 @@ func (ta *testAPI) runMaxMounts(t *testing.T, toEncrypt bool) {
 	if err != nil {
 		t.Fatalf("Couldn't create upload dir 6: %v", err)
 	}
-	dat.files["6.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["6.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 	testMountDir6 := filepath.Join(dat.testDir, "max-mount6")
 	err = os.MkdirAll(testMountDir6, 0777)
 	if err != nil {
@@ -475,7 +468,7 @@ func (ta *testAPI) remount(t *testing.T, toEncrypt bool) {
 	dat.testMountDir = filepath.Join(dat.testDir, "remount-mount1")
 	dat.files = make(map[string]fileInfo)
 
-	dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -494,7 +487,7 @@ func (ta *testAPI) remount(t *testing.T, toEncrypt bool) {
 	}
 
 	// mount a different hash in already mounted point
-	dat.files["2.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["2.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 	testUploadDir2, err3 := addDir(dat.testDir, "remount-upload2")
 	if err3 != nil {
 		t.Fatalf("Error creating second upload dir: %v", err3)
@@ -543,7 +536,7 @@ func (ta *testAPI) unmount(t *testing.T, toEncrypt bool) {
 	dat.testUploadDir = filepath.Join(dat.testDir, "ex-upload1")
 	dat.testMountDir = filepath.Join(dat.testDir, "ex-mount1")
 	dat.files = make(map[string]fileInfo)
-	dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -591,7 +584,7 @@ func (ta *testAPI) unmountWhenResourceBusy(t *testing.T, toEncrypt bool) {
 	dat.testUploadDir = filepath.Join(dat.testDir, "ex-upload1")
 	dat.testMountDir = filepath.Join(dat.testDir, "ex-mount1")
 	dat.files = make(map[string]fileInfo)
-	dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -609,7 +602,7 @@ func (ta *testAPI) unmountWhenResourceBusy(t *testing.T, toEncrypt bool) {
 	//we need to manually close the file before mount for this test
 	//but let's defer too in case of errors
 	defer d.Close()
-	_, err = d.Write(getRandomBytes(10))
+	_, err = d.Write(testutil.RandomBytes(1, 10))
 	if err != nil {
 		t.Fatalf("Couldn't write to file: %v", err)
 	}
@@ -667,7 +660,7 @@ func (ta *testAPI) seekInMultiChunkFile(t *testing.T, toEncrypt bool) {
 	dat.testUploadDir = filepath.Join(dat.testDir, "seek-upload1")
 	dat.testMountDir = filepath.Join(dat.testDir, "seek-mount")
 	dat.files = make(map[string]fileInfo)
-	dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10240)}
+	dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10240)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -733,9 +726,9 @@ func (ta *testAPI) createNewFile(t *testing.T, toEncrypt bool) {
 	dat.testUploadDir = filepath.Join(dat.testDir, "create-upload1")
 	dat.testMountDir = filepath.Join(dat.testDir, "create-mount")
 	dat.files = make(map[string]fileInfo)
-	dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
-	dat.files["five.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
-	dat.files["six.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
+	dat.files["five.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(2, 10)}
+	dat.files["six.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(3, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -751,11 +744,7 @@ func (ta *testAPI) createNewFile(t *testing.T, toEncrypt bool) {
 	}
 	defer d.Close()
 	log.Debug("Opened file")
-	contents := make([]byte, 11)
-	_, err = rand.Read(contents)
-	if err != nil {
-		t.Fatalf("Could not rand read contents %v", err)
-	}
+	contents := testutil.RandomBytes(1, 11)
 	log.Debug("content read")
 	_, err = d.Write(contents)
 	if err != nil {
@@ -815,7 +804,7 @@ func (ta *testAPI) createNewFileInsideDirectory(t *testing.T, toEncrypt bool) {
 	dat.testUploadDir = filepath.Join(dat.testDir, "createinsidedir-upload")
 	dat.testMountDir = filepath.Join(dat.testDir, "createinsidedir-mount")
 	dat.files = make(map[string]fileInfo)
-	dat.files["one/1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["one/1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -832,11 +821,7 @@ func (ta *testAPI) createNewFileInsideDirectory(t *testing.T, toEncrypt bool) {
 	}
 	defer d.Close()
 	log.Debug("File opened")
-	contents := make([]byte, 11)
-	_, err = rand.Read(contents)
-	if err != nil {
-		t.Fatalf("Error filling random bytes into byte array %v", err)
-	}
+	contents := testutil.RandomBytes(1, 11)
 	log.Debug("Content read")
 	_, err = d.Write(contents)
 	if err != nil {
@@ -896,7 +881,7 @@ func (ta *testAPI) createNewFileInsideNewDirectory(t *testing.T, toEncrypt bool)
 	dat.testUploadDir = filepath.Join(dat.testDir, "createinsidenewdir-upload")
 	dat.testMountDir = filepath.Join(dat.testDir, "createinsidenewdir-mount")
 	dat.files = make(map[string]fileInfo)
-	dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -916,11 +901,7 @@ func (ta *testAPI) createNewFileInsideNewDirectory(t *testing.T, toEncrypt bool)
 	}
 	defer d.Close()
 	log.Debug("File opened")
-	contents := make([]byte, 11)
-	_, err = rand.Read(contents)
-	if err != nil {
-		t.Fatalf("Error writing random bytes to byte array: %v", err)
-	}
+	contents := testutil.RandomBytes(1, 11)
 	log.Debug("content read")
 	_, err = d.Write(contents)
 	if err != nil {
@@ -976,9 +957,9 @@ func (ta *testAPI) removeExistingFile(t *testing.T, toEncrypt bool) {
 	dat.testUploadDir = filepath.Join(dat.testDir, "remove-upload")
 	dat.testMountDir = filepath.Join(dat.testDir, "remove-mount")
 	dat.files = make(map[string]fileInfo)
-	dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
-	dat.files["five.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
-	dat.files["six.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
+	dat.files["five.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(2, 10)}
+	dat.files["six.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(3, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -1036,9 +1017,9 @@ func (ta *testAPI) removeExistingFileInsideDir(t *testing.T, toEncrypt bool) {
 	dat.testUploadDir = filepath.Join(dat.testDir, "remove-upload")
 	dat.testMountDir = filepath.Join(dat.testDir, "remove-mount")
 	dat.files = make(map[string]fileInfo)
-	dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
-	dat.files["one/five.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
-	dat.files["one/six.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
+	dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
+	dat.files["one/five.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(2, 10)}
+	dat.files["one/six.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(3, 10)}
 
 	dat, err = ta.uploadAndMount(dat, t)
 	if err != nil {
@@ -1104,9 +1085,9 @@ func (ta *testAPI) removeNewlyAddedFile(t *testing.T, toEncrypt bool) {
|
|||||||
dat.testUploadDir = filepath.Join(dat.testDir, "removenew-upload")
|
dat.testUploadDir = filepath.Join(dat.testDir, "removenew-upload")
|
||||||
dat.testMountDir = filepath.Join(dat.testDir, "removenew-mount")
|
dat.testMountDir = filepath.Join(dat.testDir, "removenew-mount")
|
||||||
dat.files = make(map[string]fileInfo)
|
dat.files = make(map[string]fileInfo)
|
||||||
dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
|
||||||
dat.files["five.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["five.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(2, 10)}
|
||||||
dat.files["six.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["six.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(3, 10)}
|
||||||
|
|
||||||
dat, err = ta.uploadAndMount(dat, t)
|
dat, err = ta.uploadAndMount(dat, t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1127,11 +1108,7 @@ func (ta *testAPI) removeNewlyAddedFile(t *testing.T, toEncrypt bool) {
|
|||||||
}
|
}
|
||||||
defer d.Close()
|
defer d.Close()
|
||||||
log.Debug("file opened")
|
log.Debug("file opened")
|
||||||
contents := make([]byte, 11)
|
contents := testutil.RandomBytes(1, 11)
|
||||||
_, err = rand.Read(contents)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Error writing random bytes to byte array: %v", err)
|
|
||||||
}
|
|
||||||
log.Debug("content read")
|
log.Debug("content read")
|
||||||
_, err = d.Write(contents)
|
_, err = d.Write(contents)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1201,9 +1178,9 @@ func (ta *testAPI) addNewFileAndModifyContents(t *testing.T, toEncrypt bool) {
|
|||||||
dat.testUploadDir = filepath.Join(dat.testDir, "modifyfile-upload")
|
dat.testUploadDir = filepath.Join(dat.testDir, "modifyfile-upload")
|
||||||
dat.testMountDir = filepath.Join(dat.testDir, "modifyfile-mount")
|
dat.testMountDir = filepath.Join(dat.testDir, "modifyfile-mount")
|
||||||
dat.files = make(map[string]fileInfo)
|
dat.files = make(map[string]fileInfo)
|
||||||
dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
|
||||||
dat.files["five.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["five.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(2, 10)}
|
||||||
dat.files["six.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["six.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(3, 10)}
|
||||||
|
|
||||||
dat, err = ta.uploadAndMount(dat, t)
|
dat, err = ta.uploadAndMount(dat, t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1357,9 +1334,9 @@ func (ta *testAPI) removeEmptyDir(t *testing.T, toEncrypt bool) {
|
|||||||
dat.testUploadDir = filepath.Join(dat.testDir, "rmdir-upload")
|
dat.testUploadDir = filepath.Join(dat.testDir, "rmdir-upload")
|
||||||
dat.testMountDir = filepath.Join(dat.testDir, "rmdir-mount")
|
dat.testMountDir = filepath.Join(dat.testDir, "rmdir-mount")
|
||||||
dat.files = make(map[string]fileInfo)
|
dat.files = make(map[string]fileInfo)
|
||||||
dat.files["1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
|
||||||
dat.files["five.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["five.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(2, 10)}
|
||||||
dat.files["six.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["six.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(3, 10)}
|
||||||
|
|
||||||
dat, err = ta.uploadAndMount(dat, t)
|
dat, err = ta.uploadAndMount(dat, t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1406,9 +1383,9 @@ func (ta *testAPI) removeDirWhichHasFiles(t *testing.T, toEncrypt bool) {
|
|||||||
dat.testUploadDir = filepath.Join(dat.testDir, "rmdir-upload")
|
dat.testUploadDir = filepath.Join(dat.testDir, "rmdir-upload")
|
||||||
dat.testMountDir = filepath.Join(dat.testDir, "rmdir-mount")
|
dat.testMountDir = filepath.Join(dat.testDir, "rmdir-mount")
|
||||||
dat.files = make(map[string]fileInfo)
|
dat.files = make(map[string]fileInfo)
|
||||||
dat.files["one/1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["one/1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
|
||||||
dat.files["two/five.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["two/five.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(2, 10)}
|
||||||
dat.files["two/six.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["two/six.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(3, 10)}
|
||||||
|
|
||||||
dat, err = ta.uploadAndMount(dat, t)
|
dat, err = ta.uploadAndMount(dat, t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1480,12 +1457,12 @@ func (ta *testAPI) removeDirWhichHasSubDirs(t *testing.T, toEncrypt bool) {
|
|||||||
dat.testUploadDir = filepath.Join(dat.testDir, "rmsubdir-upload")
|
dat.testUploadDir = filepath.Join(dat.testDir, "rmsubdir-upload")
|
||||||
dat.testMountDir = filepath.Join(dat.testDir, "rmsubdir-mount")
|
dat.testMountDir = filepath.Join(dat.testDir, "rmsubdir-mount")
|
||||||
dat.files = make(map[string]fileInfo)
|
dat.files = make(map[string]fileInfo)
|
||||||
dat.files["one/1.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["one/1.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(1, 10)}
|
||||||
dat.files["two/three/2.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["two/three/2.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(2, 10)}
|
||||||
dat.files["two/three/3.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["two/three/3.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(3, 10)}
|
||||||
dat.files["two/four/5.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["two/four/5.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(4, 10)}
|
||||||
dat.files["two/four/6.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["two/four/6.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(5, 10)}
|
||||||
dat.files["two/four/six/7.txt"] = fileInfo{0700, 333, 444, getRandomBytes(10)}
|
dat.files["two/four/six/7.txt"] = fileInfo{0700, 333, 444, testutil.RandomBytes(6, 10)}
|
||||||
|
|
||||||
dat, err = ta.uploadAndMount(dat, t)
|
dat, err = ta.uploadAndMount(dat, t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1567,11 +1544,7 @@ func (ta *testAPI) appendFileContentsToEnd(t *testing.T, toEncrypt bool) {
|
|||||||
dat.testMountDir = filepath.Join(dat.testDir, "appendlargefile-mount")
|
dat.testMountDir = filepath.Join(dat.testDir, "appendlargefile-mount")
|
||||||
dat.files = make(map[string]fileInfo)
|
dat.files = make(map[string]fileInfo)
|
||||||
|
|
||||||
line1 := make([]byte, 10)
|
line1 := testutil.RandomBytes(1, 10)
|
||||||
_, err = rand.Read(line1)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Error writing random bytes to byte array: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
dat.files["1.txt"] = fileInfo{0700, 333, 444, line1}
|
dat.files["1.txt"] = fileInfo{0700, 333, 444, line1}
|
||||||
|
|
||||||
@ -1588,11 +1561,7 @@ func (ta *testAPI) appendFileContentsToEnd(t *testing.T, toEncrypt bool) {
|
|||||||
}
|
}
|
||||||
defer fd.Close()
|
defer fd.Close()
|
||||||
log.Debug("file opened")
|
log.Debug("file opened")
|
||||||
line2 := make([]byte, 5)
|
line2 := testutil.RandomBytes(1, 5)
|
||||||
_, err = rand.Read(line2)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Error writing random bytes to byte array: %v", err)
|
|
||||||
}
|
|
||||||
log.Debug("line read")
|
log.Debug("line read")
|
||||||
_, err = fd.Seek(int64(len(line1)), 0)
|
_, err = fd.Seek(int64(len(line1)), 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
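The fuse test changes above all follow one pattern: each test file's content now comes from testutil.RandomBytes with a small fixed seed (1, 2, 3, …), so different files get different bytes while every run of the suite sees exactly the same data. A minimal, self-contained sketch of that property, using only math/rand; the deterministicBytes helper below is illustrative, not part of the commit:

package main

import (
	"bytes"
	"fmt"
	"math/rand"
)

// deterministicBytes mirrors the idea behind testutil.RandomBytes:
// a fixed seed always yields the same byte sequence.
func deterministicBytes(seed int64, length int) []byte {
	b := make([]byte, length)
	rand.New(rand.NewSource(seed)).Read(b) // (*rand.Rand).Read never returns an error
	return b
}

func main() {
	// Seeds 1 and 2 play the role of the seeds given to 1.txt and five.txt above:
	// each file gets different content, but the content never changes between runs.
	one := deterministicBytes(1, 10)
	five := deterministicBytes(2, 10)
	fmt.Println(bytes.Equal(one, deterministicBytes(1, 10))) // true: reproducible
	fmt.Println(bytes.Equal(one, five))                      // false: files still differ
}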
@ -18,7 +18,6 @@ package stream

  import (
  "context"
- crand "crypto/rand"
  "errors"
  "flag"
  "fmt"
@ -40,6 +39,7 @@ import (
  "github.com/ethereum/go-ethereum/swarm/state"
  "github.com/ethereum/go-ethereum/swarm/storage"
  mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
  colorable "github.com/mattn/go-colorable"
  )

@ -230,12 +230,7 @@ func generateRandomFile() (string, error) {
  //generate a random file size between minFileSize and maxFileSize
  fileSize := rand.Intn(maxFileSize-minFileSize) + minFileSize
  log.Debug(fmt.Sprintf("Generated file with filesize %d kB", fileSize))
- b := make([]byte, fileSize*1024)
+ b := testutil.RandomBytes(1, fileSize*1024)
- _, err := crand.Read(b)
- if err != nil {
- log.Error("Error generating random file.", "err", err)
- return "", err
- }
  return string(b), nil
  }

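In the generateRandomFile hunk above, only the content generation becomes seeded; the file size is still drawn from the global math/rand source via rand.Intn. A hedged sketch of a fully deterministic variant, where both size and content derive from one seed; the function name and the constants are illustrative, not the package's own:

package main

import (
	"fmt"
	"math/rand"
)

const (
	minFileSize = 2  // kB; illustrative values, not the constants used by the stream tests
	maxFileSize = 40 // kB
)

// generateDeterministicFile is a sketch, not the committed helper: it derives
// both the file size and the file content from a single seeded source, so the
// whole generated "file" is identical on every run.
func generateDeterministicFile(seed int64) string {
	r := rand.New(rand.NewSource(seed))
	fileSize := r.Intn(maxFileSize-minFileSize) + minFileSize
	b := make([]byte, fileSize*1024)
	r.Read(b) // (*rand.Rand).Read always fills the slice and never errors
	return string(b)
}

func main() {
	a := generateDeterministicFile(1)
	b := generateDeterministicFile(1)
	fmt.Println(len(a), a == b) // same size and same content across runs
}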
@ -19,9 +19,7 @@ package stream
  import (
  "bytes"
  "context"
- crand "crypto/rand"
  "fmt"
- "io"
  "os"
  "sync"
  "testing"
@ -39,6 +37,7 @@ import (
  "github.com/ethereum/go-ethereum/swarm/network/simulation"
  "github.com/ethereum/go-ethereum/swarm/state"
  "github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
  )

  //Tests initializing a retrieve request
@ -530,7 +529,7 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
  //now we can actually upload a (random) file to the round-robin store
  size := chunkCount * chunkSize
  log.Debug("Storing data to file store")
- fileHash, wait, err := roundRobinFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
+ fileHash, wait, err := roundRobinFileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
  // wait until all chunks stored
  if err != nil {
  return err
@ -719,7 +718,7 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skip
  for i := 0; i < chunkCount; i++ {
  // create actual size real chunks
  ctx := context.TODO()
- hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
+ hash, wait, err := remoteFileStore.Store(ctx, testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
  if err != nil {
  b.Fatalf("expected no error. got %v", err)
  }
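testutil.RandomReader returns a reader of a known length, so it can stand in anywhere these tests previously passed io.LimitReader(crand.Reader, int64(size)) to a Store-style call, and seeding by the loop index (as in the benchmark hunk above) keeps every chunk distinct yet reproducible. A rough sketch of that call shape; randomReader and consumeChunk below are hypothetical stand-ins, not the swarm FileStore API:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"math/rand"
)

// randomReader mimics the behaviour of testutil.RandomReader:
// deterministic pseudo-random content of the requested length.
func randomReader(seed int64, length int) *bytes.Reader {
	b := make([]byte, length)
	rand.New(rand.NewSource(seed)).Read(b)
	return bytes.NewReader(b)
}

// consumeChunk stands in for a Store-like call that takes a sized reader;
// it just hashes what it reads.
func consumeChunk(r io.Reader, size int64) ([32]byte, error) {
	buf := make([]byte, size)
	if _, err := io.ReadFull(r, buf); err != nil {
		return [32]byte{}, err
	}
	return sha256.Sum256(buf), nil
}

func main() {
	const chunkSize = 4096
	hashes := make(map[[32]byte]bool)
	for i := 0; i < 3; i++ {
		// Seed by loop index: chunk i always has the same content,
		// and different chunks have different content.
		h, err := consumeChunk(randomReader(int64(i), chunkSize), chunkSize)
		if err != nil {
			panic(err)
		}
		hashes[h] = true
	}
	fmt.Println(len(hashes)) // 3 distinct chunks, identical on every run
}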
@ -18,10 +18,8 @@ package stream

  import (
  "context"
- crand "crypto/rand"
  "encoding/binary"
  "fmt"
- "io"
  "os"
  "sync"
  "testing"
@ -36,6 +34,7 @@ import (
  "github.com/ethereum/go-ethereum/swarm/network/simulation"
  "github.com/ethereum/go-ethereum/swarm/state"
  "github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
  )

  func TestIntervalsLive(t *testing.T) {
@ -130,7 +129,8 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
  fileStore := item.(*storage.FileStore)

  size := chunkCount * chunkSize
- _, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
+ _, wait, err := fileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
  if err != nil {
  log.Error("Store error: %v", "err", err)
  t.Fatal(err)
@ -17,9 +17,7 @@ package stream

  import (
  "context"
- crand "crypto/rand"
  "fmt"
- "io"
  "os"
  "runtime"
  "sync"
@ -39,6 +37,7 @@ import (
  "github.com/ethereum/go-ethereum/swarm/state"
  "github.com/ethereum/go-ethereum/swarm/storage"
  mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
  )

  const MaxTimeout = 600
@ -603,7 +602,7 @@ func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.Lo
  size := chunkSize
  var rootAddrs []storage.Address
  for i := 0; i < chunkCount; i++ {
- rk, wait, err := fileStore.Store(context.TODO(), io.LimitReader(crand.Reader, int64(size)), int64(size), false)
+ rk, wait, err := fileStore.Store(context.TODO(), testutil.RandomReader(i, size), int64(size), false)
  if err != nil {
  return nil, err
  }
@ -18,9 +18,7 @@ package stream

  import (
  "context"
- crand "crypto/rand"
  "fmt"
- "io"
  "io/ioutil"
  "math"
  "os"
@ -39,6 +37,7 @@ import (
  "github.com/ethereum/go-ethereum/swarm/state"
  "github.com/ethereum/go-ethereum/swarm/storage"
  mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
  )

  const dataChunkCount = 200
@ -183,7 +182,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
  }
  fileStore := item.(*storage.FileStore)
  size := chunkCount * chunkSize
- _, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
+ _, wait, err := fileStore.Store(ctx, testutil.RandomReader(j, size), int64(size), false)
  if err != nil {
  t.Fatal(err.Error())
  }
@ -335,7 +335,7 @@ func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwa

  result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
  nodeIDs := sim.UpNodeIDs()
- shuffle(len(nodeIDs), func(i, j int) {
+ rand.Shuffle(len(nodeIDs), func(i, j int) {
  nodeIDs[i], nodeIDs[j] = nodeIDs[j], nodeIDs[i]
  })
  for _, id := range nodeIDs {
@ -404,7 +404,7 @@ func retrieve(
  nodeStatusM *sync.Map,
  totalFoundCount *uint64,
  ) (missing uint64) {
- shuffle(len(files), func(i, j int) {
+ rand.Shuffle(len(files), func(i, j int) {
  files[i], files[j] = files[j], files[i]
  })

@ -499,32 +499,3 @@ func retrieve(

  return uint64(totalCheckCount) - atomic.LoadUint64(totalFoundCount)
  }
-
- // Backported from stdlib https://golang.org/src/math/rand/rand.go?s=11175:11215#L333
- //
- // Replace with rand.Shuffle from go 1.10 when go 1.9 support is dropped.
- //
- // shuffle pseudo-randomizes the order of elements.
- // n is the number of elements. Shuffle panics if n < 0.
- // swap swaps the elements with indexes i and j.
- func shuffle(n int, swap func(i, j int)) {
- if n < 0 {
- panic("invalid argument to Shuffle")
- }
-
- // Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
- // Shuffle really ought not be called with n that doesn't fit in 32 bits.
- // Not only will it take a very long time, but with 2³¹! possible permutations,
- // there's no way that any PRNG can have a big enough internal state to
- // generate even a minuscule percentage of the possible permutations.
- // Nevertheless, the right API signature accepts an int n, so handle it as best we can.
- i := n - 1
- for ; i > 1<<31-1-1; i-- {
- j := int(rand.Int63n(int64(i + 1)))
- swap(i, j)
- }
- for ; i > 0; i-- {
- j := int(rand.Int31n(int32(i + 1)))
- swap(i, j)
- }
- }
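The deleted helper's own comment points at the replacement: math/rand has shipped Shuffle with the same (n, swap) signature since Go 1.10, so the call sites above only need the rand. prefix. A minimal usage sketch:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	nodeIDs := []string{"a", "b", "c", "d", "e"}

	// rand.Shuffle takes the element count and a swap callback,
	// exactly the shape the removed backported helper exposed.
	rand.Seed(42) // seed the global source so this example's permutation is repeatable
	rand.Shuffle(len(nodeIDs), func(i, j int) {
		nodeIDs[i], nodeIDs[j] = nodeIDs[j], nodeIDs[i]
	})
	fmt.Println(nodeIDs)
}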
@ -19,13 +19,13 @@ package storage
  import (
  "bytes"
  "context"
- "crypto/rand"
  "encoding/binary"
  "fmt"
  "io"
  "testing"

  "github.com/ethereum/go-ethereum/crypto/sha3"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
  )

  /*
@ -47,7 +47,7 @@ func newTestHasherStore(store ChunkStore, hash string) *hasherStore {
  }

  func testRandomBrokenData(n int, tester *chunkerTester) {
- data := io.LimitReader(rand.Reader, int64(n))
+ data := testutil.RandomReader(1, n)
  brokendata := brokenLimitReader(data, n, n/2)

  buf := make([]byte, n)
@ -56,7 +56,7 @@ func testRandomBrokenData(n int, tester *chunkerTester) {
  tester.t.Fatalf("Broken reader is not broken, hence broken. Returns: %v", err)
  }

- data = io.LimitReader(rand.Reader, int64(n))
+ data = testutil.RandomReader(2, n)
  brokendata = brokenLimitReader(data, n, n/2)

  putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)
@ -77,7 +77,8 @@ func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester)
  input, found := tester.inputs[uint64(n)]
  var data io.Reader
  if !found {
- data, input = GenerateRandomData(n)
+ input = testutil.RandomBytes(1, n)
+ data = bytes.NewReader(input)
  tester.inputs[uint64(n)] = input
  } else {
  data = io.LimitReader(bytes.NewReader(input), int64(n))
@ -118,14 +119,13 @@ func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester)
  // testing partial read
  for i := 1; i < n; i += 10000 {
  readableLength := n - i
- output := make([]byte, readableLength)
  r, err := reader.ReadAt(output, int64(i))
  if r != readableLength || err != io.EOF {
  tester.t.Fatalf("readAt error with offset %v read: %v n = %v err = %v\n", i, r, readableLength, err)
  }
  if input != nil {
- if !bytes.Equal(output, input[i:]) {
+ if !bytes.Equal(output[:readableLength], input[i:]) {
- tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", input[i:], output)
+ tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", input[i:], output[:readableLength])
  }
  }
  }
@ -177,7 +177,8 @@ func TestDataAppend(t *testing.T) {
  input, found := tester.inputs[uint64(n)]
  var data io.Reader
  if !found {
- data, input = GenerateRandomData(n)
+ input = testutil.RandomBytes(i, n)
+ data = bytes.NewReader(input)
  tester.inputs[uint64(n)] = input
  } else {
  data = io.LimitReader(bytes.NewReader(input), int64(n))
@ -199,7 +200,8 @@ func TestDataAppend(t *testing.T) {
  appendInput, found := tester.inputs[uint64(m)]
  var appendData io.Reader
  if !found {
- appendData, appendInput = GenerateRandomData(m)
+ appendInput = testutil.RandomBytes(i, m)
+ appendData = bytes.NewReader(appendInput)
  tester.inputs[uint64(m)] = appendInput
  } else {
  appendData = io.LimitReader(bytes.NewReader(appendInput), int64(m))
@ -272,7 +274,7 @@ func benchReadAll(reader LazySectionReader) {
  func benchmarkSplitJoin(n int, t *testing.B) {
  t.ReportAllocs()
  for i := 0; i < t.N; i++ {
- data := testDataReader(n)
+ data := testutil.RandomReader(i, n)

  putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)
  ctx := context.TODO()
@ -292,7 +294,7 @@ func benchmarkSplitJoin(n int, t *testing.B) {
  func benchmarkSplitTreeSHA3(n int, t *testing.B) {
  t.ReportAllocs()
  for i := 0; i < t.N; i++ {
- data := testDataReader(n)
+ data := testutil.RandomReader(i, n)
  putGetter := newTestHasherStore(&FakeChunkStore{}, SHA3Hash)

  ctx := context.Background()
@ -311,7 +313,7 @@ func benchmarkSplitTreeSHA3(n int, t *testing.B) {
  func benchmarkSplitTreeBMT(n int, t *testing.B) {
  t.ReportAllocs()
  for i := 0; i < t.N; i++ {
- data := testDataReader(n)
+ data := testutil.RandomReader(i, n)
  putGetter := newTestHasherStore(&FakeChunkStore{}, BMTHash)

  ctx := context.Background()
@ -329,7 +331,7 @@ func benchmarkSplitTreeBMT(n int, t *testing.B) {
  func benchmarkSplitPyramidBMT(n int, t *testing.B) {
  t.ReportAllocs()
  for i := 0; i < t.N; i++ {
- data := testDataReader(n)
+ data := testutil.RandomReader(i, n)
  putGetter := newTestHasherStore(&FakeChunkStore{}, BMTHash)

  ctx := context.Background()
@ -347,7 +349,7 @@ func benchmarkSplitPyramidBMT(n int, t *testing.B) {
  func benchmarkSplitPyramidSHA3(n int, t *testing.B) {
  t.ReportAllocs()
  for i := 0; i < t.N; i++ {
- data := testDataReader(n)
+ data := testutil.RandomReader(i, n)
  putGetter := newTestHasherStore(&FakeChunkStore{}, SHA3Hash)

  ctx := context.Background()
@ -365,8 +367,8 @@ func benchmarkSplitPyramidSHA3(n int, t *testing.B) {
  func benchmarkSplitAppendPyramid(n, m int, t *testing.B) {
  t.ReportAllocs()
  for i := 0; i < t.N; i++ {
- data := testDataReader(n)
+ data := testutil.RandomReader(i, n)
- data1 := testDataReader(m)
+ data1 := testutil.RandomReader(t.N+i, m)

  store := NewMapChunkStore()
  putGetter := newTestHasherStore(store, SHA3Hash)
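Besides swapping GenerateRandomData for testutil.RandomBytes plus bytes.NewReader, the partial-read hunk above reuses one output buffer and compares only output[:readableLength], since a ReadAt that runs past the end fills just that prefix and reports io.EOF. A small standalone illustration of that slicing pattern over a bytes.Reader; the values are illustrative:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	input := []byte("0123456789abcdef")
	reader := bytes.NewReader(input)

	output := make([]byte, len(input)) // one buffer, reused for every offset
	for i := 1; i < len(input); i += 5 {
		readableLength := len(input) - i
		r, err := reader.ReadAt(output, int64(i))
		// ReadAt fills only readableLength bytes and reports io.EOF,
		// so only the prefix output[:readableLength] is meaningful.
		if r != readableLength || err != io.EOF {
			panic(fmt.Sprintf("offset %d: read %d, err %v", i, r, err))
		}
		if !bytes.Equal(output[:readableLength], input[i:]) {
			panic("input and output mismatch")
		}
	}
	fmt.Println("all partial reads matched")
}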
@ -19,7 +19,6 @@ package storage
  import (
  "bytes"
  "context"
- "crypto/rand"
  "flag"
  "fmt"
  "io"
@ -31,7 +30,7 @@ import (

  "github.com/ethereum/go-ethereum/log"
  ch "github.com/ethereum/go-ethereum/swarm/chunk"
- colorable "github.com/mattn/go-colorable"
+ "github.com/mattn/go-colorable"
  )

  var (
@ -151,10 +150,6 @@ func mget(store ChunkStore, hs []Address, f func(h Address, chunk Chunk) error)
  return err
  }

- func testDataReader(l int) (r io.Reader) {
- return io.LimitReader(rand.Reader, int64(l))
- }
-
  func (r *brokenLimitedReader) Read(buf []byte) (int, error) {
  if r.off+len(buf) > r.errAt {
  return 0, fmt.Errorf("Broken reader")
File diff suppressed because one or more lines are too long
@ -23,6 +23,8 @@ import (
  "io/ioutil"
  "os"
  "testing"
+
+ "github.com/ethereum/go-ethereum/swarm/testutil"
  )

  const testDataSize = 0x0001000
@ -49,9 +51,9 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) {
  fileStore := NewFileStore(localStore, NewFileStoreParams())
  defer os.RemoveAll("/tmp/bzz")

- reader, slice := GenerateRandomData(testDataSize)
+ slice := testutil.RandomBytes(1, testDataSize)
  ctx := context.TODO()
- key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt)
+ key, wait, err := fileStore.Store(ctx, bytes.NewReader(slice), testDataSize, toEncrypt)
  if err != nil {
  t.Fatalf("Store error: %v", err)
  }
@ -63,13 +65,13 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) {
  if isEncrypted != toEncrypt {
  t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
  }
- resultSlice := make([]byte, len(slice))
+ resultSlice := make([]byte, testDataSize)
  n, err := resultReader.ReadAt(resultSlice, 0)
  if err != io.EOF {
  t.Fatalf("Retrieve error: %v", err)
  }
- if n != len(slice) {
+ if n != testDataSize {
- t.Fatalf("Slice size error got %d, expected %d.", n, len(slice))
+ t.Fatalf("Slice size error got %d, expected %d.", n, testDataSize)
  }
  if !bytes.Equal(slice, resultSlice) {
  t.Fatalf("Comparison error.")
@ -114,9 +116,9 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
  DbStore: db,
  }
  fileStore := NewFileStore(localStore, NewFileStoreParams())
- reader, slice := GenerateRandomData(testDataSize)
+ slice := testutil.RandomBytes(1, testDataSize)
  ctx := context.TODO()
- key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt)
+ key, wait, err := fileStore.Store(ctx, bytes.NewReader(slice), testDataSize, toEncrypt)
  if err != nil {
  t.Errorf("Store error: %v", err)
  }
@ -25,7 +25,6 @@ import (
  "fmt"
  "hash"
  "io"
- "io/ioutil"

  "github.com/ethereum/go-ethereum/common"
  "github.com/ethereum/go-ethereum/crypto/sha3"
@ -251,16 +250,6 @@ func GenerateRandomChunks(dataSize int64, count int) (chunks []Chunk) {
  return chunks
  }

- func GenerateRandomData(l int) (r io.Reader, slice []byte) {
- slice, err := ioutil.ReadAll(io.LimitReader(rand.Reader, int64(l)))
- if err != nil {
- panic("rand error")
- }
- // log.Warn("generate random data", "len", len(slice), "data", common.Bytes2Hex(slice))
- r = io.LimitReader(bytes.NewReader(slice), int64(l))
- return r, slice
- }
-
  // Size, Seek, Read, ReadAt
  type LazySectionReader interface {
  Context() context.Context
@ -17,8 +17,10 @@
  package testutil

  import (
+ "bytes"
  "io"
  "io/ioutil"
+ "math/rand"
  "os"
  "strings"
  "testing"
@ -42,3 +44,22 @@ func TempFileWithContent(t *testing.T, content string) string {
  }
  return tempFile.Name()
  }
+
+ // RandomBytes returns pseudo-random deterministic result
+ // because test fails must be reproducible
+ func RandomBytes(seed, length int) []byte {
+ b := make([]byte, length)
+ reader := rand.New(rand.NewSource(int64(seed)))
+ for n := 0; n < length; {
+ read, err := reader.Read(b[n:])
+ if err != nil {
+ panic(err)
+ }
+ n += read
+ }
+ return b
+ }
+
+ func RandomReader(seed, length int) *bytes.Reader {
+ return bytes.NewReader(RandomBytes(seed, length))
+ }
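The new helpers close the loop: RandomBytes yields the same content for a given seed on every run, and RandomReader wraps it in a *bytes.Reader, which also supports ReadAt, Seek and Len, unlike the io.LimitReader(crand.Reader, ...) expressions it replaces at the Store call sites. A short usage sketch:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/testutil"
)

func main() {
	// Same seed, same bytes: a failing test can be re-run with identical data.
	a := testutil.RandomBytes(1, 32)
	b := testutil.RandomBytes(1, 32)
	fmt.Println(bytes.Equal(a, b)) // true

	// RandomReader returns a *bytes.Reader of exactly the requested length,
	// so callers can seek, ReadAt, and ask for the size.
	r := testutil.RandomReader(1, 32)
	fmt.Println(r.Len()) // 32

	buf := make([]byte, 8)
	if _, err := r.ReadAt(buf, 4); err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(buf, a[4:12])) // true: same seed, same underlying bytes
}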