Merge pull request #19550 from ethersphere/swarm-rather-stable
swarm v0.4-rc1
commit 494f5d448a
@@ -252,15 +252,15 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
     }

     if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
-        currentConfig.LocalStoreParams.ChunkDbPath = storePath
+        currentConfig.ChunkDbPath = storePath
     }

     if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
-        currentConfig.LocalStoreParams.DbCapacity = storeCapacity
+        currentConfig.DbCapacity = storeCapacity
     }

     if ctx.GlobalIsSet(SwarmStoreCacheCapacity.Name) {
-        currentConfig.LocalStoreParams.CacheCapacity = ctx.GlobalUint(SwarmStoreCacheCapacity.Name)
+        currentConfig.CacheCapacity = ctx.GlobalUint(SwarmStoreCacheCapacity.Name)
     }

     if ctx.GlobalIsSet(SwarmBootnodeModeFlag.Name) {
@@ -447,8 +447,8 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
         t.Fatal("Expected Sync to be disabled, but is true")
     }

-    if info.LocalStoreParams.DbCapacity != 9000000 {
-        t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
+    if info.DbCapacity != 9000000 {
+        t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.DbCapacity)
     }

     if info.HiveParams.KeepAliveInterval != 6000000000 {
 118  cmd/swarm/db.go
@@ -17,6 +17,10 @@
 package main

 import (
+    "archive/tar"
     "bytes"
+    "encoding/binary"
+    "encoding/hex"
     "fmt"
     "io"
     "os"
@@ -25,10 +29,22 @@ import (
     "github.com/ethereum/go-ethereum/cmd/utils"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/log"
-    "github.com/ethereum/go-ethereum/swarm/storage"
+    "github.com/ethereum/go-ethereum/rlp"
+    "github.com/ethereum/go-ethereum/swarm/chunk"
+    "github.com/ethereum/go-ethereum/swarm/storage/localstore"
+    "github.com/syndtr/goleveldb/leveldb"
+    "github.com/syndtr/goleveldb/leveldb/opt"
     "gopkg.in/urfave/cli.v1"
 )

+var legacyKeyIndex = byte(0)
+var keyData = byte(6)
+
+type dpaDBIndex struct {
+    Idx    uint64
+    Access uint64
+}
+
 var dbCommand = cli.Command{
     Name:               "db",
     CustomHelpTemplate: helpTemplate,
@@ -67,6 +83,9 @@ The import may be quite large, consider piping the input through the Unix
 pv(1) tool to get a progress bar:

     pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
+            Flags: []cli.Flag{
+                SwarmLegacyFlag,
+            },
         },
     },
 }
@@ -77,12 +96,6 @@ func dbExport(ctx *cli.Context) {
         utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
     }

-    store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
-    if err != nil {
-        utils.Fatalf("error opening local chunk database: %s", err)
-    }
-    defer store.Close()
-
     var out io.Writer
     if args[1] == "-" {
         out = os.Stdout
@@ -95,6 +108,23 @@ func dbExport(ctx *cli.Context) {
         out = f
     }

+    isLegacy := localstore.IsLegacyDatabase(args[0])
+    if isLegacy {
+        count, err := exportLegacy(args[0], common.Hex2Bytes(args[2]), out)
+        if err != nil {
+            utils.Fatalf("error exporting legacy local chunk database: %s", err)
+        }
+
+        log.Info(fmt.Sprintf("successfully exported %d chunks from legacy db", count))
+        return
+    }
+
+    store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
+    if err != nil {
+        utils.Fatalf("error opening local chunk database: %s", err)
+    }
+    defer store.Close()
+
     count, err := store.Export(out)
     if err != nil {
         utils.Fatalf("error exporting local chunk database: %s", err)
@@ -109,6 +139,8 @@ func dbImport(ctx *cli.Context) {
         utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
     }

+    legacy := ctx.IsSet(SwarmLegacyFlag.Name)
+
     store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
     if err != nil {
         utils.Fatalf("error opening local chunk database: %s", err)
@@ -127,7 +159,7 @@ func dbImport(ctx *cli.Context) {
         in = f
     }

-    count, err := store.Import(in)
+    count, err := store.Import(in, legacy)
     if err != nil {
         utils.Fatalf("error importing local chunk database: %s", err)
     }
@@ -135,13 +167,73 @@ func dbImport(ctx *cli.Context) {
     log.Info(fmt.Sprintf("successfully imported %d chunks", count))
 }

-func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
+func openLDBStore(path string, basekey []byte) (*localstore.DB, error) {
     if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
         return nil, fmt.Errorf("invalid chunkdb path: %s", err)
     }

-    storeparams := storage.NewDefaultStoreParams()
-    ldbparams := storage.NewLDBStoreParams(storeparams, path)
-    ldbparams.BaseKey = basekey
-    return storage.NewLDBStore(ldbparams)
+    return localstore.New(path, basekey, nil)
+}
+
+func decodeIndex(data []byte, index *dpaDBIndex) error {
+    dec := rlp.NewStream(bytes.NewReader(data), 0)
+    return dec.Decode(index)
+}
+
+func getDataKey(idx uint64, po uint8) []byte {
+    key := make([]byte, 10)
+    key[0] = keyData
+    key[1] = po
+    binary.BigEndian.PutUint64(key[2:], idx)
+
+    return key
+}
+
+func exportLegacy(path string, basekey []byte, out io.Writer) (int64, error) {
+    tw := tar.NewWriter(out)
+    defer tw.Close()
+    db, err := leveldb.OpenFile(path, &opt.Options{OpenFilesCacheCapacity: 128})
+    if err != nil {
+        return 0, err
+    }
+    defer db.Close()
+
+    it := db.NewIterator(nil, nil)
+    defer it.Release()
+    var count int64
+    for ok := it.Seek([]byte{legacyKeyIndex}); ok; ok = it.Next() {
+        key := it.Key()
+        if (key == nil) || (key[0] != legacyKeyIndex) {
+            break
+        }
+
+        var index dpaDBIndex
+
+        hash := key[1:]
+        decodeIndex(it.Value(), &index)
+
+        po := uint8(chunk.Proximity(basekey, hash))
+
+        datakey := getDataKey(index.Idx, po)
+        data, err := db.Get(datakey, nil)
+        if err != nil {
+            log.Crit(fmt.Sprintf("Chunk %x found but could not be accessed: %v, %x", key, err, datakey))
+            continue
+        }
+
+        hdr := &tar.Header{
+            Name: hex.EncodeToString(hash),
+            Mode: 0644,
+            Size: int64(len(data)),
+        }
+        if err := tw.WriteHeader(hdr); err != nil {
+            return count, err
+        }
+        if _, err := tw.Write(data); err != nil {
+            return count, err
+        }
+        count++
+    }
+
+    return count, nil
 }
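For readers skimming the diff: the following is a small, self-contained Go sketch (not part of the change itself) that re-declares getDataKey from cmd/swarm/db.go above, just to make the 10-byte legacy data-key layout explicit — a keyData marker byte, the chunk's proximity order, and the big-endian entry index. The example values are arbitrary.

    // Standalone illustration of the legacy data-key layout used by exportLegacy above.
    // keyData and getDataKey are copied from the diff; the input values are made up.
    package main

    import (
        "encoding/binary"
        "fmt"
    )

    var keyData = byte(6)

    func getDataKey(idx uint64, po uint8) []byte {
        key := make([]byte, 10)
        key[0] = keyData                        // marker byte for data entries
        key[1] = po                             // proximity order of the chunk
        binary.BigEndian.PutUint64(key[2:], idx) // index from the legacy index entry
        return key
    }

    func main() {
        // Hypothetical example: index 42 at proximity order 3.
        fmt.Printf("% x\n", getDataKey(42, 3)) // 06 03 00 00 00 00 00 00 00 2a
    }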
@@ -23,6 +23,7 @@ import (
     "os"

     "github.com/ethereum/go-ethereum/cmd/utils"
+    "github.com/ethereum/go-ethereum/swarm/chunk"
     "github.com/ethereum/go-ethereum/swarm/storage"
     "gopkg.in/urfave/cli.v1"
 )
@@ -47,7 +48,7 @@ func hashes(ctx *cli.Context) {
     }
     defer f.Close()

-    fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams())
+    fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())
     refs, err := fileStore.GetAllReferences(context.TODO(), f, false)
     if err != nil {
         utils.Fatalf("%v\n", err)
@@ -17,19 +17,34 @@
 package main

 import (
+    "archive/tar"
     "bytes"
+    "compress/gzip"
     "crypto/md5"
+    "encoding/base64"
+    "encoding/hex"
     "io"
+    "io/ioutil"
     "net/http"
     "os"
+    "path"
     "runtime"
     "strings"
     "testing"

+    "github.com/ethereum/go-ethereum/cmd/swarm/testdata"
+    "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/swarm"
     "github.com/ethereum/go-ethereum/swarm/testutil"
 )

+const (
+    DATABASE_FIXTURE_BZZ_ACCOUNT = "0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
+    DATABASE_FIXTURE_PASSWORD    = "pass"
+    FIXTURE_DATADIR_PREFIX       = "swarm/bzz-0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
+    FixtureBaseKey               = "a9f22b3d77b4bdf5f3eefce995d6c8e7cecf2636f20956f08a0d1ed95adb52ad"
+)
+
 // TestCLISwarmExportImport perform the following test:
 // 1. runs swarm node
 // 2. uploads a random file
@@ -99,6 +114,112 @@ func TestCLISwarmExportImport(t *testing.T) {
     mustEqualFiles(t, bytes.NewReader(content), res.Body)
 }

+// TestExportLegacyToNew checks that an old database gets imported correctly into the new localstore structure
+// The test sequence is as follows:
+// 1. unpack database fixture to tmp dir
+// 2. try to open with new swarm binary that should complain about old database
+// 3. export from old database
+// 4. remove the chunks folder
+// 5. import the dump
+// 6. file should be accessible
+func TestExportLegacyToNew(t *testing.T) {
+    /*
+        fixture bzz account 0aa159029fa13ffa8fa1c6fff6ebceface99d6a4
+    */
+    const UPLOADED_FILE_MD5_HASH = "a001fdae53ba50cae584b8b02b06f821"
+    const UPLOADED_HASH = "67a86082ee0ea1bc7dd8d955bb1e14d04f61d55ae6a4b37b3d0296a3a95e454a"
+    tmpdir, err := ioutil.TempDir("", "swarm-test")
+    log.Trace("running legacy datastore migration test", "temp dir", tmpdir)
+    defer os.RemoveAll(tmpdir)
+    if err != nil {
+        t.Fatal(err)
+    }
+    inflateBase64Gzip(t, testdata.DATADIR_MIGRATION_FIXTURE, tmpdir)
+
+    tmpPassword := testutil.TempFileWithContent(t, DATABASE_FIXTURE_PASSWORD)
+    defer os.Remove(tmpPassword)
+
+    flags := []string{
+        "--datadir", tmpdir,
+        "--bzzaccount", DATABASE_FIXTURE_BZZ_ACCOUNT,
+        "--password", tmpPassword,
+    }
+
+    newSwarmOldDb := runSwarm(t, flags...)
+    _, matches := newSwarmOldDb.ExpectRegexp(".+")
+    newSwarmOldDb.ExpectExit()
+
+    if len(matches) == 0 {
+        t.Fatalf("stdout not matched")
+    }
+
+    if newSwarmOldDb.ExitStatus() == 0 {
+        t.Fatal("should error")
+    }
+    t.Log("exporting legacy database")
+    actualDataDir := path.Join(tmpdir, FIXTURE_DATADIR_PREFIX)
+    exportCmd := runSwarm(t, "--verbosity", "5", "db", "export", actualDataDir+"/chunks", tmpdir+"/export.tar", FixtureBaseKey)
+    exportCmd.ExpectExit()
+
+    stat, err := os.Stat(tmpdir + "/export.tar")
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // make some silly size assumption
+    if stat.Size() < 90000 {
+        t.Fatal("export size too small")
+    }
+    t.Log("removing chunk datadir")
+    err = os.RemoveAll(path.Join(actualDataDir, "chunks"))
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // start second cluster
+    cluster2 := newTestCluster(t, 1)
+    var info2 swarm.Info
+    if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
+        t.Fatal(err)
+    }
+
+    // stop second cluster, so that we close LevelDB
+    cluster2.Stop()
+    defer cluster2.Cleanup()
+
+    // import the export.tar
+    importCmd := runSwarm(t, "db", "import", "--legacy", info2.Path+"/chunks", tmpdir+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
+    importCmd.ExpectExit()
+
+    // spin second cluster back up
+    cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
+    t.Log("trying to http get the file")
+    // try to fetch imported file
+    res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + UPLOADED_HASH)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if res.StatusCode != 200 {
+        t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
+    }
+    h := md5.New()
+    if _, err := io.Copy(h, res.Body); err != nil {
+        t.Fatal(err)
+    }
+
+    sum := h.Sum(nil)
+
+    b, err := hex.DecodeString(UPLOADED_FILE_MD5_HASH)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if !bytes.Equal(sum, b) {
+        t.Fatal("should be equal")
+    }
+}
+
 func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
     h := md5.New()
     upLen, err := io.Copy(h, up)
@@ -117,3 +238,46 @@ func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
         t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one mp5=%x (length %v)", downHash, downLen, upHash, upLen)
     }
 }
+
+func inflateBase64Gzip(t *testing.T, base64File, directory string) {
+    t.Helper()
+
+    f := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64File))
+    gzf, err := gzip.NewReader(f)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    tarReader := tar.NewReader(gzf)
+
+    for {
+        header, err := tarReader.Next()
+        if err == io.EOF {
+            break
+        }
+
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        name := header.Name
+
+        switch header.Typeflag {
+        case tar.TypeDir:
+            err := os.Mkdir(path.Join(directory, name), os.ModePerm)
+            if err != nil {
+                t.Fatal(err)
+            }
+        case tar.TypeReg:
+            file, err := os.Create(path.Join(directory, name))
+            if err != nil {
+                t.Fatal(err)
+            }
+            if _, err := io.Copy(file, tarReader); err != nil {
+                t.Fatal(err)
+            }
+        default:
+            t.Fatal("shouldn't happen")
+        }
+    }
+}
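The DATADIR_MIGRATION_FIXTURE constant used above lives in cmd/swarm/testdata and is unpacked by inflateBase64Gzip at the end of this file. Purely as an illustration of the fixture format — this is an assumed helper, not the tooling the authors actually used — a datadir could be packed into such a base64(gzip(tar)) string roughly like this:

    // Hypothetical inverse of inflateBase64Gzip: tar a directory, gzip it, base64-encode it.
    package fixturegen

    import (
        "archive/tar"
        "bytes"
        "compress/gzip"
        "encoding/base64"
        "io"
        "os"
        "path/filepath"
    )

    func deflateToBase64Gzip(dir string) (string, error) {
        var buf bytes.Buffer
        gzw := gzip.NewWriter(&buf)
        tw := tar.NewWriter(gzw)

        err := filepath.Walk(dir, func(p string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            rel, err := filepath.Rel(dir, p)
            if err != nil || rel == "." {
                return err
            }
            hdr, err := tar.FileInfoHeader(info, "")
            if err != nil {
                return err
            }
            hdr.Name = rel // store paths relative to the fixture root
            if err := tw.WriteHeader(hdr); err != nil {
                return err
            }
            if info.IsDir() {
                return nil
            }
            f, err := os.Open(p)
            if err != nil {
                return err
            }
            defer f.Close()
            _, err = io.Copy(tw, f)
            return err
        })
        if err != nil {
            return "", err
        }
        if err := tw.Close(); err != nil {
            return "", err
        }
        if err := gzw.Close(); err != nil {
            return "", err
        }
        return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
    }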
@@ -182,4 +182,8 @@ var (
         Usage:  "URL of the Global Store API provider (only for testing)",
         EnvVar: SwarmGlobalstoreAPI,
     }
+    SwarmLegacyFlag = cli.BoolFlag{
+        Name:  "legacy",
+        Usage: "Use this flag when importing a db export from a legacy local store database dump (for schemas older than 'sanctuary')",
+    }
 )
@@ -26,6 +26,7 @@ import (
     "github.com/ethereum/go-ethereum/cmd/utils"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/contracts/ens"
+    "github.com/ethereum/go-ethereum/swarm/chunk"
     "github.com/ethereum/go-ethereum/swarm/storage"
     "gopkg.in/urfave/cli.v1"
 )
@@ -77,7 +78,7 @@ func hash(ctx *cli.Context) {
     defer f.Close()

     stat, _ := f.Stat()
-    fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams())
+    fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())
     addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false)
     if err != nil {
         utils.Fatalf("%v\n", err)
@@ -26,11 +26,11 @@ const (
     feedRandomDataLength = 8
 )

-func feedUploadAndSyncCmd(ctx *cli.Context, tuid string) error {
+func feedUploadAndSyncCmd(ctx *cli.Context) error {
     errc := make(chan error)

     go func() {
-        errc <- feedUploadAndSync(ctx, tuid)
+        errc <- feedUploadAndSync(ctx)
     }()

     select {
@@ -46,7 +46,7 @@ func feedUploadAndSyncCmd(ctx *cli.Context, tuid string) error {
     }
 }

-func feedUploadAndSync(c *cli.Context, tuid string) error {
+func feedUploadAndSync(c *cli.Context) error {
     log.Info("generating and uploading feeds to " + httpEndpoint(hosts[0]) + " and syncing")

     // create a random private key to sign updates with and derive the address
@@ -272,7 +272,7 @@ func feedUploadAndSync(c *cli.Context, tuid string) error {
     ruid := uuid.New()[:8]
     go func(url string, endpoint string, ruid string) {
         for {
-            err := fetch(url, endpoint, fileHash, ruid, "")
+            err := fetch(url, endpoint, fileHash, ruid)
             if err != nil {
                 continue
             }
@@ -37,17 +37,17 @@ var (
 )

 var (
     allhosts     string
     hosts        []string
     filesize     int
-    inputSeed    int
-    syncDelay    int
+    syncDelay    bool
+    inputSeed    int
     httpPort     int
     wsPort       int
     verbosity    int
     timeout      int
     single       bool
-    trackTimeout int
+    onlyUpload   bool
 )

 func main() {
@@ -87,10 +87,9 @@ func main() {
             Usage:       "file size for generated random file in KB",
             Destination: &filesize,
         },
-        cli.IntFlag{
+        cli.BoolFlag{
             Name:        "sync-delay",
-            Value:       5,
-            Usage:       "duration of delay in seconds to wait for content to be synced",
+            Usage:       "wait for content to be synced",
             Destination: &syncDelay,
         },
         cli.IntFlag{
@@ -101,7 +100,7 @@
         },
         cli.IntFlag{
             Name:        "timeout",
-            Value:       120,
+            Value:       180,
             Usage:       "timeout in seconds after which kill the process",
             Destination: &timeout,
         },
@@ -110,11 +109,10 @@
             Usage:       "whether to fetch content from a single node or from all nodes",
             Destination: &single,
         },
-        cli.IntFlag{
-            Name:        "track-timeout",
-            Value:       5,
-            Usage:       "timeout in seconds to wait for GetAllReferences to return",
-            Destination: &trackTimeout,
+        cli.BoolFlag{
+            Name:        "only-upload",
+            Usage:       "whether to only upload content to a single node without fetching",
+            Destination: &onlyUpload,
         },
     }

@@ -35,11 +35,11 @@ type uploadResult struct {
     digest []byte
 }

-func slidingWindowCmd(ctx *cli.Context, tuid string) error {
+func slidingWindowCmd(ctx *cli.Context) error {
     errc := make(chan error)

     go func() {
-        errc <- slidingWindow(ctx, tuid)
+        errc <- slidingWindow(ctx)
     }()

     err := <-errc
@@ -49,10 +49,10 @@ func slidingWindowCmd(ctx *cli.Context, tuid string) error {
     return err
 }

-func slidingWindow(ctx *cli.Context, tuid string) error {
+func slidingWindow(ctx *cli.Context) error {
     var hashes []uploadResult //swarm hashes of the uploads
     nodes := len(hosts)
-    log.Info("sliding window test started", "tuid", tuid, "nodes", nodes, "filesize(kb)", filesize, "timeout", timeout)
+    log.Info("sliding window test started", "nodes", nodes, "filesize(kb)", filesize, "timeout", timeout)
     uploadedBytes := 0
     networkDepth := 0
     errored := false
@@ -81,9 +81,13 @@ outer:
             return err
         }

-        log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash), "sleeping", syncDelay)
+        log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash), "wait for sync", syncDelay)
         hashes = append(hashes, uploadResult{hash: hash, digest: fhash})
-        time.Sleep(time.Duration(syncDelay) * time.Second)
+
+        if syncDelay {
+            waitToSync()
+        }
+
         uploadedBytes += filesize * 1000
         q := make(chan struct{}, 1)
         d := make(chan struct{})
@@ -107,7 +111,7 @@ outer:
                 start = time.Now()
                 // fetch hangs when swarm dies out, so we have to jump through a bit more hoops to actually
                 // catch the timeout, but also allow this retry logic
-                err := fetch(v.hash, httpEndpoint(hosts[idx]), v.digest, ruid, "")
+                err := fetch(v.hash, httpEndpoint(hosts[idx]), v.digest, ruid)
                 if err != nil {
                     log.Error("error fetching hash", "err", err)
                     continue
@@ -19,26 +19,27 @@ package main
 import (
     "bytes"
     "context"
+    "encoding/hex"
     "fmt"
     "io/ioutil"
     "math/rand"
     "os"
     "strings"
     "sync"
+    "sync/atomic"
     "time"

     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/metrics"
     "github.com/ethereum/go-ethereum/rpc"
-    "github.com/ethereum/go-ethereum/swarm/api"
+    "github.com/ethereum/go-ethereum/swarm/chunk"
     "github.com/ethereum/go-ethereum/swarm/storage"
     "github.com/ethereum/go-ethereum/swarm/testutil"
-    "github.com/pborman/uuid"

     cli "gopkg.in/urfave/cli.v1"
 )

-func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
+func uploadAndSyncCmd(ctx *cli.Context) error {
     // use input seed if it has been set
     if inputSeed != 0 {
         seed = inputSeed
@@ -49,7 +50,7 @@ func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
     errc := make(chan error)

     go func() {
-        errc <- uploadAndSync(ctx, randomBytes, tuid)
+        errc <- uploadAndSync(ctx, randomBytes)
     }()

     var err error
@@ -65,7 +66,7 @@ func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
     }

     // trigger debug functionality on randomBytes
-    e := trackChunks(randomBytes[:])
+    e := trackChunks(randomBytes[:], true)
     if e != nil {
         log.Error(e.Error())
     }
@@ -73,51 +74,180 @@ func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
     return err
 }

-func trackChunks(testData []byte) error {
+func trackChunks(testData []byte, submitMetrics bool) error {
     addrs, err := getAllRefs(testData)
     if err != nil {
         return err
     }

     for i, ref := range addrs {
-        log.Trace(fmt.Sprintf("ref %d", i), "ref", ref)
+        log.Debug(fmt.Sprintf("ref %d", i), "ref", ref)
     }

+    var globalYes, globalNo int
+    var globalMu sync.Mutex
+    var hasErr bool
+
+    var wg sync.WaitGroup
+    wg.Add(len(hosts))
+
+    var mu sync.Mutex                    // mutex protecting the allHostsChunks and bzzAddrs maps
+    allHostChunks := map[string]string{} // host->bitvector of presence for chunks
+    bzzAddrs := map[string]string{}      // host->bzzAddr
+
     for _, host := range hosts {
-        httpHost := fmt.Sprintf("ws://%s:%d", host, 8546)
-
-        hostChunks := []string{}
-
-        rpcClient, err := rpc.Dial(httpHost)
-        if err != nil {
-            log.Error("error dialing host", "err", err, "host", httpHost)
-            continue
-        }
-
-        var hasInfo []api.HasInfo
-        err = rpcClient.Call(&hasInfo, "bzz_has", addrs)
-        if err != nil {
-            log.Error("error calling rpc client", "err", err, "host", httpHost)
-            continue
-        }
-
-        count := 0
-        for _, info := range hasInfo {
-            if info.Has {
-                hostChunks = append(hostChunks, "1")
-            } else {
-                hostChunks = append(hostChunks, "0")
-                count++
-            }
-        }
-
-        if count == 0 {
-            log.Info("host reported to have all chunks", "host", host)
-        }
-
-        log.Trace("chunks", "chunks", strings.Join(hostChunks, ""), "host", host)
+        host := host
+        go func() {
+            defer wg.Done()
+            httpHost := fmt.Sprintf("ws://%s:%d", host, 8546)
+
+            ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+            defer cancel()
+
+            rpcClient, err := rpc.DialContext(ctx, httpHost)
+            if rpcClient != nil {
+                defer rpcClient.Close()
+            }
+            if err != nil {
+                log.Error("error dialing host", "err", err, "host", httpHost)
+                hasErr = true
+                return
+            }
+
+            hostChunks, err := getChunksBitVectorFromHost(rpcClient, addrs)
+            if err != nil {
+                log.Error("error getting chunks bit vector from host", "err", err, "host", httpHost)
+                hasErr = true
+                return
+            }
+
+            bzzAddr, err := getBzzAddrFromHost(rpcClient)
+            if err != nil {
+                log.Error("error getting bzz addrs from host", "err", err, "host", httpHost)
+                hasErr = true
+                return
+            }
+
+            mu.Lock()
+            allHostChunks[host] = hostChunks
+            bzzAddrs[host] = bzzAddr
+            mu.Unlock()
+
+            yes, no := 0, 0
+            for _, val := range hostChunks {
+                if val == '1' {
+                    yes++
+                } else {
+                    no++
+                }
+            }
+
+            if no == 0 {
+                log.Info("host reported to have all chunks", "host", host)
+            }
+
+            log.Debug("chunks", "chunks", hostChunks, "yes", yes, "no", no, "host", host)
+
+            if submitMetrics {
+                globalMu.Lock()
+                globalYes += yes
+                globalNo += no
+                globalMu.Unlock()
+            }
+        }()
     }
-    return nil
+
+    wg.Wait()
+
+    checkChunksVsMostProxHosts(addrs, allHostChunks, bzzAddrs)
+
+    if !hasErr && submitMetrics {
+        // remove the chunks stored on the uploader node
+        globalYes -= len(addrs)
+
+        metrics.GetOrRegisterCounter("deployment.chunks.yes", nil).Inc(int64(globalYes))
+        metrics.GetOrRegisterCounter("deployment.chunks.no", nil).Inc(int64(globalNo))
+        metrics.GetOrRegisterCounter("deployment.chunks.refs", nil).Inc(int64(len(addrs)))
+    }
+
+    return nil
+}
+
+// getChunksBitVectorFromHost returns a bit vector of presence for a given slice of chunks from a given host
+func getChunksBitVectorFromHost(client *rpc.Client, addrs []storage.Address) (string, error) {
+    var hostChunks string
+
+    err := client.Call(&hostChunks, "bzz_has", addrs)
+    if err != nil {
+        return "", err
+    }
+
+    return hostChunks, nil
+}
+
+// getBzzAddrFromHost returns the bzzAddr for a given host
+func getBzzAddrFromHost(client *rpc.Client) (string, error) {
+    var hive string
+
+    err := client.Call(&hive, "bzz_hive")
+    if err != nil {
+        return "", err
+    }
+
+    // we make an ugly assumption about the output format of the hive.String() method
+    // ideally we should replace this with an API call that returns the bzz addr for a given host,
+    // but this also works for now (provided we don't change the hive.String() method, which we haven't in some time
+    ss := strings.Split(strings.Split(hive, "\n")[3], " ")
+    return ss[len(ss)-1], nil
+}
+
+// checkChunksVsMostProxHosts is checking:
+// 1. whether a chunk has been found at less than 2 hosts. Considering our NN size, this should not happen.
+// 2. if a chunk is not found at its closest node. This should also not happen.
+// Together with the --only-upload flag, we could run this smoke test and make sure that our syncing
+// functionality is correct (without even trying to retrieve the content).
+//
+// addrs - a slice with all uploaded chunk refs
+// allHostChunks - host->bit vector, showing what chunks are present on what hosts
+// bzzAddrs - host->bzz address, used when determining the most proximate host for a given chunk
+func checkChunksVsMostProxHosts(addrs []storage.Address, allHostChunks map[string]string, bzzAddrs map[string]string) {
+    for k, v := range bzzAddrs {
+        log.Trace("bzzAddr", "bzz", v, "host", k)
+    }
+
+    for i := range addrs {
+        var foundAt int
+        maxProx := -1
+        var maxProxHost string
+        for host := range allHostChunks {
+            if allHostChunks[host][i] == '1' {
+                foundAt++
+            }
+
+            ba, err := hex.DecodeString(bzzAddrs[host])
+            if err != nil {
+                panic(err)
+            }
+
+            // calculate the host closest to any chunk
+            prox := chunk.Proximity(addrs[i], ba)
+            if prox > maxProx {
+                maxProx = prox
+                maxProxHost = host
+            }
+        }
+
+        if allHostChunks[maxProxHost][i] == '0' {
+            log.Error("chunk not found at max prox host", "ref", addrs[i], "host", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost])
+        } else {
+            log.Trace("chunk present at max prox host", "ref", addrs[i], "host", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost])
+        }
+
+        // if chunk found at less than 2 hosts
+        if foundAt < 2 {
+            log.Error("chunk found at less than two hosts", "foundAt", foundAt, "ref", addrs[i])
+        }
+    }
 }

 func getAllRefs(testData []byte) (storage.AddressCollection, error) {
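checkChunksVsMostProxHosts above relies on chunk.Proximity to pick, for each uploaded reference, the host whose bzz address is closest to it. As a minimal sketch — assuming Proximity returns the number of leading bits two byte strings share (the proximity order), which is how the closest host is chosen — an illustrative stand-in looks like this; the sample byte values are hypothetical:

    // Illustrative re-implementation of a proximity-order function; not the swarm code itself.
    package main

    import "fmt"

    func proximity(a, b []byte) int {
        for i := 0; i < len(a) && i < len(b); i++ {
            x := a[i] ^ b[i]
            if x == 0 {
                continue
            }
            // count the leading zero bits of the first differing byte
            po := i * 8
            for mask := byte(0x80); mask > 0 && x&mask == 0; mask >>= 1 {
                po++
            }
            return po
        }
        return 8 * min(len(a), len(b))
    }

    func min(a, b int) int {
        if a < b {
            return a
        }
        return b
    }

    func main() {
        chunkRef := []byte{0xa9, 0xf2} // hypothetical chunk address prefix
        hostAddr := []byte{0xa9, 0x72} // hypothetical bzz address prefix
        // byte 0 matches (8 bits), the first bit of byte 1 differs -> proximity order 8
        fmt.Println(proximity(chunkRef, hostAddr))
    }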
@ -126,19 +256,17 @@ func getAllRefs(testData []byte) (storage.AddressCollection, error) {
|
|||||||
return nil, fmt.Errorf("unable to create temp dir: %v", err)
|
return nil, fmt.Errorf("unable to create temp dir: %v", err)
|
||||||
}
|
}
|
||||||
defer os.RemoveAll(datadir)
|
defer os.RemoveAll(datadir)
|
||||||
fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32))
|
fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32), chunk.NewTags())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(trackTimeout)*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
reader := bytes.NewReader(testData)
|
reader := bytes.NewReader(testData)
|
||||||
return fileStore.GetAllReferences(ctx, reader, false)
|
return fileStore.GetAllReferences(context.Background(), reader, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func uploadAndSync(c *cli.Context, randomBytes []byte, tuid string) error {
|
func uploadAndSync(c *cli.Context, randomBytes []byte) error {
|
||||||
log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "tuid", tuid, "seed", seed)
|
log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "seed", seed)
|
||||||
|
|
||||||
t1 := time.Now()
|
t1 := time.Now()
|
||||||
hash, err := upload(randomBytes, httpEndpoint(hosts[0]))
|
hash, err := upload(randomBytes, httpEndpoint(hosts[0]))
|
||||||
@ -155,53 +283,94 @@ func uploadAndSync(c *cli.Context, randomBytes []byte, tuid string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("uploaded successfully", "tuid", tuid, "hash", hash, "took", t2, "digest", fmt.Sprintf("%x", fhash))
|
log.Info("uploaded successfully", "hash", hash, "took", t2, "digest", fmt.Sprintf("%x", fhash))
|
||||||
|
|
||||||
time.Sleep(time.Duration(syncDelay) * time.Second)
|
// wait to sync and log chunks before fetch attempt, only if syncDelay is set to true
|
||||||
|
if syncDelay {
|
||||||
|
waitToSync()
|
||||||
|
|
||||||
wg := sync.WaitGroup{}
|
log.Debug("chunks before fetch attempt", "hash", hash)
|
||||||
if single {
|
|
||||||
randIndex := 1 + rand.Intn(len(hosts)-1)
|
|
||||||
ruid := uuid.New()[:8]
|
|
||||||
wg.Add(1)
|
|
||||||
go func(endpoint string, ruid string) {
|
|
||||||
for {
|
|
||||||
start := time.Now()
|
|
||||||
err := fetch(hash, endpoint, fhash, ruid, tuid)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ended := time.Since(start)
|
|
||||||
|
|
||||||
metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended)
|
err = trackChunks(randomBytes, false)
|
||||||
log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
|
if err != nil {
|
||||||
wg.Done()
|
log.Error(err.Error())
|
||||||
return
|
|
||||||
}
|
|
||||||
}(httpEndpoint(hosts[randIndex]), ruid)
|
|
||||||
} else {
|
|
||||||
for _, endpoint := range hosts[1:] {
|
|
||||||
ruid := uuid.New()[:8]
|
|
||||||
wg.Add(1)
|
|
||||||
go func(endpoint string, ruid string) {
|
|
||||||
for {
|
|
||||||
start := time.Now()
|
|
||||||
err := fetch(hash, endpoint, fhash, ruid, tuid)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ended := time.Since(start)
|
|
||||||
|
|
||||||
metrics.GetOrRegisterResettingTimer("upload-and-sync.each.fetch-time", nil).Update(ended)
|
|
||||||
log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
|
|
||||||
wg.Done()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}(httpEndpoint(endpoint), ruid)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
wg.Wait()
|
|
||||||
log.Info("all hosts synced random file successfully")
|
if onlyUpload {
|
||||||
|
log.Debug("only-upload is true, stoppping test", "hash", hash)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
randIndex := 1 + rand.Intn(len(hosts)-1)
|
||||||
|
|
||||||
|
for {
|
||||||
|
start := time.Now()
|
||||||
|
err := fetch(hash, httpEndpoint(hosts[randIndex]), fhash, "")
|
||||||
|
if err != nil {
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ended := time.Since(start)
|
||||||
|
|
||||||
|
metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended)
|
||||||
|
log.Info("fetch successful", "took", ended, "endpoint", httpEndpoint(hosts[randIndex]))
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func isSyncing(wsHost string) (bool, error) {
|
||||||
|
rpcClient, err := rpc.Dial(wsHost)
|
||||||
|
if rpcClient != nil {
|
||||||
|
defer rpcClient.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Error("error dialing host", "err", err)
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var isSyncing bool
|
||||||
|
err = rpcClient.Call(&isSyncing, "bzz_isSyncing")
|
||||||
|
if err != nil {
|
||||||
|
log.Error("error calling host for isSyncing", "err", err)
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("isSyncing result", "host", wsHost, "isSyncing", isSyncing)
|
||||||
|
|
||||||
|
return isSyncing, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func waitToSync() {
|
||||||
|
t1 := time.Now()
|
||||||
|
|
||||||
|
ns := uint64(1)
|
||||||
|
|
||||||
|
for ns > 0 {
|
||||||
|
time.Sleep(3 * time.Second)
|
||||||
|
|
||||||
|
notSynced := uint64(0)
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(hosts))
|
||||||
|
for i := 0; i < len(hosts); i++ {
|
||||||
|
i := i
|
||||||
|
go func(idx int) {
|
||||||
|
stillSyncing, err := isSyncing(wsEndpoint(hosts[idx]))
|
||||||
|
|
||||||
|
if stillSyncing || err != nil {
|
||||||
|
atomic.AddUint64(¬Synced, 1)
|
||||||
|
}
|
||||||
|
wg.Done()
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
ns = atomic.LoadUint64(¬Synced)
|
||||||
|
}
|
||||||
|
|
||||||
|
t2 := time.Since(t1)
|
||||||
|
metrics.GetOrRegisterResettingTimer("upload-and-sync.single.wait-for-sync.deployment", nil).Update(t2)
|
||||||
|
}
|
||||||
|
@ -28,14 +28,14 @@ import (
|
|||||||
cli "gopkg.in/urfave/cli.v1"
|
cli "gopkg.in/urfave/cli.v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
func uploadSpeedCmd(ctx *cli.Context, tuid string) error {
|
func uploadSpeedCmd(ctx *cli.Context) error {
|
||||||
log.Info("uploading to "+hosts[0], "tuid", tuid, "seed", seed)
|
log.Info("uploading to "+hosts[0], "seed", seed)
|
||||||
randomBytes := testutil.RandomBytes(seed, filesize*1000)
|
randomBytes := testutil.RandomBytes(seed, filesize*1000)
|
||||||
|
|
||||||
errc := make(chan error)
|
errc := make(chan error)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
errc <- uploadSpeed(ctx, tuid, randomBytes)
|
errc <- uploadSpeed(ctx, randomBytes)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
@ -53,7 +53,7 @@ func uploadSpeedCmd(ctx *cli.Context, tuid string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func uploadSpeed(c *cli.Context, tuid string, data []byte) error {
|
func uploadSpeed(c *cli.Context, data []byte) error {
|
||||||
t1 := time.Now()
|
t1 := time.Now()
|
||||||
hash, err := upload(data, hosts[0])
|
hash, err := upload(data, hosts[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@@ -38,7 +38,6 @@ import (
     "github.com/ethereum/go-ethereum/swarm/api/client"
     "github.com/ethereum/go-ethereum/swarm/spancontext"
     opentracing "github.com/opentracing/opentracing-go"
-    "github.com/pborman/uuid"
     cli "gopkg.in/urfave/cli.v1"
 )

@@ -59,28 +58,25 @@ func wsEndpoint(host string) string {
     return fmt.Sprintf("ws://%s:%d", host, wsPort)
 }

-func wrapCliCommand(name string, command func(*cli.Context, string) error) func(*cli.Context) error {
+func wrapCliCommand(name string, command func(*cli.Context) error) func(*cli.Context) error {
     return func(ctx *cli.Context) error {
         log.PrintOrigins(true)
         log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(false))))

-        // test uuid
-        tuid := uuid.New()[:8]
-
         commandName = name

         hosts = strings.Split(allhosts, ",")

         defer func(now time.Time) {
             totalTime := time.Since(now)
-            log.Info("total time", "tuid", tuid, "time", totalTime, "kb", filesize)
+            log.Info("total time", "time", totalTime, "kb", filesize)
             metrics.GetOrRegisterResettingTimer(name+".total-time", nil).Update(totalTime)
         }(time.Now())

-        log.Info("smoke test starting", "tuid", tuid, "task", name, "timeout", timeout)
+        log.Info("smoke test starting", "task", name, "timeout", timeout)
         metrics.GetOrRegisterCounter(name, nil).Inc(1)

-        return command(ctx, tuid)
+        return command(ctx)
     }
 }

@@ -142,11 +138,11 @@ func fetchFeed(topic string, user string, endpoint string, original []byte, ruid
 }

 // fetch is getting the requested `hash` from the `endpoint` and compares it with the `original` file
-func fetch(hash string, endpoint string, original []byte, ruid string, tuid string) error {
+func fetch(hash string, endpoint string, original []byte, ruid string) error {
     ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch")
     defer sp.Finish()

-    log.Info("http get request", "tuid", tuid, "ruid", ruid, "endpoint", endpoint, "hash", hash)
+    log.Info("http get request", "ruid", ruid, "endpoint", endpoint, "hash", hash)

     var tn time.Time
     reqUri := endpoint + "/bzz:/" + hash + "/"
@@ -170,7 +166,7 @@ func fetch(hash string, endpoint string, original []byte, ruid string, tuid stri
         log.Error(err.Error(), "ruid", ruid)
         return err
     }
-    log.Info("http get response", "tuid", tuid, "ruid", ruid, "endpoint", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
+    log.Info("http get response", "ruid", ruid, "endpoint", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)

     if res.StatusCode != 200 {
         err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode)
@@ -21,7 +21,6 @@ import (
     "fmt"
     "io/ioutil"
     "os"
-    "runtime"
     "sort"
     "strconv"
     "strings"
@@ -34,9 +33,7 @@ import (
 // It runs a few "create" commands with different flag values and loads generated
 // snapshot files to validate their content.
 func TestSnapshotCreate(t *testing.T) {
-    if runtime.GOOS == "windows" {
-        t.Skip()
-    }
+    t.Skip("test is flaky. disabling until underlying problem is addressed")

     for _, v := range []struct {
         name string
 1390  cmd/swarm/testdata/datastore_fixture.go (vendored, new file)
       File diff suppressed because it is too large
@@ -243,7 +243,7 @@ func (p *Peer) Run(handler func(ctx context.Context, msg interface{}) error) err
 // Drop disconnects a peer.
 // TODO: may need to implement protocol drop only? don't want to kick off the peer
 // if they are useful for other protocols
-func (p *Peer) Drop(err error) {
+func (p *Peer) Drop() {
     p.Disconnect(p2p.DiscSubprotocolError)
 }

@@ -254,6 +254,7 @@ func (p *Peer) Drop(err error) {
 func (p *Peer) Send(ctx context.Context, msg interface{}) error {
     defer metrics.GetOrRegisterResettingTimer("peer.send_t", nil).UpdateSince(time.Now())
     metrics.GetOrRegisterCounter("peer.send", nil).Inc(1)
+    metrics.GetOrRegisterCounter(fmt.Sprintf("peer.send.%T", msg), nil).Inc(1)

     var b bytes.Buffer
     if tracing.Enabled {
@@ -291,7 +292,7 @@ func (p *Peer) Send(ctx context.Context, msg interface{}) error {
     if p.spec.Hook != nil {
         err := p.spec.Hook.Send(p, wmsg.Size, msg)
         if err != nil {
-            p.Drop(err)
+            p.Drop()
             return err
         }
     }
@@ -126,7 +126,7 @@ func newProtocol(pp *p2ptest.TestPeerPool) func(*p2p.Peer, p2p.MsgReadWriter) er
         case *kill:
             // demonstrates use of peerPool, killing another peer connection as a response to a message
             id := msg.C
-            pp.Get(id).Drop(errors.New("killed"))
+            pp.Get(id).Drop()
             return nil

         case *drop:
@@ -269,6 +269,7 @@ func TestProtocolHook(t *testing.T) {
         panic(err)
     }
     tester := p2ptest.NewProtocolTester(prvkey, 2, runFunc)
+    defer tester.Stop()
     err = tester.TestExchanges(p2ptest.Exchange{
         Expects: []p2ptest.Expect{
             {
@@ -26,7 +26,7 @@ import (

 type TestPeer interface {
     ID() enode.ID
-    Drop(error)
+    Drop()
 }

 // TestPeerPool is an example peerPool to demonstrate registration of peer connections
@ -41,6 +41,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/contracts/ens"
|
"github.com/ethereum/go-ethereum/contracts/ens"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||||
@ -53,8 +54,6 @@ import (
|
|||||||
var (
|
var (
|
||||||
apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
|
apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
|
||||||
apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil)
|
apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil)
|
||||||
apiPutCount = metrics.NewRegisteredCounter("api.put.count", nil)
|
|
||||||
apiPutFail = metrics.NewRegisteredCounter("api.put.fail", nil)
|
|
||||||
apiGetCount = metrics.NewRegisteredCounter("api.get.count", nil)
|
apiGetCount = metrics.NewRegisteredCounter("api.get.count", nil)
|
||||||
apiGetNotFound = metrics.NewRegisteredCounter("api.get.notfound", nil)
|
apiGetNotFound = metrics.NewRegisteredCounter("api.get.notfound", nil)
|
||||||
apiGetHTTP300 = metrics.NewRegisteredCounter("api.get.http.300", nil)
|
apiGetHTTP300 = metrics.NewRegisteredCounter("api.get.http.300", nil)
|
||||||
@ -188,15 +187,17 @@ type API struct {
|
|||||||
feed *feed.Handler
|
feed *feed.Handler
|
||||||
fileStore *storage.FileStore
|
fileStore *storage.FileStore
|
||||||
dns Resolver
|
dns Resolver
|
||||||
|
Tags *chunk.Tags
|
||||||
Decryptor func(context.Context, string) DecryptFunc
|
Decryptor func(context.Context, string) DecryptFunc
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAPI the api constructor initialises a new API instance.
|
// NewAPI the api constructor initialises a new API instance.
|
||||||
func NewAPI(fileStore *storage.FileStore, dns Resolver, feedHandler *feed.Handler, pk *ecdsa.PrivateKey) (self *API) {
|
func NewAPI(fileStore *storage.FileStore, dns Resolver, feedHandler *feed.Handler, pk *ecdsa.PrivateKey, tags *chunk.Tags) (self *API) {
|
||||||
self = &API{
|
self = &API{
|
||||||
fileStore: fileStore,
|
fileStore: fileStore,
|
||||||
dns: dns,
|
dns: dns,
|
||||||
feed: feedHandler,
|
feed: feedHandler,
|
||||||
|
Tags: tags,
|
||||||
Decryptor: func(ctx context.Context, credentials string) DecryptFunc {
|
Decryptor: func(ctx context.Context, credentials string) DecryptFunc {
|
||||||
return self.doDecrypt(ctx, credentials, pk)
|
return self.doDecrypt(ctx, credentials, pk)
|
||||||
},
|
},
|
||||||
@@ -297,31 +298,6 @@ func (a *API) ResolveURI(ctx context.Context, uri *URI, credentials string) (storage.Address, error) {
 	return addr, nil
 }

-// Put provides singleton manifest creation on top of FileStore store
-func (a *API) Put(ctx context.Context, content string, contentType string, toEncrypt bool) (k storage.Address, wait func(context.Context) error, err error) {
-	apiPutCount.Inc(1)
-	r := strings.NewReader(content)
-	key, waitContent, err := a.fileStore.Store(ctx, r, int64(len(content)), toEncrypt)
-	if err != nil {
-		apiPutFail.Inc(1)
-		return nil, nil, err
-	}
-	manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
-	r = strings.NewReader(manifest)
-	key, waitManifest, err := a.fileStore.Store(ctx, r, int64(len(manifest)), toEncrypt)
-	if err != nil {
-		apiPutFail.Inc(1)
-		return nil, nil, err
-	}
-	return key, func(ctx context.Context) error {
-		err := waitContent(ctx)
-		if err != nil {
-			return err
-		}
-		return waitManifest(ctx)
-	}, nil
-}
-
 // Get uses iterative manifest retrieval and prefix matching
 // to resolve basePath to content using FileStore retrieve
 // it returns a section reader, mimeType, status, the key of the actual content and an error
@@ -19,6 +19,7 @@ package api
 import (
 	"bytes"
 	"context"
+	crand "crypto/rand"
 	"errors"
 	"flag"
 	"fmt"
@@ -26,13 +27,16 @@ import (
 	"io/ioutil"
 	"math/big"
 	"os"
+	"strings"
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/sctx"
 	"github.com/ethereum/go-ethereum/swarm/storage"
+	"github.com/ethereum/go-ethereum/swarm/testutil"
 )

 func init() {
@@ -41,19 +45,21 @@ func init() {
 	log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
 }

-func testAPI(t *testing.T, f func(*API, bool)) {
-	datadir, err := ioutil.TempDir("", "bzz-test")
-	if err != nil {
-		t.Fatalf("unable to create temp dir: %v", err)
+func testAPI(t *testing.T, f func(*API, *chunk.Tags, bool)) {
+	for _, v := range []bool{true, false} {
+		datadir, err := ioutil.TempDir("", "bzz-test")
+		if err != nil {
+			t.Fatalf("unable to create temp dir: %v", err)
+		}
+		defer os.RemoveAll(datadir)
+		tags := chunk.NewTags()
+		fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32), tags)
+		if err != nil {
+			return
+		}
+		api := NewAPI(fileStore, nil, nil, nil, tags)
+		f(api, tags, v)
 	}
-	defer os.RemoveAll(datadir)
-	fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32))
-	if err != nil {
-		return
-	}
-	api := NewAPI(fileStore, nil, nil, nil)
-	f(api, false)
-	f(api, true)
 }

 type testResponse struct {
@@ -61,6 +67,13 @@ type testResponse struct {
 	*Response
 }

+type Response struct {
+	MimeType string
+	Status   int
+	Size     int64
+	Content  string
+}
+
 func checkResponse(t *testing.T, resp *testResponse, exp *Response) {

 	if resp.MimeType != exp.MimeType {
@@ -111,15 +124,14 @@ func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse {
 	}
 	reader.Seek(0, 0)
 	return &testResponse{reader, &Response{mimeType, status, size, string(s)}}
-	// return &testResponse{reader, &Response{mimeType, status, reader.Size(), nil}}
 }

 func TestApiPut(t *testing.T) {
-	testAPI(t, func(api *API, toEncrypt bool) {
+	testAPI(t, func(api *API, tags *chunk.Tags, toEncrypt bool) {
 		content := "hello"
 		exp := expResponse(content, "text/plain", 0)
 		ctx := context.TODO()
-		addr, wait, err := api.Put(ctx, content, exp.MimeType, toEncrypt)
+		addr, wait, err := putString(ctx, api, content, exp.MimeType, toEncrypt)
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
@@ -129,6 +141,40 @@ func TestApiPut(t *testing.T) {
 		}
 		resp := testGet(t, api, addr.Hex(), "")
 		checkResponse(t, resp, exp)
+		tag := tags.All()[0]
+		testutil.CheckTag(t, tag, 2, 2, 0, 2) //1 chunk data, 1 chunk manifest
+	})
+}
+
+// TestApiTagLarge tests that the the number of chunks counted is larger for a larger input
+func TestApiTagLarge(t *testing.T) {
+	const contentLength = 4096 * 4095
+	testAPI(t, func(api *API, tags *chunk.Tags, toEncrypt bool) {
+		randomContentReader := io.LimitReader(crand.Reader, int64(contentLength))
+		tag, err := api.Tags.New("unnamed-tag", 0)
+		if err != nil {
+			t.Fatal(err)
+		}
+		ctx := sctx.SetTag(context.Background(), tag.Uid)
+		key, waitContent, err := api.Store(ctx, randomContentReader, int64(contentLength), toEncrypt)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = waitContent(ctx)
+		if err != nil {
+			t.Fatal(err)
+		}
+		tag.DoneSplit(key)
+
+		if toEncrypt {
+			tag := tags.All()[0]
+			expect := int64(4095 + 64 + 1)
+			testutil.CheckTag(t, tag, expect, expect, 0, expect)
+		} else {
+			tag := tags.All()[0]
+			expect := int64(4095 + 32 + 1)
+			testutil.CheckTag(t, tag, expect, expect, 0, expect)
+		}
 	})
 }

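The expected totals in TestApiTagLarge follow directly from the chunk-tree arithmetic: 4096 x 4095 bytes split into 4096-byte chunks gives 4095 data chunks; with the plain branching factor of 128 those need 32 intermediate chunks plus one root, while the encrypted trie branches by 64 and needs 64 intermediates plus one root. A standalone sketch of that check (not part of the commit, and only valid here because the intermediate layer fits under a single root):

package main

import (
	"fmt"
	"math"
)

// expectedChunks mirrors the counts asserted in TestApiTagLarge:
// data chunks + intermediate chunks + 1 root chunk.
func expectedChunks(contentLength int64, branching int64) int64 {
	data := int64(math.Ceil(float64(contentLength) / 4096))
	intermediate := int64(math.Ceil(float64(data) / float64(branching)))
	return data + intermediate + 1
}

func main() {
	const contentLength = 4096 * 4095
	fmt.Println(expectedChunks(contentLength, 128)) // 4095 + 32 + 1 = 4128 (plain)
	fmt.Println(expectedChunks(contentLength, 64))  // 4095 + 64 + 1 = 4160 (encrypted)
}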
@@ -391,7 +437,7 @@ func TestDecryptOriginForbidden(t *testing.T) {
 		Access: &AccessEntry{Type: AccessTypePass},
 	}

-	api := NewAPI(nil, nil, nil, nil)
+	api := NewAPI(nil, nil, nil, nil, chunk.NewTags())

 	f := api.Decryptor(ctx, "")
 	err := f(me)
@@ -425,7 +471,7 @@ func TestDecryptOrigin(t *testing.T) {
 		Access: &AccessEntry{Type: AccessTypePass},
 	}

-	api := NewAPI(nil, nil, nil, nil)
+	api := NewAPI(nil, nil, nil, nil, chunk.NewTags())

 	f := api.Decryptor(ctx, "")
 	err := f(me)
@@ -500,3 +546,31 @@ func TestDetectContentType(t *testing.T) {
 		})
 	}
 }
+
+// putString provides singleton manifest creation on top of api.API
+func putString(ctx context.Context, a *API, content string, contentType string, toEncrypt bool) (k storage.Address, wait func(context.Context) error, err error) {
+	r := strings.NewReader(content)
+	tag, err := a.Tags.New("unnamed-tag", 0)
+
+	log.Trace("created new tag", "uid", tag.Uid)
+
+	cCtx := sctx.SetTag(ctx, tag.Uid)
+	key, waitContent, err := a.Store(cCtx, r, int64(len(content)), toEncrypt)
+	if err != nil {
+		return nil, nil, err
+	}
+	manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
+	r = strings.NewReader(manifest)
+	key, waitManifest, err := a.Store(cCtx, r, int64(len(manifest)), toEncrypt)
+	if err != nil {
+		return nil, nil, err
+	}
+	tag.DoneSplit(key)
+	return key, func(ctx context.Context) error {
+		err := waitContent(ctx)
+		if err != nil {
+			return err
+		}
+		return waitManifest(ctx)
+	}, nil
+}
@@ -40,6 +40,7 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/swarm/api"
+	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
 	"github.com/ethereum/go-ethereum/swarm/spancontext"
 	"github.com/ethereum/go-ethereum/swarm/storage/feed"
 	"github.com/pborman/uuid"
@@ -75,6 +76,8 @@ func (c *Client) UploadRaw(r io.Reader, size int64, toEncrypt bool) (string, error) {
 		return "", err
 	}
 	req.ContentLength = size
+	req.Header.Set(swarmhttp.SwarmTagHeaderName, fmt.Sprintf("raw_upload_%d", time.Now().Unix()))
+
 	res, err := http.DefaultClient.Do(req)
 	if err != nil {
 		return "", err
@@ -111,6 +114,7 @@ func (c *Client) DownloadRaw(hash string) (io.ReadCloser, bool, error) {
 type File struct {
 	io.ReadCloser
 	api.ManifestEntry
+	Tag string
 }

 // Open opens a local file which can then be passed to client.Upload to upload
@@ -139,6 +143,7 @@ func Open(path string) (*File, error) {
 			Size:    stat.Size(),
 			ModTime: stat.ModTime(),
 		},
+		Tag: filepath.Base(path),
 	}, nil
 }

@@ -422,6 +427,7 @@ func (c *Client) List(hash, prefix, credentials string) (*api.ManifestList, error) {
 // Uploader uploads files to swarm using a provided UploadFn
 type Uploader interface {
 	Upload(UploadFn) error
+	Tag() string
 }

 type UploaderFunc func(UploadFn) error
@@ -430,12 +436,23 @@ func (u UploaderFunc) Upload(upload UploadFn) error {
 	return u(upload)
 }

+func (u UploaderFunc) Tag() string {
+	return fmt.Sprintf("multipart_upload_%d", time.Now().Unix())
+}
+
+// DirectoryUploader implements Uploader
+var _ Uploader = &DirectoryUploader{}
+
 // DirectoryUploader uploads all files in a directory, optionally uploading
 // a file to the default path
 type DirectoryUploader struct {
 	Dir string
 }

+func (d *DirectoryUploader) Tag() string {
+	return filepath.Base(d.Dir)
+}
+
 // Upload performs the upload of the directory and default path
 func (d *DirectoryUploader) Upload(upload UploadFn) error {
 	return filepath.Walk(d.Dir, func(path string, f os.FileInfo, err error) error {
@@ -458,11 +475,17 @@ func (d *DirectoryUploader) Upload(upload UploadFn) error {
 	})
 }

+var _ Uploader = &FileUploader{}
+
 // FileUploader uploads a single file
 type FileUploader struct {
 	File *File
 }

+func (f *FileUploader) Tag() string {
+	return f.File.Tag
+}
+
 // Upload performs the upload of the file
 func (f *FileUploader) Upload(upload UploadFn) error {
 	return upload(f.File)
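Any type that satisfies the extended Uploader interface can now name its own tag. A minimal illustrative implementation, written against the client package as it appears in this diff; the uploader type, file name and tag name are invented for the example:

package main

import (
	"bytes"
	"io/ioutil"

	"github.com/ethereum/go-ethereum/swarm/api"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)

// inMemoryUploader is a hypothetical Uploader that serves a single
// in-memory file and reports a fixed tag name.
type inMemoryUploader struct {
	name string
	data []byte
}

func (u *inMemoryUploader) Tag() string { return u.name }

func (u *inMemoryUploader) Upload(upload swarm.UploadFn) error {
	return upload(&swarm.File{
		ReadCloser: ioutil.NopCloser(bytes.NewReader(u.data)),
		ManifestEntry: api.ManifestEntry{
			Path:        "hello.txt",
			ContentType: "text/plain",
			Size:        int64(len(u.data)),
		},
		Tag: u.name,
	})
}

// Compile-time check that the sketch satisfies the interface.
var _ swarm.Uploader = &inMemoryUploader{}

func main() {
	_ = &inMemoryUploader{name: "example-tag", data: []byte("hello")}
}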
@@ -509,6 +532,14 @@ func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, toEncrypt bool) (string, error) {
 		req.URL.RawQuery = q.Encode()
 	}

+	tag := uploader.Tag()
+	if tag == "" {
+		tag = "unnamed_tag_" + fmt.Sprintf("%d", time.Now().Unix())
+	}
+	log.Trace("setting upload tag", "tag", tag)
+
+	req.Header.Set(swarmhttp.SwarmTagHeaderName, tag)
+
 	// use 'Expect: 100-continue' so we don't send the request body if
 	// the server refuses the request
 	req.Header.Set("Expect", "100-continue")
@@ -574,6 +605,7 @@ func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error) {

 	mw := multipart.NewWriter(reqW)
 	req.Header.Set("Content-Type", fmt.Sprintf("multipart/form-data; boundary=%q", mw.Boundary()))
+	req.Header.Set(swarmhttp.SwarmTagHeaderName, fmt.Sprintf("multipart_upload_%d", time.Now().Unix()))

 	// define an UploadFn which adds files to the multipart form
 	uploadFn := func(file *File) error {
@@ -25,16 +25,14 @@ import (
 	"sort"
 	"testing"

-	"github.com/ethereum/go-ethereum/swarm/testutil"
-
-	"github.com/ethereum/go-ethereum/swarm/storage"
-	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/swarm/api"
 	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
+	"github.com/ethereum/go-ethereum/swarm/storage"
 	"github.com/ethereum/go-ethereum/swarm/storage/feed"
+	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
+	"github.com/ethereum/go-ethereum/swarm/testutil"
 )

 func serverFunc(api *api.API) swarmhttp.TestServer {
@@ -68,6 +66,10 @@ func testClientUploadDownloadRaw(toEncrypt bool, t *testing.T) {
 		t.Fatal(err)
 	}

+	// check the tag was created successfully
+	tag := srv.Tags.All()[0]
+	testutil.CheckTag(t, tag, 1, 1, 0, 1)
+
 	// check we can download the same data
 	res, isEncrypted, err := client.DownloadRaw(hash)
 	if err != nil {
@@ -209,6 +211,10 @@ func TestClientUploadDownloadDirectory(t *testing.T) {
 		t.Fatalf("error uploading directory: %s", err)
 	}

+	// check the tag was created successfully
+	tag := srv.Tags.All()[0]
+	testutil.CheckTag(t, tag, 9, 9, 0, 9)
+
 	// check we can download the individual files
 	checkDownloadFile := func(path string, expected []byte) {
 		file, err := client.Download(hash, path)
@@ -323,6 +329,7 @@ func TestClientMultipartUpload(t *testing.T) {
 	defer srv.Close()

 	// define an uploader which uploads testDirFiles with some data
+	// note: this test should result in SEEN chunks. assert accordingly
 	data := []byte("some-data")
 	uploader := UploaderFunc(func(upload UploadFn) error {
 		for _, name := range testDirFiles {
@@ -348,6 +355,10 @@ func TestClientMultipartUpload(t *testing.T) {
 		t.Fatal(err)
 	}

+	// check the tag was created successfully
+	tag := srv.Tags.All()[0]
+	testutil.CheckTag(t, tag, 9, 9, 7, 9)
+
 	// check we can download the individual files
 	checkDownloadFile := func(path string) {
 		file, err := client.Download(hash, path)
@@ -45,7 +45,13 @@ const (
 type Config struct {
 	// serialised/persisted fields
 	*storage.FileStoreParams
-	*storage.LocalStoreParams
+
+	// LocalStore
+	ChunkDbPath   string
+	DbCapacity    uint64
+	CacheCapacity uint
+	BaseKey       []byte
+
 	*network.HiveParams
 	Swap *swap.LocalProfile
 	Pss  *pss.PssParams
@@ -78,7 +84,6 @@ type Config struct {
 func NewConfig() (c *Config) {

 	c = &Config{
-		LocalStoreParams: storage.NewDefaultLocalStoreParams(),
 		FileStoreParams:  storage.NewFileStoreParams(),
 		HiveParams:       network.NewHiveParams(),
 		Swap:             swap.NewDefaultSwapParams(),
@@ -130,8 +135,9 @@ func (c *Config) Init(prvKey *ecdsa.PrivateKey, nodeKey *ecdsa.PrivateKey) error {
 		c.Swap.Init(c.Contract, prvKey)
 	}

-	c.LocalStoreParams.Init(c.Path)
-	c.LocalStoreParams.BaseKey = common.FromHex(c.BzzKey)
+	c.privateKey = prvKey
+	c.ChunkDbPath = filepath.Join(c.Path, "chunks")
+	c.BaseKey = common.FromHex(c.BzzKey)

 	c.Pss = c.Pss.WithPrivateKey(c.privateKey)
 	return nil
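To make the flattened configuration concrete, a short sketch of setting the new LocalStore fields directly on the config object; the path and capacity values are placeholders (the capacities mirror the values used by the test server later in this diff), not defaults defined by the commit:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/ethereum/go-ethereum/swarm/api"
)

func main() {
	c := api.NewConfig()
	c.Path = "/tmp/swarm-example" // placeholder data directory

	// The LocalStore settings now live directly on Config instead of
	// being nested inside *storage.LocalStoreParams.
	c.ChunkDbPath = filepath.Join(c.Path, "chunks")
	c.DbCapacity = 5000000
	c.CacheCapacity = 5000

	fmt.Println(c.ChunkDbPath, c.DbCapacity, c.CacheCapacity)
}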
@@ -41,7 +41,6 @@ func TestConfig(t *testing.T) {
 	one := NewConfig()
 	two := NewConfig()

-	one.LocalStoreParams = two.LocalStoreParams
 	if equal := reflect.DeepEqual(one, two); !equal {
 		t.Fatal("Two default configs are not equal")
 	}
@@ -25,13 +25,14 @@ import (
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )

 var testDownloadDir, _ = ioutil.TempDir(os.TempDir(), "bzz-test")

 func testFileSystem(t *testing.T, f func(*FileSystem, bool)) {
-	testAPI(t, func(api *API, toEncrypt bool) {
+	testAPI(t, func(api *API, _ *chunk.Tags, toEncrypt bool) {
 		f(NewFileSystem(api), toEncrypt)
 	})
 }
@@ -9,6 +9,7 @@ import (

 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/swarm/api"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/sctx"
 	"github.com/ethereum/go-ethereum/swarm/spancontext"
@@ -86,6 +87,54 @@ func InitLoggingResponseWriter(h http.Handler) http.Handler {
 	})
 }

+// InitUploadTag creates a new tag for an upload to the local HTTP proxy
+// if a tag is not named using the SwarmTagHeaderName, a fallback name will be used
+// when the Content-Length header is set, an ETA on chunking will be available since the
+// number of chunks to be split is known in advance (not including enclosing manifest chunks)
+// the tag can later be accessed using the appropriate identifier in the request context
+func InitUploadTag(h http.Handler, tags *chunk.Tags) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var (
+			tagName        string
+			err            error
+			estimatedTotal int64 = 0
+			contentType    = r.Header.Get("Content-Type")
+			headerTag      = r.Header.Get(SwarmTagHeaderName)
+		)
+		if headerTag != "" {
+			tagName = headerTag
+			log.Trace("got tag name from http header", "tagName", tagName)
+		} else {
+			tagName = fmt.Sprintf("unnamed_tag_%d", time.Now().Unix())
+		}
+
+		if !strings.Contains(contentType, "multipart") && r.ContentLength > 0 {
+			log.Trace("calculating tag size", "contentType", contentType, "contentLength", r.ContentLength)
+			uri := GetURI(r.Context())
+			if uri != nil {
+				log.Debug("got uri from context")
+				if uri.Addr == "encrypt" {
+					estimatedTotal = calculateNumberOfChunks(r.ContentLength, true)
+				} else {
+					estimatedTotal = calculateNumberOfChunks(r.ContentLength, false)
+				}
+			}
+		}
+
+		log.Trace("creating tag", "tagName", tagName, "estimatedTotal", estimatedTotal)
+
+		t, err := tags.New(tagName, estimatedTotal)
+		if err != nil {
+			log.Error("error creating tag", "err", err, "tagName", tagName)
+		}
+
+		log.Trace("setting tag id to context", "uid", t.Uid)
+		ctx := sctx.SetTag(r.Context(), t.Uid)
+
+		h.ServeHTTP(w, r.WithContext(ctx))
+	})
+}
+
 func InstrumentOpenTracing(h http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		uri := GetURI(r.Context())
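Seen from the outside, the middleware only needs the x-swarm-tag header on an upload; the chunk estimate is derived from Content-Length. A rough sketch of such a request against a local gateway; the localhost:8500 address is an assumption for illustration and not part of this commit:

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	body := bytes.NewReader([]byte("hello swarm"))

	// POST to the bzz:/ endpoint that the server wires up below.
	req, err := http.NewRequest("POST", "http://localhost:8500/bzz:/", body)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "text/plain")
	// Name the upload tag explicitly; otherwise InitUploadTag falls back
	// to an "unnamed_tag_<unix time>" name.
	req.Header.Set("x-swarm-tag", "my-upload")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}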
@@ -79,7 +79,7 @@ func respondTemplate(w http.ResponseWriter, r *http.Request, templateName, msg string, code int) {
 }

 func respondError(w http.ResponseWriter, r *http.Request, msg string, code int) {
-	log.Info("respondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()), "code", code)
+	log.Info("respondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()), "code", code, "msg", msg)
 	respondTemplate(w, r, "error", msg, code)
 }

@@ -26,6 +26,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"math"
 	"mime"
 	"mime/multipart"
 	"net/http"
@@ -38,7 +39,9 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/swarm/api"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
+	"github.com/ethereum/go-ethereum/swarm/sctx"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"github.com/ethereum/go-ethereum/swarm/storage/feed"
 	"github.com/rs/cors"
@@ -60,6 +63,8 @@ var (
 	getListFail = metrics.NewRegisteredCounter("api.http.get.list.fail", nil)
 )

+const SwarmTagHeaderName = "x-swarm-tag"
+
 type methodHandler map[string]http.Handler

 func (m methodHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
@@ -94,6 +99,12 @@ func NewServer(api *api.API, corsString string) *Server {
 		InstrumentOpenTracing,
 	}

+	tagAdapter := Adapter(func(h http.Handler) http.Handler {
+		return InitUploadTag(h, api.Tags)
+	})
+
+	defaultPostMiddlewares := append(defaultMiddlewares, tagAdapter)
+
 	mux := http.NewServeMux()
 	mux.Handle("/bzz:/", methodHandler{
 		"GET": Adapt(
@@ -102,7 +113,7 @@ func NewServer(api *api.API, corsString string) *Server {
 		),
 		"POST": Adapt(
 			http.HandlerFunc(server.HandlePostFiles),
-			defaultMiddlewares...,
+			defaultPostMiddlewares...,
 		),
 		"DELETE": Adapt(
 			http.HandlerFunc(server.HandleDelete),
@@ -116,7 +127,7 @@ func NewServer(api *api.API, corsString string) *Server {
 		),
 		"POST": Adapt(
 			http.HandlerFunc(server.HandlePostRaw),
-			defaultMiddlewares...,
+			defaultPostMiddlewares...,
 		),
 	})
 	mux.Handle("/bzz-immutable:/", methodHandler{
@@ -230,6 +241,12 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *http.Request) {
 	ruid := GetRUID(r.Context())
 	log.Debug("handle.post.raw", "ruid", ruid)

+	tagUid := sctx.GetTag(r.Context())
+	tag, err := s.api.Tags.Get(tagUid)
+	if err != nil {
+		log.Error("handle post raw got an error retrieving tag for DoneSplit", "tagUid", tagUid, "err", err)
+	}
+
 	postRawCount.Inc(1)

 	toEncrypt := false
@@ -256,13 +273,16 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	addr, _, err := s.api.Store(r.Context(), r.Body, r.ContentLength, toEncrypt)
+	addr, wait, err := s.api.Store(r.Context(), r.Body, r.ContentLength, toEncrypt)
 	if err != nil {
 		postRawFail.Inc(1)
 		respondError(w, r, err.Error(), http.StatusInternalServerError)
 		return
 	}

+	wait(r.Context())
+	tag.DoneSplit(addr)
+
 	log.Debug("stored content", "ruid", ruid, "key", addr)

 	w.Header().Set("Content-Type", "text/plain")
@@ -311,7 +331,6 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) {
 		}
 		log.Debug("new manifest", "ruid", ruid, "key", addr)
 	}
-
 	newAddr, err := s.api.UpdateManifest(r.Context(), addr, func(mw *api.ManifestWriter) error {
 		switch contentType {
 		case "application/x-tar":
@@ -334,6 +353,15 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	tagUid := sctx.GetTag(r.Context())
+	tag, err := s.api.Tags.Get(tagUid)
+	if err != nil {
+		log.Error("got an error retrieving tag for DoneSplit", "tagUid", tagUid, "err", err)
+	}
+
+	log.Debug("done splitting, setting tag total", "SPLIT", tag.Get(chunk.StateSplit), "TOTAL", tag.Total())
+	tag.DoneSplit(newAddr)
+
 	log.Debug("stored content", "ruid", ruid, "key", newAddr)

 	w.Header().Set("Content-Type", "text/plain")
@@ -342,7 +370,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) {
 }

 func (s *Server) handleTarUpload(r *http.Request, mw *api.ManifestWriter) (storage.Address, error) {
-	log.Debug("handle.tar.upload", "ruid", GetRUID(r.Context()))
+	log.Debug("handle.tar.upload", "ruid", GetRUID(r.Context()), "tag", sctx.GetTag(r.Context()))

 	defaultPath := r.URL.Query().Get("defaultpath")

@@ -837,6 +865,28 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) {
 	http.ServeContent(w, r, fileName, time.Now(), newBufferedReadSeeker(reader, getFileBufferSize))
 }

+// calculateNumberOfChunks calculates the number of chunks in an arbitrary content length
+func calculateNumberOfChunks(contentLength int64, isEncrypted bool) int64 {
+	if contentLength < 4096 {
+		return 1
+	}
+	branchingFactor := 128
+	if isEncrypted {
+		branchingFactor = 64
+	}
+
+	dataChunks := math.Ceil(float64(contentLength) / float64(4096))
+	totalChunks := dataChunks
+	intermediate := dataChunks / float64(branchingFactor)
+
+	for intermediate > 1 {
+		totalChunks += math.Ceil(intermediate)
+		intermediate = intermediate / float64(branchingFactor)
+	}
+
+	return int64(totalChunks) + 1
+}
+
 // The size of buffer used for bufio.Reader on LazyChunkReader passed to
 // http.ServeContent in HandleGetFile.
 // Warning: This value influences the number of chunk requests and chunker join goroutines
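Tracing calculateNumberOfChunks for the 1,000,000-byte request used by the tests below makes the estimate concrete: ceil(1000000/4096) = 245 data chunks; the first intermediate level is 245/128 which is about 1.91, so ceil(1.91) = 2 more chunks are added; 1.91/128 drops below 1 and stops the loop; the final +1 accounts for the root, giving 248 (and 245 + 4 + 1 = 250 with the encrypted branching factor of 64). A standalone restatement of that arithmetic, mirroring the function above:

package main

import (
	"fmt"
	"math"
)

// estimate mirrors calculateNumberOfChunks from the hunk above.
func estimate(contentLength int64, isEncrypted bool) int64 {
	if contentLength < 4096 {
		return 1
	}
	branching := 128.0
	if isEncrypted {
		branching = 64.0
	}
	data := math.Ceil(float64(contentLength) / 4096)
	total := data
	for intermediate := data / branching; intermediate > 1; intermediate /= branching {
		total += math.Ceil(intermediate)
	}
	return int64(total) + 1 // +1 for the root chunk
}

func main() {
	fmt.Println(estimate(1000000, false)) // 245 + 2 + 1 = 248
	fmt.Println(estimate(1000000, true))  // 245 + 4 + 1 = 250
}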
@@ -44,7 +44,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/api"
-	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"github.com/ethereum/go-ethereum/swarm/storage/feed"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
@@ -755,6 +754,7 @@ func testBzzTar(encrypted bool, t *testing.T) {
 		t.Fatal(err)
 	}
 	req.Header.Add("Content-Type", "application/x-tar")
+	req.Header.Add(SwarmTagHeaderName, "test-upload")
 	client := &http.Client{}
 	resp2, err := client.Do(req)
 	if err != nil {
@@ -763,6 +763,11 @@ func testBzzTar(encrypted bool, t *testing.T) {
 	if resp2.StatusCode != http.StatusOK {
 		t.Fatalf("err %s", resp2.Status)
 	}
+
+	// check that the tag was written correctly
+	tag := srv.Tags.All()[0]
+	testutil.CheckTag(t, tag, 4, 4, 0, 4)
+
 	swarmHash, err := ioutil.ReadAll(resp2.Body)
 	resp2.Body.Close()
 	if err != nil {
@@ -834,6 +839,75 @@ func testBzzTar(encrypted bool, t *testing.T) {
 			t.Fatalf("file %s did not pass content assertion", hdr.Name)
 		}
 	}
+
+	// now check the tags endpoint
+}
+
+// TestBzzCorrectTagEstimate checks that the HTTP middleware sets the total number of chunks
+// in the tag according to an estimate from the HTTP request Content-Length header divided
+// by chunk size (4096). It is needed to be checked BEFORE chunking is done, therefore
+// concurrency was introduced to slow down the HTTP request
+func TestBzzCorrectTagEstimate(t *testing.T) {
+	srv := NewTestSwarmServer(t, serverFunc, nil)
+	defer srv.Close()
+
+	for _, v := range []struct {
+		toEncrypt bool
+		expChunks int64
+	}{
+		{toEncrypt: false, expChunks: 248},
+		{toEncrypt: true, expChunks: 250},
+	} {
+		pr, pw := io.Pipe()
+
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+		addr := ""
+		if v.toEncrypt {
+			addr = "encrypt"
+		}
+		req, err := http.NewRequest("POST", srv.URL+"/bzz:/"+addr, pr)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		req = req.WithContext(ctx)
+		req.ContentLength = 1000000
+		req.Header.Add(SwarmTagHeaderName, "1000000")
+
+		go func() {
+			for {
+				select {
+				case <-ctx.Done():
+					return
+				case <-time.After(1 * time.Millisecond):
+					_, err := pw.Write([]byte{0})
+					if err != nil {
+						t.Error(err)
+					}
+				}
+			}
+		}()
+		go func() {
+			transport := http.DefaultTransport
+			_, err := transport.RoundTrip(req)
+			if err != nil {
+				t.Error(err)
+			}
+		}()
+		done := false
+		for !done {
+			switch len(srv.Tags.All()) {
+			case 0:
+				<-time.After(10 * time.Millisecond)
+			case 1:
+				tag := srv.Tags.All()[0]
+				testutil.CheckTag(t, tag, 0, 0, 0, v.expChunks)
+				srv.Tags.Delete(tag.Uid)
+				done = true
+			}
+		}
+	}
 }

 // TestBzzRootRedirect tests that getting the root path of a manifest without
@@ -851,19 +925,11 @@ func testBzzRootRedirect(toEncrypt bool, t *testing.T) {
 	defer srv.Close()

 	// create a manifest with some data at the root path
-	client := swarm.NewClient(srv.URL)
 	data := []byte("data")
-	file := &swarm.File{
-		ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
-		ManifestEntry: api.ManifestEntry{
-			Path:        "",
-			ContentType: "text/plain",
-			Size:        int64(len(data)),
-		},
-	}
-	hash, err := client.Upload(file, "", toEncrypt)
-	if err != nil {
-		t.Fatal(err)
+	headers := map[string]string{"Content-Type": "text/plain"}
+	res, hash := httpDo("POST", srv.URL+"/bzz:/", bytes.NewReader(data), headers, false, t)
+	if res.StatusCode != http.StatusOK {
+		t.Fatalf("unexpected status code from server %d want %d", res.StatusCode, http.StatusOK)
 	}

 	// define a CheckRedirect hook which ensures there is only a single
@@ -1046,21 +1112,10 @@ func TestGet(t *testing.T) {
 func TestModify(t *testing.T) {
 	srv := NewTestSwarmServer(t, serverFunc, nil)
 	defer srv.Close()
-	swarmClient := swarm.NewClient(srv.URL)
-	data := []byte("data")
-	file := &swarm.File{
-		ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
-		ManifestEntry: api.ManifestEntry{
-			Path:        "",
-			ContentType: "text/plain",
-			Size:        int64(len(data)),
-		},
-	}
-
-	hash, err := swarmClient.Upload(file, "", false)
-	if err != nil {
-		t.Fatal(err)
+	headers := map[string]string{"Content-Type": "text/plain"}
+	res, hash := httpDo("POST", srv.URL+"/bzz:/", bytes.NewReader([]byte("data")), headers, false, t)
+	if res.StatusCode != http.StatusOK {
+		t.Fatalf("unexpected status code from server %d want %d", res.StatusCode, http.StatusOK)
 	}

 	for _, testCase := range []struct {
@@ -1283,6 +1338,46 @@ func TestBzzGetFileWithResolver(t *testing.T) {
 	}
 }

+// TestCalculateNumberOfChunks is a unit test for the chunk-number-according-to-content-length
+// calculation
+func TestCalculateNumberOfChunks(t *testing.T) {
+
+	//test cases:
+	for _, tc := range []struct{ len, chunks int64 }{
+		{len: 1000, chunks: 1},
+		{len: 5000, chunks: 3},
+		{len: 10000, chunks: 4},
+		{len: 100000, chunks: 26},
+		{len: 1000000, chunks: 248},
+		{len: 325839339210, chunks: 79550620 + 621490 + 4856 + 38 + 1},
+	} {
+		res := calculateNumberOfChunks(tc.len, false)
+		if res != tc.chunks {
+			t.Fatalf("expected result for %d bytes to be %d got %d", tc.len, tc.chunks, res)
+		}
+	}
+}
+
+// TestCalculateNumberOfChunksEncrypted is a unit test for the chunk-number-according-to-content-length
+// calculation with encryption (branching factor=64)
+func TestCalculateNumberOfChunksEncrypted(t *testing.T) {
+
+	//test cases:
+	for _, tc := range []struct{ len, chunks int64 }{
+		{len: 1000, chunks: 1},
+		{len: 5000, chunks: 3},
+		{len: 10000, chunks: 4},
+		{len: 100000, chunks: 26},
+		{len: 1000000, chunks: 245 + 4 + 1},
+		{len: 325839339210, chunks: 79550620 + 1242979 + 19422 + 304 + 5 + 1},
+	} {
+		res := calculateNumberOfChunks(tc.len, true)
+		if res != tc.chunks {
+			t.Fatalf("expected result for %d bytes to be %d got %d", tc.len, tc.chunks, res)
+		}
+	}
+}
+
 // testResolver implements the Resolver interface and either returns the given
 // hash if it is set, or returns a "name not found" error
 type testResolveValidator struct {
@@ -1308,6 +1403,7 @@ func (t *testResolveValidator) Resolve(addr string) (common.Hash, error) {
 func (t *testResolveValidator) Owner(node [32]byte) (addr common.Address, err error) {
 	return
 }
+
 func (t *testResolveValidator) HeaderByNumber(context.Context, *big.Int) (header *types.Header, err error) {
 	return
 }
@@ -24,8 +24,10 @@ import (
 	"testing"

 	"github.com/ethereum/go-ethereum/swarm/api"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"github.com/ethereum/go-ethereum/swarm/storage/feed"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
 )

 type TestServer interface {
@@ -37,17 +39,15 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, resolver api.Resolver) *TestSwarmServer {
 	if err != nil {
 		t.Fatal(err)
 	}
-	storeParams := storage.NewDefaultLocalStoreParams()
-	storeParams.DbCapacity = 5000000
-	storeParams.CacheCapacity = 5000
-	storeParams.Init(swarmDir)
-	localStore, err := storage.NewLocalStore(storeParams, nil)
+	localStore, err := localstore.New(swarmDir, make([]byte, 32), nil)
 	if err != nil {
 		os.RemoveAll(swarmDir)
 		t.Fatal(err)
 	}
-	fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams())
+	tags := chunk.NewTags()
+	fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams(), tags)

 	// Swarm feeds test setup
 	feedsDir, err := ioutil.TempDir("", "swarm-feeds-test")
 	if err != nil {
@@ -59,12 +59,13 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, resolver api.Resolver) *TestSwarmServer {
 		t.Fatal(err)
 	}

-	swarmApi := api.NewAPI(fileStore, resolver, feeds.Handler, nil)
+	swarmApi := api.NewAPI(fileStore, resolver, feeds.Handler, nil, tags)
 	apiServer := httptest.NewServer(serverFunc(swarmApi))

 	tss := &TestSwarmServer{
 		Server:    apiServer,
 		FileStore: fileStore,
+		Tags:      tags,
 		dir:       swarmDir,
 		Hasher:    storage.MakeHashFunc(storage.DefaultHash)(),
 		cleanup: func() {
@@ -84,6 +85,7 @@ type TestSwarmServer struct {
 	*httptest.Server
 	Hasher    storage.SwarmHash
 	FileStore *storage.FileStore
+	Tags      *chunk.Tags
 	dir       string
 	cleanup   func()
 	CurrentTime uint64
@@ -19,7 +19,11 @@ package api
 import (
 	"context"
 	"fmt"
+	"strings"
+	"time"
+
+	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/network"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
@@ -47,21 +51,34 @@ func (inspector *Inspector) ListKnown() []string {
 	return res
 }

-type HasInfo struct {
-	Addr string `json:"address"`
-	Has  bool   `json:"has"`
+func (inspector *Inspector) IsSyncing() bool {
+	lastReceivedChunksMsg := metrics.GetOrRegisterGauge("network.stream.received_chunks", nil)
+
+	// last received chunks msg time
+	lrct := time.Unix(0, lastReceivedChunksMsg.Value())
+
+	// if last received chunks msg time is after now-15sec. (i.e. within the last 15sec.) then we say that the node is still syncing
+	// technically this is not correct, because this might have been a retrieve request, but for the time being it works for our purposes
+	// because we know we are not making retrieve requests on the node while checking this
+	return lrct.After(time.Now().Add(-15 * time.Second))
 }

 // Has checks whether each chunk address is present in the underlying datastore,
 // the bool in the returned structs indicates if the underlying datastore has
 // the chunk stored with the given address (true), or not (false)
-func (inspector *Inspector) Has(chunkAddresses []storage.Address) []HasInfo {
-	results := make([]HasInfo, 0)
+func (inspector *Inspector) Has(chunkAddresses []storage.Address) string {
+	hostChunks := []string{}
 	for _, addr := range chunkAddresses {
-		res := HasInfo{}
-		res.Addr = addr.String()
-		res.Has = inspector.netStore.Has(context.Background(), addr)
-		results = append(results, res)
+		has, err := inspector.netStore.Has(context.Background(), addr)
+		if err != nil {
+			log.Error(err.Error())
+		}
+		if has {
+			hostChunks = append(hostChunks, "1")
+		} else {
+			hostChunks = append(hostChunks, "0")
+		}
 	}
-	return results
+
+	return strings.Join(hostChunks, "")
 }
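A short sketch of how the reworked inspector output reads: the result is now a compact bitstring with one character per queried address instead of a slice of structs. The helper below is a stand-in that mirrors the loop above, with boolean presence flags in place of real netstore lookups:

package main

import (
	"fmt"
	"strings"
)

// has stands in for Inspector.Has: the real method queries the netstore
// for each address and records "1" or "0" in query order.
func has(present []bool) string {
	hostChunks := make([]string, 0, len(present))
	for _, ok := range present {
		if ok {
			hostChunks = append(hostChunks, "1")
		} else {
			hostChunks = append(hostChunks, "0")
		}
	}
	return strings.Join(hostChunks, "")
}

func main() {
	// Three queried chunk addresses: first and third present, second missing.
	fmt.Println(has([]bool{true, false, true})) // "101"
}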
@@ -235,7 +235,6 @@ func loadManifest(ctx context.Context, fileStore *storage.FileStore, addr storage.Address, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) {
 }

 func readManifest(mr storage.LazySectionReader, addr storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
-
 	// TODO check size for oversized manifests
 	size, err := mr.Size(mr.Context(), quitC)
 	if err != nil { // size == 0
@@ -25,6 +25,7 @@ import (
 	"strings"
 	"testing"

+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )

@@ -42,7 +43,7 @@ func manifest(paths ...string) (manifestReader storage.LazySectionReader) {

 func testGetEntry(t *testing.T, path, match string, multiple bool, paths ...string) *manifestTrie {
 	quitC := make(chan bool)
-	fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams())
+	fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
 	ref := make([]byte, fileStore.HashSize())
 	trie, err := readManifest(manifest(paths...), ref, fileStore, false, quitC, NOOPDecrypt)
 	if err != nil {
@@ -99,7 +100,7 @@ func TestGetEntry(t *testing.T) {
 func TestExactMatch(t *testing.T) {
 	quitC := make(chan bool)
 	mf := manifest("shouldBeExactMatch.css", "shouldBeExactMatch.css.map")
-	fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams())
+	fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
 	ref := make([]byte, fileStore.HashSize())
 	trie, err := readManifest(mf, ref, fileStore, false, quitC, nil)
 	if err != nil {
@@ -132,7 +133,7 @@ func TestAddFileWithManifestPath(t *testing.T) {
 	reader := &storage.LazyTestSectionReader{
 		SectionReader: io.NewSectionReader(bytes.NewReader(manifest), 0, int64(len(manifest))),
 	}
-	fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams())
+	fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
 	ref := make([]byte, fileStore.HashSize())
 	trie, err := readManifest(reader, ref, fileStore, false, nil, NOOPDecrypt)
 	if err != nil {
@@ -1,85 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package api
-
-import (
-	"context"
-	"path"
-
-	"github.com/ethereum/go-ethereum/swarm/storage"
-)
-
-type Response struct {
-	MimeType string
-	Status   int
-	Size     int64
-	// Content []byte
-	Content string
-}
-
-// implements a service
-//
-// DEPRECATED: Use the HTTP API instead
-type Storage struct {
-	api *API
-}
-
-func NewStorage(api *API) *Storage {
-	return &Storage{api}
-}
-
-// Put uploads the content to the swarm with a simple manifest speficying
-// its content type
-//
-// DEPRECATED: Use the HTTP API instead
-func (s *Storage) Put(ctx context.Context, content string, contentType string, toEncrypt bool) (storage.Address, func(context.Context) error, error) {
-	return s.api.Put(ctx, content, contentType, toEncrypt)
-}
-
-// Get retrieves the content from bzzpath and reads the response in full
-// It returns the Response object, which serialises containing the
-// response body as the value of the Content field
-// NOTE: if error is non-nil, sResponse may still have partial content
-// the actual size of which is given in len(resp.Content), while the expected
-// size is resp.Size
-//
-// DEPRECATED: Use the HTTP API instead
-func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) {
-	uri, err := Parse(path.Join("bzz:/", bzzpath))
-	if err != nil {
-		return nil, err
-	}
-	addr, err := s.api.Resolve(ctx, uri.Addr)
-	if err != nil {
-		return nil, err
-	}
-	reader, mimeType, status, _, err := s.api.Get(ctx, nil, addr, uri.Path)
-	if err != nil {
-		return nil, err
-	}
-	quitC := make(chan bool)
-	expsize, err := reader.Size(ctx, quitC)
-	if err != nil {
-		return nil, err
-	}
-	body := make([]byte, expsize)
-	size, err := reader.Read(body)
-	if int64(size) == expsize {
-		err = nil
-	}
-	return &Response{mimeType, status, expsize, string(body[:size])}, err
-}
@ -1,56 +0,0 @@
|
|||||||
// Copyright 2016 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package api
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func testStorage(t *testing.T, f func(*Storage, bool)) {
|
|
||||||
testAPI(t, func(api *API, toEncrypt bool) {
|
|
||||||
f(NewStorage(api), toEncrypt)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStoragePutGet(t *testing.T) {
|
|
||||||
testStorage(t, func(api *Storage, toEncrypt bool) {
|
|
||||||
content := "hello"
|
|
||||||
exp := expResponse(content, "text/plain", 0)
|
|
||||||
// exp := expResponse([]byte(content), "text/plain", 0)
|
|
||||||
ctx := context.TODO()
|
|
||||||
bzzkey, wait, err := api.Put(ctx, content, exp.MimeType, toEncrypt)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
err = wait(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
bzzhash := bzzkey.Hex()
|
|
||||||
// to check put against the API#Get
|
|
||||||
resp0 := testGet(t, api.api, bzzhash, "")
|
|
||||||
checkResponse(t, resp0, exp)
|
|
||||||
|
|
||||||
// check storage#Get
|
|
||||||
resp, err := api.Get(context.TODO(), bzzhash)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
checkResponse(t, &testResponse{nil, resp}, exp)
|
|
||||||
})
|
|
||||||
}
|
|
@ -1,6 +1,23 @@
|
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package chunk
|
package chunk
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
@ -28,7 +45,7 @@ type chunk struct {
|
|||||||
sdata []byte
|
sdata []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewChunk(addr Address, data []byte) *chunk {
|
func NewChunk(addr Address, data []byte) Chunk {
|
||||||
return &chunk{
|
return &chunk{
|
||||||
addr: addr,
|
addr: addr,
|
||||||
sdata: data,
|
sdata: data,
|
||||||
@ -107,3 +124,138 @@ func Proximity(one, other []byte) (ret int) {
|
|||||||
}
|
}
|
||||||
return MaxPO
|
return MaxPO
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ModeGet enumerates different Getter modes.
|
||||||
|
type ModeGet int
|
||||||
|
|
||||||
|
func (m ModeGet) String() string {
|
||||||
|
switch m {
|
||||||
|
case ModeGetRequest:
|
||||||
|
return "Request"
|
||||||
|
case ModeGetSync:
|
||||||
|
return "Sync"
|
||||||
|
case ModeGetLookup:
|
||||||
|
return "Lookup"
|
||||||
|
default:
|
||||||
|
return "Unknown"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Getter modes.
|
||||||
|
const (
|
||||||
|
// ModeGetRequest: when accessed for retrieval
|
||||||
|
ModeGetRequest ModeGet = iota
|
||||||
|
// ModeGetSync: when accessed for syncing or proof of custody request
|
||||||
|
ModeGetSync
|
||||||
|
// ModeGetLookup: when accessed to look up a chunk in feeds or other places
|
||||||
|
ModeGetLookup
|
||||||
|
)
|
||||||
|
|
||||||
|
// ModePut enumerates different Putter modes.
|
||||||
|
type ModePut int
|
||||||
|
|
||||||
|
func (m ModePut) String() string {
|
||||||
|
switch m {
|
||||||
|
case ModePutRequest:
|
||||||
|
return "Request"
|
||||||
|
case ModePutSync:
|
||||||
|
return "Sync"
|
||||||
|
case ModePutUpload:
|
||||||
|
return "Upload"
|
||||||
|
default:
|
||||||
|
return "Unknown"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Putter modes.
|
||||||
|
const (
|
||||||
|
// ModePutRequest: when a chunk is received as a result of retrieve request and delivery
|
||||||
|
ModePutRequest ModePut = iota
|
||||||
|
// ModePutSync: when a chunk is received via syncing
|
||||||
|
ModePutSync
|
||||||
|
// ModePutUpload: when a chunk is created by local upload
|
||||||
|
ModePutUpload
|
||||||
|
)
|
||||||
|
|
||||||
|
// ModeSet enumerates different Setter modes.
|
||||||
|
type ModeSet int
|
||||||
|
|
||||||
|
func (m ModeSet) String() string {
|
||||||
|
switch m {
|
||||||
|
case ModeSetAccess:
|
||||||
|
return "Access"
|
||||||
|
case ModeSetSync:
|
||||||
|
return "Sync"
|
||||||
|
case ModeSetRemove:
|
||||||
|
return "Remove"
|
||||||
|
default:
|
||||||
|
return "Unknown"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setter modes.
|
||||||
|
const (
|
||||||
|
// ModeSetAccess: when an update request is received for a chunk or chunk is retrieved for delivery
|
||||||
|
ModeSetAccess ModeSet = iota
|
||||||
|
// ModeSetSync: when a chunk is added to a pull sync batch or when a push sync receipt is received
|
||||||
|
ModeSetSync
|
||||||
|
// ModeSetRemove: when a chunk is removed
|
||||||
|
ModeSetRemove
|
||||||
|
)
|
||||||
|
|
||||||
|
// Descriptor holds information required for Pull syncing. This struct
|
||||||
|
// is provided by subscribing to pull index.
|
||||||
|
type Descriptor struct {
|
||||||
|
Address Address
|
||||||
|
BinID uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Descriptor) String() string {
|
||||||
|
if d == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s bin id %v", d.Address.Hex(), d.BinID)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Store interface {
|
||||||
|
Get(ctx context.Context, mode ModeGet, addr Address) (ch Chunk, err error)
|
||||||
|
Put(ctx context.Context, mode ModePut, ch Chunk) (exists bool, err error)
|
||||||
|
Has(ctx context.Context, addr Address) (yes bool, err error)
|
||||||
|
Set(ctx context.Context, mode ModeSet, addr Address) (err error)
|
||||||
|
LastPullSubscriptionBinID(bin uint8) (id uint64, err error)
|
||||||
|
SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan Descriptor, stop func())
|
||||||
|
Close() (err error)
|
||||||
|
}
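As an illustration only of how a pull syncer might drive this interface (store, ctx, bin and until are assumed names, and the exact since/until semantics are left to the implementation):

	since, err := store.LastPullSubscriptionBinID(bin)
	if err != nil {
		// handle error
	}
	descs, stop := store.SubscribePull(ctx, bin, since, until)
	defer stop()
	for d := range descs {
		ch, err := store.Get(ctx, ModeGetSync, d.Address)
		if err != nil {
			continue
		}
		// offer ch to the peer here, then mark the chunk as synced
		_ = ch
		if err := store.Set(ctx, ModeSetSync, d.Address); err != nil {
			// handle error
		}
	}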
|
||||||
|
|
||||||
|
// Validator validates a chunk.
|
||||||
|
type Validator interface {
|
||||||
|
Validate(ch Chunk) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidatorStore encapsulates Store by decorating the Put method
|
||||||
|
// with validators check.
|
||||||
|
type ValidatorStore struct {
|
||||||
|
Store
|
||||||
|
validators []Validator
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewValidatorStore returns a new ValidatorStore which uses
|
||||||
|
// provided validators to validate chunks on Put.
|
||||||
|
func NewValidatorStore(store Store, validators ...Validator) (s *ValidatorStore) {
|
||||||
|
return &ValidatorStore{
|
||||||
|
Store: store,
|
||||||
|
validators: validators,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put overrides the Store Put method with a validators check. If one of the validators
// returns true, the chunk is considered valid and the Store Put method is called.
|
||||||
|
// If all validators return false, ErrChunkInvalid is returned.
|
||||||
|
func (s *ValidatorStore) Put(ctx context.Context, mode ModePut, ch Chunk) (exists bool, err error) {
|
||||||
|
for _, v := range s.validators {
|
||||||
|
if v.Validate(ch) {
|
||||||
|
return s.Store.Put(ctx, mode, ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, ErrChunkInvalid
|
||||||
|
}
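For illustration, a backing store could be wrapped so that only plausible chunks are accepted on Put; nonEmptyValidator and newGuardedStore are hypothetical names, not part of this change, and the example only assumes the Chunk interface exposes Data():

	type nonEmptyValidator struct{}

	// Validate accepts any chunk that carries a non-empty payload.
	func (nonEmptyValidator) Validate(ch Chunk) bool {
		return len(ch.Data()) > 0
	}

	func newGuardedStore(backend Store) Store {
		return NewValidatorStore(backend, nonEmptyValidator{})
	}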
|
||||||
|
218
swarm/chunk/tag.go
Normal file
@ -0,0 +1,218 @@
|
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package chunk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errExists = errors.New("already exists")
|
||||||
|
errNA = errors.New("not available yet")
|
||||||
|
errNoETA = errors.New("unable to calculate ETA")
|
||||||
|
errTagNotFound = errors.New("tag not found")
|
||||||
|
)
|
||||||
|
|
||||||
|
// State is the enum type for chunk states
|
||||||
|
type State = uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
StateSplit State = iota // chunk has been processed by filehasher/swarm safe call
|
||||||
|
StateStored // chunk stored locally
|
||||||
|
StateSeen // chunk previously seen
|
||||||
|
StateSent // chunk sent to neighbourhood
|
||||||
|
StateSynced // proof is received; chunk removed from sync db; chunk is available everywhere
|
||||||
|
)
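In the upload path a chunk normally moves through these states in order: StateSplit when the splitter produces it, StateStored once it is persisted locally (or StateSeen if it was already known), then StateSent, and finally StateSynced once a proof is received.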
|
||||||
|
|
||||||
|
// Tag represents info on the status of new chunks
|
||||||
|
type Tag struct {
|
||||||
|
Uid uint32 // a unique identifier for this tag
|
||||||
|
Name string // a name tag for this tag
|
||||||
|
Address Address // the associated swarm hash for this tag
|
||||||
|
total int64 // total chunks belonging to a tag
|
||||||
|
split int64 // number of chunks already processed by splitter for hashing
|
||||||
|
seen int64 // number of chunks already seen
|
||||||
|
stored int64 // number of chunks already stored locally
|
||||||
|
sent int64 // number of chunks sent for push syncing
|
||||||
|
synced int64 // number of chunks synced with proof
|
||||||
|
startedAt time.Time // tag started to calculate ETA
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTag creates a new tag with the given uid, name and total chunk count,
// recording the current time as the tag's start time
|
||||||
|
func NewTag(uid uint32, s string, total int64) *Tag {
|
||||||
|
t := &Tag{
|
||||||
|
Uid: uid,
|
||||||
|
Name: s,
|
||||||
|
startedAt: time.Now(),
|
||||||
|
total: total,
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inc increments the count for a state
|
||||||
|
func (t *Tag) Inc(state State) {
|
||||||
|
var v *int64
|
||||||
|
switch state {
|
||||||
|
case StateSplit:
|
||||||
|
v = &t.split
|
||||||
|
case StateStored:
|
||||||
|
v = &t.stored
|
||||||
|
case StateSeen:
|
||||||
|
v = &t.seen
|
||||||
|
case StateSent:
|
||||||
|
v = &t.sent
|
||||||
|
case StateSynced:
|
||||||
|
v = &t.synced
|
||||||
|
}
|
||||||
|
atomic.AddInt64(v, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the count for a state on a tag
|
||||||
|
func (t *Tag) Get(state State) int64 {
|
||||||
|
var v *int64
|
||||||
|
switch state {
|
||||||
|
case StateSplit:
|
||||||
|
v = &t.split
|
||||||
|
case StateStored:
|
||||||
|
v = &t.stored
|
||||||
|
case StateSeen:
|
||||||
|
v = &t.seen
|
||||||
|
case StateSent:
|
||||||
|
v = &t.sent
|
||||||
|
case StateSynced:
|
||||||
|
v = &t.synced
|
||||||
|
}
|
||||||
|
return atomic.LoadInt64(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total returns the total count
|
||||||
|
func (t *Tag) Total() int64 {
|
||||||
|
return atomic.LoadInt64(&t.total)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoneSplit sets the total count to the split count and sets the associated swarm hash for this tag.
// It is meant to be called when the splitter finishes for input streams of unknown size.
|
||||||
|
func (t *Tag) DoneSplit(address Address) int64 {
|
||||||
|
total := atomic.LoadInt64(&t.split)
|
||||||
|
atomic.StoreInt64(&t.total, total)
|
||||||
|
t.Address = address
|
||||||
|
return total
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status returns the value of state and the total count
|
||||||
|
func (t *Tag) Status(state State) (int64, int64, error) {
|
||||||
|
count, seen, total := t.Get(state), atomic.LoadInt64(&t.seen), atomic.LoadInt64(&t.total)
|
||||||
|
if total == 0 {
|
||||||
|
return count, total, errNA
|
||||||
|
}
|
||||||
|
switch state {
|
||||||
|
case StateSplit, StateStored, StateSeen:
|
||||||
|
return count, total, nil
|
||||||
|
case StateSent, StateSynced:
|
||||||
|
stored := atomic.LoadInt64(&t.stored)
|
||||||
|
if stored < total {
|
||||||
|
return count, total - seen, errNA
|
||||||
|
}
|
||||||
|
return count, total - seen, nil
|
||||||
|
}
|
||||||
|
return count, total, errNA
|
||||||
|
}
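As a concrete reading of the arithmetic above: with total = 10, seen = 1 and all 10 chunks stored, Status(StateSent) reports the sent count against an effective total of 9 (total - seen), which is the behaviour the TestTagStatus table further down relies on.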
|
||||||
|
|
||||||
|
// ETA returns the time of completion estimated based on time passed and rate of completion
|
||||||
|
func (t *Tag) ETA(state State) (time.Time, error) {
|
||||||
|
cnt, total, err := t.Status(state)
|
||||||
|
if err != nil {
|
||||||
|
return time.Time{}, err
|
||||||
|
}
|
||||||
|
if cnt == 0 || total == 0 {
|
||||||
|
return time.Time{}, errNoETA
|
||||||
|
}
|
||||||
|
diff := time.Since(t.startedAt)
|
||||||
|
dur := time.Duration(total) * diff / time.Duration(cnt)
|
||||||
|
return t.startedAt.Add(dur), nil
|
||||||
|
}
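As a worked instance of this estimate: if Status reports 2 of 10 items done 4 seconds after startedAt, then diff = 4s and dur = 10 × 4s / 2 = 20s, so the returned ETA is startedAt + 20s.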
|
||||||
|
|
||||||
|
// MarshalBinary marshals the tag into a byte slice
|
||||||
|
func (tag *Tag) MarshalBinary() (data []byte, err error) {
|
||||||
|
buffer := make([]byte, 4)
|
||||||
|
binary.BigEndian.PutUint32(buffer, tag.Uid)
|
||||||
|
encodeInt64Append(&buffer, tag.total)
|
||||||
|
encodeInt64Append(&buffer, tag.split)
|
||||||
|
encodeInt64Append(&buffer, tag.seen)
|
||||||
|
encodeInt64Append(&buffer, tag.stored)
|
||||||
|
encodeInt64Append(&buffer, tag.sent)
|
||||||
|
encodeInt64Append(&buffer, tag.synced)
|
||||||
|
|
||||||
|
intBuffer := make([]byte, 8)
|
||||||
|
|
||||||
|
n := binary.PutVarint(intBuffer, tag.startedAt.Unix())
|
||||||
|
buffer = append(buffer, intBuffer[:n]...)
|
||||||
|
|
||||||
|
n = binary.PutVarint(intBuffer, int64(len(tag.Address)))
|
||||||
|
buffer = append(buffer, intBuffer[:n]...)
|
||||||
|
|
||||||
|
buffer = append(buffer, tag.Address[:]...)
|
||||||
|
|
||||||
|
buffer = append(buffer, []byte(tag.Name)...)
|
||||||
|
|
||||||
|
return buffer, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary unmarshals a byte slice into a tag
|
||||||
|
func (tag *Tag) UnmarshalBinary(buffer []byte) error {
|
||||||
|
if len(buffer) < 13 {
|
||||||
|
return errors.New("buffer too short")
|
||||||
|
}
|
||||||
|
tag.Uid = binary.BigEndian.Uint32(buffer)
|
||||||
|
buffer = buffer[4:]
|
||||||
|
|
||||||
|
tag.total = decodeInt64Splice(&buffer)
|
||||||
|
tag.split = decodeInt64Splice(&buffer)
|
||||||
|
tag.seen = decodeInt64Splice(&buffer)
|
||||||
|
tag.stored = decodeInt64Splice(&buffer)
|
||||||
|
tag.sent = decodeInt64Splice(&buffer)
|
||||||
|
tag.synced = decodeInt64Splice(&buffer)
|
||||||
|
|
||||||
|
t, n := binary.Varint(buffer)
|
||||||
|
tag.startedAt = time.Unix(t, 0)
|
||||||
|
buffer = buffer[n:]
|
||||||
|
|
||||||
|
t, n = binary.Varint(buffer)
|
||||||
|
buffer = buffer[n:]
|
||||||
|
if t > 0 {
|
||||||
|
tag.Address = buffer[:t]
|
||||||
|
}
|
||||||
|
tag.Name = string(buffer[t:])
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeInt64Append(buffer *[]byte, val int64) {
|
||||||
|
intBuffer := make([]byte, 8)
|
||||||
|
n := binary.PutVarint(intBuffer, val)
|
||||||
|
*buffer = append(*buffer, intBuffer[:n]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeInt64Splice(buffer *[]byte) int64 {
|
||||||
|
val, n := binary.Varint((*buffer))
|
||||||
|
*buffer = (*buffer)[n:]
|
||||||
|
return val
|
||||||
|
}
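A round-trip sketch of the encoding above (illustrative values, error handling abbreviated):

	tag := NewTag(42, "upload-1", 100)
	tag.Inc(StateStored)
	data, err := tag.MarshalBinary()
	if err != nil {
		// handle error
	}
	restored := &Tag{}
	if err := restored.UnmarshalBinary(data); err != nil {
		// handle error
	}
	// restored now carries the same Uid, Name, counters and start time (to second precision)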
|
273
swarm/chunk/tag_test.go
Normal file
@ -0,0 +1,273 @@
|
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package chunk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
allStates = []State{StateSplit, StateStored, StateSeen, StateSent, StateSynced}
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestTagSingleIncrements tests if Inc increments the tag state value
|
||||||
|
func TestTagSingleIncrements(t *testing.T) {
|
||||||
|
tg := &Tag{total: 10}
|
||||||
|
|
||||||
|
tc := []struct {
|
||||||
|
state uint32
|
||||||
|
inc int
|
||||||
|
expcount int64
|
||||||
|
exptotal int64
|
||||||
|
}{
|
||||||
|
{state: StateSplit, inc: 10, expcount: 10, exptotal: 10},
|
||||||
|
{state: StateStored, inc: 9, expcount: 9, exptotal: 9},
|
||||||
|
{state: StateSeen, inc: 1, expcount: 1, exptotal: 10},
|
||||||
|
{state: StateSent, inc: 9, expcount: 9, exptotal: 9},
|
||||||
|
{state: StateSynced, inc: 9, expcount: 9, exptotal: 9},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tc {
|
||||||
|
for i := 0; i < tc.inc; i++ {
|
||||||
|
tg.Inc(tc.state)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tc {
|
||||||
|
if tg.Get(tc.state) != tc.expcount {
|
||||||
|
t.Fatalf("not incremented")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestTagStatus is a unit test to cover Tag.Status method functionality
|
||||||
|
func TestTagStatus(t *testing.T) {
|
||||||
|
tg := &Tag{total: 10}
|
||||||
|
tg.Inc(StateSeen)
|
||||||
|
tg.Inc(StateSent)
|
||||||
|
tg.Inc(StateSynced)
|
||||||
|
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
tg.Inc(StateSplit)
|
||||||
|
tg.Inc(StateStored)
|
||||||
|
}
|
||||||
|
for _, v := range []struct {
|
||||||
|
state State
|
||||||
|
expVal int64
|
||||||
|
expTotal int64
|
||||||
|
}{
|
||||||
|
{state: StateStored, expVal: 10, expTotal: 10},
|
||||||
|
{state: StateSplit, expVal: 10, expTotal: 10},
|
||||||
|
{state: StateSeen, expVal: 1, expTotal: 10},
|
||||||
|
{state: StateSent, expVal: 1, expTotal: 9},
|
||||||
|
{state: StateSynced, expVal: 1, expTotal: 9},
|
||||||
|
} {
|
||||||
|
val, total, err := tg.Status(v.state)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if val != v.expVal {
|
||||||
|
t.Fatalf("should be %d, got %d", v.expVal, val)
|
||||||
|
}
|
||||||
|
if total != v.expTotal {
|
||||||
|
t.Fatalf("expected total to be %d, got %d", v.expTotal, total)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestTagETA tests that the ETA estimate is precise
|
||||||
|
func TestTagETA(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
maxDiff := 100000 // 100 microsecond
|
||||||
|
tg := &Tag{total: 10, startedAt: now}
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
tg.Inc(StateSplit)
|
||||||
|
eta, err := tg.ETA(StateSplit)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
diff := time.Until(eta) - 9*time.Since(now)
|
||||||
|
if int(diff) > maxDiff {
|
||||||
|
t.Fatalf("ETA is not precise, got diff %v > .1ms", diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestTagConcurrentIncrements tests Inc calls concurrently
|
||||||
|
func TestTagConcurrentIncrements(t *testing.T) {
|
||||||
|
tg := &Tag{}
|
||||||
|
n := 1000
|
||||||
|
wg := sync.WaitGroup{}
|
||||||
|
wg.Add(5 * n)
|
||||||
|
for _, f := range allStates {
|
||||||
|
go func(f State) {
|
||||||
|
for j := 0; j < n; j++ {
|
||||||
|
go func() {
|
||||||
|
tg.Inc(f)
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}(f)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
for _, f := range allStates {
|
||||||
|
v := tg.Get(f)
|
||||||
|
if v != int64(n) {
|
||||||
|
t.Fatalf("expected state %v to be %v, got %v", f, n, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestTagsMultipleConcurrentIncrementsSyncMap tests concurrent Inc calls on multiple tags
|
||||||
|
func TestTagsMultipleConcurrentIncrementsSyncMap(t *testing.T) {
|
||||||
|
ts := NewTags()
|
||||||
|
n := 100
|
||||||
|
wg := sync.WaitGroup{}
|
||||||
|
wg.Add(10 * 5 * n)
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
s := string([]byte{uint8(i)})
|
||||||
|
tag, err := ts.New(s, int64(n))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
for _, f := range allStates {
|
||||||
|
go func(tag *Tag, f State) {
|
||||||
|
for j := 0; j < n; j++ {
|
||||||
|
go func() {
|
||||||
|
tag.Inc(f)
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}(tag, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
i := 0
|
||||||
|
ts.Range(func(k, v interface{}) bool {
|
||||||
|
i++
|
||||||
|
uid := k.(uint32)
|
||||||
|
for _, f := range allStates {
|
||||||
|
tag, err := ts.Get(uid)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
stateVal := tag.Get(f)
|
||||||
|
if stateVal != int64(n) {
|
||||||
|
t.Fatalf("expected tag %v state %v to be %v, got %v", uid, f, n, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
|
||||||
|
})
|
||||||
|
if i != 10 {
|
||||||
|
t.Fatal("not enough tagz")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestMarshallingWithAddr tests that marshalling and unmarshalling is done correctly when the
|
||||||
|
// tag Address (byte slice) contains some arbitrary value
|
||||||
|
func TestMarshallingWithAddr(t *testing.T) {
|
||||||
|
tg := NewTag(111, "test/tag", 10)
|
||||||
|
tg.Address = []byte{0, 1, 2, 3, 4, 5, 6}
|
||||||
|
|
||||||
|
for _, f := range allStates {
|
||||||
|
tg.Inc(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := tg.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
unmarshalledTag := &Tag{}
|
||||||
|
err = unmarshalledTag.UnmarshalBinary(b)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if unmarshalledTag.Uid != tg.Uid {
|
||||||
|
t.Fatalf("tag uids not equal. want %d got %d", tg.Uid, unmarshalledTag.Uid)
|
||||||
|
}
|
||||||
|
|
||||||
|
if unmarshalledTag.Name != tg.Name {
|
||||||
|
t.Fatalf("tag names not equal. want %s got %s", tg.Name, unmarshalledTag.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, state := range allStates {
|
||||||
|
uv, tv := unmarshalledTag.Get(state), tg.Get(state)
|
||||||
|
if uv != tv {
|
||||||
|
t.Fatalf("state %d inconsistent. expected %d to equal %d", state, uv, tv)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if unmarshalledTag.Total() != tg.Total() {
|
||||||
|
t.Fatalf("tag names not equal. want %d got %d", tg.Total(), unmarshalledTag.Total())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(unmarshalledTag.Address) != len(tg.Address) {
|
||||||
|
t.Fatalf("tag addresses length mismatch, want %d, got %d", len(tg.Address), len(unmarshalledTag.Address))
|
||||||
|
}
|
||||||
|
|
||||||
|
if !bytes.Equal(unmarshalledTag.Address, tg.Address) {
|
||||||
|
t.Fatalf("expected tag address to be %v got %v", unmarshalledTag.Address, tg.Address)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestMarshallingNoAddress tests that marshalling and unmarshalling is done correctly
|
||||||
|
// when the tag Address (byte slice) is left empty
|
||||||
|
func TestMarshallingNoAddr(t *testing.T) {
|
||||||
|
tg := NewTag(111, "test/tag", 10)
|
||||||
|
for _, f := range allStates {
|
||||||
|
tg.Inc(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := tg.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
unmarshalledTag := &Tag{}
|
||||||
|
err = unmarshalledTag.UnmarshalBinary(b)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if unmarshalledTag.Uid != tg.Uid {
|
||||||
|
t.Fatalf("tag uids not equal. want %d got %d", tg.Uid, unmarshalledTag.Uid)
|
||||||
|
}
|
||||||
|
|
||||||
|
if unmarshalledTag.Name != tg.Name {
|
||||||
|
t.Fatalf("tag names not equal. want %s got %s", tg.Name, unmarshalledTag.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, state := range allStates {
|
||||||
|
uv, tv := unmarshalledTag.Get(state), tg.Get(state)
|
||||||
|
if uv != tv {
|
||||||
|
t.Fatalf("state %d inconsistent. expected %d to equal %d", state, uv, tv)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if unmarshalledTag.Total() != tg.Total() {
|
||||||
|
t.Fatalf("tag names not equal. want %d got %d", tg.Total(), unmarshalledTag.Total())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(unmarshalledTag.Address) != len(tg.Address) {
|
||||||
|
t.Fatalf("expected tag addresses to be equal length")
|
||||||
|
}
|
||||||
|
}
|
96
swarm/chunk/tags.go
Normal file
@ -0,0 +1,96 @@
|
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package chunk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"math/rand"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/sctx"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tags holds tag information indexed by a unique random uint32
|
||||||
|
type Tags struct {
|
||||||
|
tags *sync.Map
|
||||||
|
rng *rand.Rand
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTags creates a tags object
|
||||||
|
func NewTags() *Tags {
|
||||||
|
return &Tags{
|
||||||
|
tags: &sync.Map{},
|
||||||
|
rng: rand.New(rand.NewSource(time.Now().Unix())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new tag, stores it by its unique Uid and returns it.
// It returns an error if a tag with this Uid already exists.
|
||||||
|
func (ts *Tags) New(s string, total int64) (*Tag, error) {
|
||||||
|
t := &Tag{
|
||||||
|
Uid: ts.rng.Uint32(),
|
||||||
|
Name: s,
|
||||||
|
startedAt: time.Now(),
|
||||||
|
total: total,
|
||||||
|
}
|
||||||
|
if _, loaded := ts.tags.LoadOrStore(t.Uid, t); loaded {
|
||||||
|
return nil, errExists
|
||||||
|
}
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// All returns all existing tags in Tags' sync.Map
|
||||||
|
// Note that tags are returned in no particular order
|
||||||
|
func (ts *Tags) All() (t []*Tag) {
|
||||||
|
ts.tags.Range(func(k, v interface{}) bool {
|
||||||
|
t = append(t, v.(*Tag))
|
||||||
|
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the underlying tag for the uid or an error if not found
|
||||||
|
func (ts *Tags) Get(uid uint32) (*Tag, error) {
|
||||||
|
t, ok := ts.tags.Load(uid)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("tag not found")
|
||||||
|
}
|
||||||
|
return t.(*Tag), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFromContext gets a tag from the tag uid stored in the context
|
||||||
|
func (ts *Tags) GetFromContext(ctx context.Context) (*Tag, error) {
|
||||||
|
uid := sctx.GetTag(ctx)
|
||||||
|
t, ok := ts.tags.Load(uid)
|
||||||
|
if !ok {
|
||||||
|
return nil, errTagNotFound
|
||||||
|
}
|
||||||
|
return t.(*Tag), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Range exposes sync.Map's iterator
|
||||||
|
func (ts *Tags) Range(fn func(k, v interface{}) bool) {
|
||||||
|
ts.tags.Range(fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *Tags) Delete(k interface{}) {
|
||||||
|
ts.tags.Delete(k)
|
||||||
|
}
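A minimal end-to-end sketch of the tags API added in this change (names illustrative, error handling elided):

	ts := NewTags()
	tag, err := ts.New("upload-1", 3)
	if err != nil {
		// handle error
	}
	for i := 0; i < 3; i++ {
		tag.Inc(StateSplit)
		tag.Inc(StateStored)
	}
	count, total, err := tag.Status(StateStored)
	// count == 3, total == 3, err == nil once every chunk is stored
	_, _, _ = count, total, err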
|
48
swarm/chunk/tags_test.go
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package chunk
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestAll(t *testing.T) {
|
||||||
|
ts := NewTags()
|
||||||
|
|
||||||
|
ts.New("1", 1)
|
||||||
|
ts.New("2", 1)
|
||||||
|
|
||||||
|
all := ts.All()
|
||||||
|
|
||||||
|
if len(all) != 2 {
|
||||||
|
t.Fatalf("expected length to be 2 got %d", len(all))
|
||||||
|
}
|
||||||
|
|
||||||
|
if n := all[0].Total(); n != 1 {
|
||||||
|
t.Fatalf("expected tag 0 total to be 1 got %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if n := all[1].Total(); n != 1 {
|
||||||
|
t.Fatalf("expected tag 1 total to be 1 got %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
ts.New("3", 1)
|
||||||
|
all = ts.All()
|
||||||
|
|
||||||
|
if len(all) != 3 {
|
||||||
|
t.Fatalf("expected length to be 3 got %d", len(all))
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
@ -31,6 +31,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/swarm/api"
|
"github.com/ethereum/go-ethereum/swarm/api"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||||
"github.com/ethereum/go-ethereum/swarm/testutil"
|
"github.com/ethereum/go-ethereum/swarm/testutil"
|
||||||
colorable "github.com/mattn/go-colorable"
|
colorable "github.com/mattn/go-colorable"
|
||||||
@ -1614,11 +1615,11 @@ func TestFUSE(t *testing.T) {
|
|||||||
}
|
}
|
||||||
defer os.RemoveAll(datadir)
|
defer os.RemoveAll(datadir)
|
||||||
|
|
||||||
fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32))
|
fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32), chunk.NewTags())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
ta := &testAPI{api: api.NewAPI(fileStore, nil, nil, nil)}
|
ta := &testAPI{api: api.NewAPI(fileStore, nil, nil, nil, chunk.NewTags())}
|
||||||
|
|
||||||
//run a short suite of tests
|
//run a short suite of tests
|
||||||
//approx time: 28s
|
//approx time: 28s
|
||||||
|
@ -116,7 +116,7 @@ func (h *Hive) Stop() error {
|
|||||||
log.Info(fmt.Sprintf("%08x hive stopped, dropping peers", h.BaseAddr()[:4]))
|
log.Info(fmt.Sprintf("%08x hive stopped, dropping peers", h.BaseAddr()[:4]))
|
||||||
h.EachConn(nil, 255, func(p *Peer, _ int) bool {
|
h.EachConn(nil, 255, func(p *Peer, _ int) bool {
|
||||||
log.Info(fmt.Sprintf("%08x dropping peer %08x", h.BaseAddr()[:4], p.Address()[:4]))
|
log.Info(fmt.Sprintf("%08x dropping peer %08x", h.BaseAddr()[:4], p.Address()[:4]))
|
||||||
p.Drop(nil)
|
p.Drop()
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -117,7 +117,7 @@ func TestHiveStatePersistance(t *testing.T) {
|
|||||||
|
|
||||||
const peersCount = 5
|
const peersCount = 5
|
||||||
|
|
||||||
startHive := func(t *testing.T, dir string) (h *Hive) {
|
startHive := func(t *testing.T, dir string) (h *Hive, cleanupFunc func()) {
|
||||||
store, err := state.NewDBStore(dir)
|
store, err := state.NewDBStore(dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -137,27 +137,30 @@ func TestHiveStatePersistance(t *testing.T) {
|
|||||||
if err := h.Start(s.Server); err != nil {
|
if err := h.Start(s.Server); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
return h
|
|
||||||
|
cleanupFunc = func() {
|
||||||
|
err := h.Stop()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.Stop()
|
||||||
|
}
|
||||||
|
return h, cleanupFunc
|
||||||
}
|
}
|
||||||
|
|
||||||
h1 := startHive(t, dir)
|
h1, cleanup1 := startHive(t, dir)
|
||||||
peers := make(map[string]bool)
|
peers := make(map[string]bool)
|
||||||
for i := 0; i < peersCount; i++ {
|
for i := 0; i < peersCount; i++ {
|
||||||
raddr := RandomAddr()
|
raddr := RandomAddr()
|
||||||
h1.Register(raddr)
|
h1.Register(raddr)
|
||||||
peers[raddr.String()] = true
|
peers[raddr.String()] = true
|
||||||
}
|
}
|
||||||
if err = h1.Stop(); err != nil {
|
cleanup1()
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// start the hive and check that we know of all expected peers
|
// start the hive and check that we know of all expected peers
|
||||||
h2 := startHive(t, dir)
|
h2, cleanup2 := startHive(t, dir)
|
||||||
defer func() {
|
cleanup2()
|
||||||
if err = h2.Stop(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
i := 0
|
i := 0
|
||||||
h2.Kademlia.EachAddr(nil, 256, func(addr *BzzAddr, po int) bool {
|
h2.Kademlia.EachAddr(nil, 256, func(addr *BzzAddr, po int) bool {
|
||||||
|
@ -25,6 +25,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
"github.com/ethereum/go-ethereum/swarm/pot"
|
"github.com/ethereum/go-ethereum/swarm/pot"
|
||||||
sv "github.com/ethereum/go-ethereum/swarm/version"
|
sv "github.com/ethereum/go-ethereum/swarm/version"
|
||||||
@ -82,14 +83,14 @@ func NewKadParams() *KadParams {
|
|||||||
// Kademlia is a table of live peers and a db of known peers (node records)
|
// Kademlia is a table of live peers and a db of known peers (node records)
|
||||||
type Kademlia struct {
|
type Kademlia struct {
|
||||||
lock sync.RWMutex
|
lock sync.RWMutex
|
||||||
*KadParams // Kademlia configuration parameters
|
*KadParams // Kademlia configuration parameters
|
||||||
base []byte // immutable baseaddress of the table
|
base []byte // immutable baseaddress of the table
|
||||||
addrs *pot.Pot // pots container for known peer addresses
|
addrs *pot.Pot // pots container for known peer addresses
|
||||||
conns *pot.Pot // pots container for live peer connections
|
conns *pot.Pot // pots container for live peer connections
|
||||||
depth uint8 // stores the last current depth of saturation
|
depth uint8 // stores the last current depth of saturation
|
||||||
nDepth int // stores the last neighbourhood depth
|
nDepth int // stores the last neighbourhood depth
|
||||||
nDepthC chan int // returned by DepthC function to signal neighbourhood depth change
|
nDepthMu sync.RWMutex // protects neighbourhood depth nDepth
|
||||||
addrCountC chan int // returned by AddrCountC function to signal peer count change
|
nDepthSig []chan struct{} // signals when neighbourhood depth nDepth is changed
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewKademlia creates a Kademlia table for base address addr
|
// NewKademlia creates a Kademlia table for base address addr
|
||||||
@ -138,6 +139,9 @@ func (e *entry) Hex() string {
|
|||||||
func (k *Kademlia) Register(peers ...*BzzAddr) error {
|
func (k *Kademlia) Register(peers ...*BzzAddr) error {
|
||||||
k.lock.Lock()
|
k.lock.Lock()
|
||||||
defer k.lock.Unlock()
|
defer k.lock.Unlock()
|
||||||
|
|
||||||
|
metrics.GetOrRegisterCounter("kad.register", nil).Inc(1)
|
||||||
|
|
||||||
var known, size int
|
var known, size int
|
||||||
for _, p := range peers {
|
for _, p := range peers {
|
||||||
log.Trace("kademlia trying to register", "addr", p)
|
log.Trace("kademlia trying to register", "addr", p)
|
||||||
@ -164,8 +168,6 @@ func (k *Kademlia) Register(peers ...*BzzAddr) error {
|
|||||||
return newEntry(p)
|
return newEntry(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Trace("found among known peers, underlay addr is same, do nothing", "new", p, "old", e.BzzAddr)
|
|
||||||
|
|
||||||
return v
|
return v
|
||||||
})
|
})
|
||||||
if found {
|
if found {
|
||||||
@ -173,12 +175,8 @@ func (k *Kademlia) Register(peers ...*BzzAddr) error {
|
|||||||
}
|
}
|
||||||
size++
|
size++
|
||||||
}
|
}
|
||||||
// send new address count value only if there are new addresses
|
|
||||||
if k.addrCountC != nil && size-known > 0 {
|
|
||||||
k.addrCountC <- k.addrs.Size()
|
|
||||||
}
|
|
||||||
|
|
||||||
k.sendNeighbourhoodDepthChange()
|
k.setNeighbourhoodDepth()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -186,6 +184,9 @@ func (k *Kademlia) Register(peers ...*BzzAddr) error {
|
|||||||
func (k *Kademlia) SuggestPeer() (suggestedPeer *BzzAddr, saturationDepth int, changed bool) {
|
func (k *Kademlia) SuggestPeer() (suggestedPeer *BzzAddr, saturationDepth int, changed bool) {
|
||||||
k.lock.Lock()
|
k.lock.Lock()
|
||||||
defer k.lock.Unlock()
|
defer k.lock.Unlock()
|
||||||
|
|
||||||
|
metrics.GetOrRegisterCounter("kad.suggestpeer", nil).Inc(1)
|
||||||
|
|
||||||
radius := neighbourhoodRadiusForPot(k.conns, k.NeighbourhoodSize, k.base)
|
radius := neighbourhoodRadiusForPot(k.conns, k.NeighbourhoodSize, k.base)
|
||||||
// collect undersaturated bins in ascending order of number of connected peers
|
// collect undersaturated bins in ascending order of number of connected peers
|
||||||
// and from shallow to deep (ascending order of PO)
|
// and from shallow to deep (ascending order of PO)
|
||||||
@ -297,6 +298,9 @@ func (k *Kademlia) SuggestPeer() (suggestedPeer *BzzAddr, saturationDepth int, c
|
|||||||
func (k *Kademlia) On(p *Peer) (uint8, bool) {
|
func (k *Kademlia) On(p *Peer) (uint8, bool) {
|
||||||
k.lock.Lock()
|
k.lock.Lock()
|
||||||
defer k.lock.Unlock()
|
defer k.lock.Unlock()
|
||||||
|
|
||||||
|
metrics.GetOrRegisterCounter("kad.on", nil).Inc(1)
|
||||||
|
|
||||||
var ins bool
|
var ins bool
|
||||||
k.conns, _, _, _ = pot.Swap(k.conns, p, Pof, func(v pot.Val) pot.Val {
|
k.conns, _, _, _ = pot.Swap(k.conns, p, Pof, func(v pot.Val) pot.Val {
|
||||||
// if not found live
|
// if not found live
|
||||||
@ -315,12 +319,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
|
|||||||
k.addrs, _, _, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
|
k.addrs, _, _, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
|
||||||
return a
|
return a
|
||||||
})
|
})
|
||||||
// send new address count value only if the peer is inserted
|
|
||||||
if k.addrCountC != nil {
|
|
||||||
k.addrCountC <- k.addrs.Size()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
log.Trace(k.string())
|
|
||||||
// calculate if depth of saturation changed
|
// calculate if depth of saturation changed
|
||||||
depth := uint8(k.saturation())
|
depth := uint8(k.saturation())
|
||||||
var changed bool
|
var changed bool
|
||||||
@ -328,75 +327,72 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
|
|||||||
changed = true
|
changed = true
|
||||||
k.depth = depth
|
k.depth = depth
|
||||||
}
|
}
|
||||||
k.sendNeighbourhoodDepthChange()
|
k.setNeighbourhoodDepth()
|
||||||
return k.depth, changed
|
return k.depth, changed
|
||||||
}
|
}
|
||||||
|
|
||||||
// NeighbourhoodDepthC returns the channel that sends a new kademlia
|
// setNeighbourhoodDepth calculates neighbourhood depth with depthForPot,
|
||||||
// neighbourhood depth on each change.
|
// sets it to the nDepth and sends a signal to every nDepthSig channel.
|
||||||
// Not receiving from the returned channel will block On function
|
func (k *Kademlia) setNeighbourhoodDepth() {
|
||||||
// when the neighbourhood depth is changed.
|
nDepth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
|
||||||
// TODO: Why is this exported, and if it should be; why can't we have more subscribers than one?
|
var changed bool
|
||||||
func (k *Kademlia) NeighbourhoodDepthC() <-chan int {
|
k.nDepthMu.Lock()
|
||||||
k.lock.Lock()
|
if nDepth != k.nDepth {
|
||||||
defer k.lock.Unlock()
|
k.nDepth = nDepth
|
||||||
if k.nDepthC == nil {
|
changed = true
|
||||||
k.nDepthC = make(chan int)
|
|
||||||
}
|
}
|
||||||
return k.nDepthC
|
k.nDepthMu.Unlock()
|
||||||
}
|
|
||||||
|
|
||||||
// CloseNeighbourhoodDepthC closes the channel returned by
|
if len(k.nDepthSig) > 0 && changed {
|
||||||
// NeighbourhoodDepthC and stops sending neighbourhood change.
|
for _, c := range k.nDepthSig {
|
||||||
func (k *Kademlia) CloseNeighbourhoodDepthC() {
|
// Every nDepthSig channel has a buffer capacity of 1,
|
||||||
k.lock.Lock()
|
// so every receiver will get the signal even if the
|
||||||
defer k.lock.Unlock()
|
// select statement has the default case to avoid blocking.
|
||||||
|
select {
|
||||||
if k.nDepthC != nil {
|
case c <- struct{}{}:
|
||||||
close(k.nDepthC)
|
default:
|
||||||
k.nDepthC = nil
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendNeighbourhoodDepthChange sends new neighbourhood depth to k.nDepth channel
|
|
||||||
// if it is initialized.
|
|
||||||
func (k *Kademlia) sendNeighbourhoodDepthChange() {
|
|
||||||
// nDepthC is initialized when NeighbourhoodDepthC is called and returned by it.
|
|
||||||
// It provides signaling of neighbourhood depth change.
|
|
||||||
// This part of the code is sending new neighbourhood depth to nDepthC if that condition is met.
|
|
||||||
if k.nDepthC != nil {
|
|
||||||
nDepth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
|
|
||||||
if nDepth != k.nDepth {
|
|
||||||
k.nDepth = nDepth
|
|
||||||
k.nDepthC <- nDepth
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddrCountC returns the channel that sends a new
|
// NeighbourhoodDepth returns the value calculated by depthForPot function
|
||||||
// address count value on each change.
|
// in setNeighbourhoodDepth method.
|
||||||
// Not receiving from the returned channel will block Register function
|
func (k *Kademlia) NeighbourhoodDepth() int {
|
||||||
// when address count value changes.
|
k.nDepthMu.RLock()
|
||||||
func (k *Kademlia) AddrCountC() <-chan int {
|
defer k.nDepthMu.RUnlock()
|
||||||
k.lock.Lock()
|
return k.nDepth
|
||||||
defer k.lock.Unlock()
|
|
||||||
|
|
||||||
if k.addrCountC == nil {
|
|
||||||
k.addrCountC = make(chan int)
|
|
||||||
}
|
|
||||||
return k.addrCountC
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CloseAddrCountC closes the channel returned by
|
// SubscribeToNeighbourhoodDepthChange returns the channel that signals
|
||||||
// AddrCountC and stops sending address count change.
|
// when neighbourhood depth value is changed. The current neighbourhood depth
|
||||||
func (k *Kademlia) CloseAddrCountC() {
|
// is returned by NeighbourhoodDepth method. Returned function unsubscribes
|
||||||
|
// the channel from signaling and releases the resources. Returned function is safe
|
||||||
|
// to be called multiple times.
|
||||||
|
func (k *Kademlia) SubscribeToNeighbourhoodDepthChange() (c <-chan struct{}, unsubscribe func()) {
|
||||||
|
channel := make(chan struct{}, 1)
|
||||||
|
var closeOnce sync.Once
|
||||||
|
|
||||||
k.lock.Lock()
|
k.lock.Lock()
|
||||||
defer k.lock.Unlock()
|
defer k.lock.Unlock()
|
||||||
|
|
||||||
if k.addrCountC != nil {
|
k.nDepthSig = append(k.nDepthSig, channel)
|
||||||
close(k.addrCountC)
|
|
||||||
k.addrCountC = nil
|
unsubscribe = func() {
|
||||||
|
k.lock.Lock()
|
||||||
|
defer k.lock.Unlock()
|
||||||
|
|
||||||
|
for i, c := range k.nDepthSig {
|
||||||
|
if c == channel {
|
||||||
|
k.nDepthSig = append(k.nDepthSig[:i], k.nDepthSig[i+1:]...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
closeOnce.Do(func() { close(channel) })
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return channel, unsubscribe
|
||||||
}
|
}
|
||||||
|
|
||||||
// Off removes a peer from among live peers
|
// Off removes a peer from among live peers
|
||||||
@ -422,11 +418,7 @@ func (k *Kademlia) Off(p *Peer) {
|
|||||||
// v cannot be nil, but no need to check
|
// v cannot be nil, but no need to check
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
// send new address count value only if the peer is deleted
|
k.setNeighbourhoodDepth()
|
||||||
if k.addrCountC != nil {
|
|
||||||
k.addrCountC <- k.addrs.Size()
|
|
||||||
}
|
|
||||||
k.sendNeighbourhoodDepthChange()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -484,13 +476,6 @@ func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int) bool) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// NeighbourhoodDepth returns the depth for the pot, see depthForPot
|
|
||||||
func (k *Kademlia) NeighbourhoodDepth() (depth int) {
|
|
||||||
k.lock.RLock()
|
|
||||||
defer k.lock.RUnlock()
|
|
||||||
return depthForPot(k.conns, k.NeighbourhoodSize, k.base)
|
|
||||||
}
|
|
||||||
|
|
||||||
// neighbourhoodRadiusForPot returns the neighbourhood radius of the kademlia
|
// neighbourhoodRadiusForPot returns the neighbourhood radius of the kademlia
|
||||||
// neighbourhood radius encloses the nearest neighbour set with size >= neighbourhoodSize
|
// neighbourhood radius encloses the nearest neighbour set with size >= neighbourhoodSize
|
||||||
// i.e., neighbourhood radius is the deepest PO such that all bins not shallower altogether
|
// i.e., neighbourhood radius is the deepest PO such that all bins not shallower altogether
|
||||||
@ -608,7 +593,7 @@ func (k *Kademlia) string() string {
|
|||||||
if len(sv.GitCommit) > 0 {
|
if len(sv.GitCommit) > 0 {
|
||||||
rows = append(rows, fmt.Sprintf("commit hash: %s", sv.GitCommit))
|
rows = append(rows, fmt.Sprintf("commit hash: %s", sv.GitCommit))
|
||||||
}
|
}
|
||||||
rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %x", time.Now().UTC().Format(time.UnixDate), k.BaseAddr()[:3]))
|
rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %x", time.Now().UTC().Format(time.UnixDate), k.BaseAddr()))
|
||||||
rows = append(rows, fmt.Sprintf("population: %d (%d), NeighbourhoodSize: %d, MinBinSize: %d, MaxBinSize: %d", k.conns.Size(), k.addrs.Size(), k.NeighbourhoodSize, k.MinBinSize, k.MaxBinSize))
|
rows = append(rows, fmt.Sprintf("population: %d (%d), NeighbourhoodSize: %d, MinBinSize: %d, MaxBinSize: %d", k.conns.Size(), k.addrs.Size(), k.NeighbourhoodSize, k.MinBinSize, k.MaxBinSize))
|
||||||
|
|
||||||
liverows := make([]string, k.MaxProxDisplay)
|
liverows := make([]string, k.MaxProxDisplay)
|
||||||
|
@ -541,7 +541,7 @@ func TestKademliaHiveString(t *testing.T) {
|
|||||||
tk.Register("10000000", "10000001")
|
tk.Register("10000000", "10000001")
|
||||||
tk.MaxProxDisplay = 8
|
tk.MaxProxDisplay = 8
|
||||||
h := tk.String()
|
h := tk.String()
|
||||||
expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4\n============ DEPTH: 0 ==========================================\n000 0 | 2 8100 (0) 8000 (0)\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
|
expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 0000000000000000000000000000000000000000000000000000000000000000\npopulation: 2 (4), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4\n============ DEPTH: 0 ==========================================\n000 0 | 2 8100 (0) 8000 (0)\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
|
||||||
if expH[104:] != h[104:] {
|
if expH[104:] != h[104:] {
|
||||||
t.Fatalf("incorrect hive output. expected %v, got %v", expH, h)
|
t.Fatalf("incorrect hive output. expected %v, got %v", expH, h)
|
||||||
}
|
}
|
||||||
@ -560,3 +560,113 @@ func newTestDiscoveryPeer(addr pot.Address, kad *Kademlia) *Peer {
|
|||||||
}
|
}
|
||||||
return NewPeer(bp, kad)
|
return NewPeer(bp, kad)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestKademlia_SubscribeToNeighbourhoodDepthChange checks if correct
|
||||||
|
// signaling over SubscribeToNeighbourhoodDepthChange channels are made
|
||||||
|
// when neighbourhood depth is changed.
|
||||||
|
func TestKademlia_SubscribeToNeighbourhoodDepthChange(t *testing.T) {
|
||||||
|
|
||||||
|
testSignal := func(t *testing.T, k *testKademlia, prevDepth int, c <-chan struct{}) (newDepth int) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case _, ok := <-c:
|
||||||
|
if !ok {
|
||||||
|
t.Error("closed signal channel")
|
||||||
|
}
|
||||||
|
newDepth = k.NeighbourhoodDepth()
|
||||||
|
if prevDepth == newDepth {
|
||||||
|
t.Error("depth not changed")
|
||||||
|
}
|
||||||
|
return newDepth
|
||||||
|
case <-time.After(2 * time.Second):
|
||||||
|
t.Error("timeout")
|
||||||
|
}
|
||||||
|
return newDepth
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("single subscription", func(t *testing.T) {
|
||||||
|
k := newTestKademlia(t, "00000000")
|
||||||
|
|
||||||
|
c, u := k.SubscribeToNeighbourhoodDepthChange()
|
||||||
|
defer u()
|
||||||
|
|
||||||
|
depth := k.NeighbourhoodDepth()
|
||||||
|
|
||||||
|
k.On("11111101", "01000000", "10000000", "00000010")
|
||||||
|
|
||||||
|
testSignal(t, k, depth, c)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("multiple subscriptions", func(t *testing.T) {
|
||||||
|
k := newTestKademlia(t, "00000000")
|
||||||
|
|
||||||
|
c1, u1 := k.SubscribeToNeighbourhoodDepthChange()
|
||||||
|
defer u1()
|
||||||
|
|
||||||
|
c2, u2 := k.SubscribeToNeighbourhoodDepthChange()
|
||||||
|
defer u2()
|
||||||
|
|
||||||
|
depth := k.NeighbourhoodDepth()
|
||||||
|
|
||||||
|
k.On("11111101", "01000000", "10000000", "00000010")
|
||||||
|
|
||||||
|
testSignal(t, k, depth, c1)
|
||||||
|
|
||||||
|
testSignal(t, k, depth, c2)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("multiple changes", func(t *testing.T) {
|
||||||
|
k := newTestKademlia(t, "00000000")
|
||||||
|
|
||||||
|
c, u := k.SubscribeToNeighbourhoodDepthChange()
|
||||||
|
defer u()
|
||||||
|
|
||||||
|
depth := k.NeighbourhoodDepth()
|
||||||
|
|
||||||
|
k.On("11111101", "01000000", "10000000", "00000010")
|
||||||
|
|
||||||
|
depth = testSignal(t, k, depth, c)
|
||||||
|
|
||||||
|
k.On("11111101", "01000010", "10000010", "00000110")
|
||||||
|
|
||||||
|
testSignal(t, k, depth, c)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("no depth change", func(t *testing.T) {
|
||||||
|
k := newTestKademlia(t, "00000000")
|
||||||
|
|
||||||
|
c, u := k.SubscribeToNeighbourhoodDepthChange()
|
||||||
|
defer u()
|
||||||
|
|
||||||
|
// does not trigger the depth change
|
||||||
|
k.On("11111101")
|
||||||
|
|
||||||
|
select {
|
||||||
|
case _, ok := <-c:
|
||||||
|
if !ok {
|
||||||
|
t.Error("closed signal channel")
|
||||||
|
}
|
||||||
|
t.Error("signal received")
|
||||||
|
case <-time.After(1 * time.Second):
|
||||||
|
// all fine
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("no new peers", func(t *testing.T) {
|
||||||
|
k := newTestKademlia(t, "00000000")
|
||||||
|
|
||||||
|
changeC, unsubscribe := k.SubscribeToNeighbourhoodDepthChange()
|
||||||
|
defer unsubscribe()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case _, ok := <-changeC:
|
||||||
|
if !ok {
|
||||||
|
t.Error("closed signal channel")
|
||||||
|
}
|
||||||
|
t.Error("signal received")
|
||||||
|
case <-time.After(1 * time.Second):
|
||||||
|
// all fine
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
@ -235,6 +235,7 @@ func TestBzzHandshakeNetworkIDMismatch(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
defer s.Stop()
|
||||||
node := s.Nodes[0]
|
node := s.Nodes[0]
|
||||||
|
|
||||||
err = s.testHandshake(
|
err = s.testHandshake(
|
||||||
@ -258,6 +259,7 @@ func TestBzzHandshakeVersionMismatch(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
defer s.Stop()
|
||||||
node := s.Nodes[0]
|
node := s.Nodes[0]
|
||||||
|
|
||||||
err = s.testHandshake(
|
err = s.testHandshake(
|
||||||
@ -281,6 +283,7 @@ func TestBzzHandshakeSuccess(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
defer s.Stop()
|
||||||
node := s.Nodes[0]
|
node := s.Nodes[0]
|
||||||
|
|
||||||
err = s.testHandshake(
|
err = s.testHandshake(
|
||||||
@ -312,6 +315,7 @@ func TestBzzHandshakeLightNode(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
defer pt.Stop()
|
||||||
|
|
||||||
node := pt.Nodes[0]
|
node := pt.Nodes[0]
|
||||||
addr := NewAddr(node)
|
addr := NewAddr(node)
|
||||||
|
@ -156,6 +156,7 @@ func createSimServiceMap(discovery bool) map[string]ServiceFunc {
|
|||||||
// Call WaitTillSnapshotRecreated() function and wait until it returns
|
// Call WaitTillSnapshotRecreated() function and wait until it returns
|
||||||
// Iterate the nodes and check if all the connections are successfully recreated
|
// Iterate the nodes and check if all the connections are successfully recreated
|
||||||
func TestWaitTillSnapshotRecreated(t *testing.T) {
|
func TestWaitTillSnapshotRecreated(t *testing.T) {
|
||||||
|
t.Skip("test is flaky. disabling until underlying problem is addressed")
|
||||||
var err error
|
var err error
|
||||||
sim := New(createSimServiceMap(true))
|
sim := New(createSimServiceMap(true))
|
||||||
_, err = sim.AddNodesAndConnectRing(16)
|
_, err = sim.AddNodesAndConnectRing(16)
|
||||||
|
@ -30,16 +30,19 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/network"
|
"github.com/ethereum/go-ethereum/swarm/network"
|
||||||
"github.com/ethereum/go-ethereum/swarm/network/simulation"
|
"github.com/ethereum/go-ethereum/swarm/network/simulation"
|
||||||
"github.com/ethereum/go-ethereum/swarm/state"
|
"github.com/ethereum/go-ethereum/swarm/state"
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||||
mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
|
"github.com/ethereum/go-ethereum/swarm/storage/localstore"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/storage/mock"
|
||||||
"github.com/ethereum/go-ethereum/swarm/testutil"
|
"github.com/ethereum/go-ethereum/swarm/testutil"
|
||||||
colorable "github.com/mattn/go-colorable"
|
colorable "github.com/mattn/go-colorable"
|
||||||
)
|
)
|
||||||
@ -51,7 +54,6 @@ var (
|
|||||||
useMockStore = flag.Bool("mockstore", false, "disabled mock store (default: enabled)")
|
useMockStore = flag.Bool("mockstore", false, "disabled mock store (default: enabled)")
|
||||||
longrunning = flag.Bool("longrunning", false, "do run long-running tests")
|
longrunning = flag.Bool("longrunning", false, "do run long-running tests")
|
||||||
|
|
||||||
bucketKeyDB = simulation.BucketKey("db")
|
|
||||||
bucketKeyStore = simulation.BucketKey("store")
|
bucketKeyStore = simulation.BucketKey("store")
|
||||||
bucketKeyFileStore = simulation.BucketKey("filestore")
|
bucketKeyFileStore = simulation.BucketKey("filestore")
|
||||||
bucketKeyNetStore = simulation.BucketKey("netstore")
|
bucketKeyNetStore = simulation.BucketKey("netstore")
|
||||||
@ -113,26 +115,24 @@ func newNetStoreAndDeliveryWithRequestFunc(ctx *adapters.ServiceContext, bucket
|
|||||||
func netStoreAndDeliveryWithAddr(ctx *adapters.ServiceContext, bucket *sync.Map, addr *network.BzzAddr) (*storage.NetStore, *Delivery, func(), error) {
|
func netStoreAndDeliveryWithAddr(ctx *adapters.ServiceContext, bucket *sync.Map, addr *network.BzzAddr) (*storage.NetStore, *Delivery, func(), error) {
|
||||||
n := ctx.Config.Node()
|
n := ctx.Config.Node()
|
||||||
|
|
||||||
store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
|
localStore, localStoreCleanup, err := newTestLocalStore(n.ID(), addr, nil)
|
||||||
if *useMockStore {
|
|
||||||
store, datadir, err = createMockStore(mockmem.NewGlobalStore(), n.ID(), addr)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, err
|
|
||||||
}
|
|
||||||
localStore := store.(*storage.LocalStore)
|
|
||||||
netStore, err := storage.NewNetStore(localStore, nil)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
|
netStore, err := storage.NewNetStore(localStore, nil)
|
||||||
|
if err != nil {
|
||||||
|
localStore.Close()
|
||||||
|
localStoreCleanup()
|
||||||
|
return nil, nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams(), chunk.NewTags())
|
||||||
|
|
||||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||||
delivery := NewDelivery(kad, netStore)
|
delivery := NewDelivery(kad, netStore)
|
||||||
|
|
||||||
bucket.Store(bucketKeyStore, store)
|
bucket.Store(bucketKeyStore, localStore)
|
||||||
bucket.Store(bucketKeyDB, netStore)
|
|
||||||
bucket.Store(bucketKeyDelivery, delivery)
|
bucket.Store(bucketKeyDelivery, delivery)
|
||||||
bucket.Store(bucketKeyFileStore, fileStore)
|
bucket.Store(bucketKeyFileStore, fileStore)
|
||||||
// for the kademlia object, we use the global key from the simulation package,
|
// for the kademlia object, we use the global key from the simulation package,
|
||||||
@ -141,13 +141,13 @@ func netStoreAndDeliveryWithAddr(ctx *adapters.ServiceContext, bucket *sync.Map,
|
|||||||
|
|
||||||
cleanup := func() {
|
cleanup := func() {
|
||||||
netStore.Close()
|
netStore.Close()
|
||||||
os.RemoveAll(datadir)
|
localStoreCleanup()
|
||||||
}
|
}
|
||||||
|
|
||||||
return netStore, delivery, cleanup, nil
|
return netStore, delivery, cleanup, nil
|
||||||
}
|
}
|
||||||
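The hunk above rewires the test storage stack: the old LocalStore/createTestLocalStorageForID pair is replaced by a localstore.DB wrapped in a NetStore, and the FileStore now also takes a chunk.Tags instance. A minimal standalone sketch of that wiring, using only the APIs as they appear in this diff; the temp directory and zeroed base key are illustrative, not repository code:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

func main() {
	// temporary directory standing in for the node's data dir (illustrative)
	datadir, err := ioutil.TempDir("", "swarm-storage-sketch-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(datadir)

	baseKey := make([]byte, 32) // illustrative base address; the tests use addr.Over()

	// 1. the new on-disk local store
	lStore, err := localstore.New(datadir, baseKey, nil)
	if err != nil {
		panic(err)
	}
	defer lStore.Close()

	// 2. NetStore wraps the local store for network retrieval
	netStore, err := storage.NewNetStore(lStore, nil)
	if err != nil {
		panic(err)
	}
	defer netStore.Close()

	// 3. FileStore now also takes a chunk.Tags instance
	fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams(), chunk.NewTags())
	fmt.Printf("storage stack ready: %T\n", fileStore)
}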
|
|
||||||
func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) {
|
func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *localstore.DB, func(), error) {
|
||||||
// setup
|
// setup
|
||||||
addr := network.RandomAddr() // tested peers peer address
|
addr := network.RandomAddr() // tested peers peer address
|
||||||
to := network.NewKademlia(addr.OAddr, network.NewKadParams())
|
to := network.NewKademlia(addr.OAddr, network.NewKadParams())
|
||||||
@ -161,11 +161,7 @@ func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTeste
|
|||||||
os.RemoveAll(datadir)
|
os.RemoveAll(datadir)
|
||||||
}
|
}
|
||||||
|
|
||||||
params := storage.NewDefaultLocalStoreParams()
|
localStore, err := localstore.New(datadir, addr.Over(), nil)
|
||||||
params.Init(datadir)
|
|
||||||
params.BaseKey = addr.Over()
|
|
||||||
|
|
||||||
localStore, err := storage.NewTestLocalStoreForAddr(params)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
removeDataDir()
|
removeDataDir()
|
||||||
return nil, nil, nil, nil, err
|
return nil, nil, nil, nil, err
|
||||||
@ -173,17 +169,16 @@ func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTeste
|
|||||||
|
|
||||||
netStore, err := storage.NewNetStore(localStore, nil)
|
netStore, err := storage.NewNetStore(localStore, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
localStore.Close()
|
||||||
removeDataDir()
|
removeDataDir()
|
||||||
return nil, nil, nil, nil, err
|
return nil, nil, nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
delivery := NewDelivery(to, netStore)
|
delivery := NewDelivery(to, netStore)
|
||||||
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
|
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
|
||||||
streamer := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), registryOptions, nil)
|
intervalsStore := state.NewInmemoryStore()
|
||||||
teardown := func() {
|
streamer := NewRegistry(addr.ID(), delivery, netStore, intervalsStore, registryOptions, nil)
|
||||||
streamer.Close()
|
|
||||||
removeDataDir()
|
|
||||||
}
|
|
||||||
prvkey, err := crypto.GenerateKey()
|
prvkey, err := crypto.GenerateKey()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
removeDataDir()
|
removeDataDir()
|
||||||
@ -191,7 +186,13 @@ func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTeste
|
|||||||
}
|
}
|
||||||
|
|
||||||
protocolTester := p2ptest.NewProtocolTester(prvkey, 1, streamer.runProtocol)
|
protocolTester := p2ptest.NewProtocolTester(prvkey, 1, streamer.runProtocol)
|
||||||
|
teardown := func() {
|
||||||
|
protocolTester.Stop()
|
||||||
|
streamer.Close()
|
||||||
|
intervalsStore.Close()
|
||||||
|
netStore.Close()
|
||||||
|
removeDataDir()
|
||||||
|
}
|
||||||
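The new teardown closure above stops components in roughly the reverse order they were built (protocol tester, streamer, intervals store, net store, then the data dir). A small self-contained sketch of that LIFO cleanup pattern; the resource names are placeholders, not code from the repository:

package main

import "fmt"

// cleanupStack collects teardown functions and runs them LIFO, mirroring how
// newStreamerTester now stops the protocol tester, streamer, intervals store,
// net store and finally removes the data dir.
type cleanupStack []func()

func (c *cleanupStack) add(f func()) { *c = append(*c, f) }

func (c *cleanupStack) run() {
	for i := len(*c) - 1; i >= 0; i-- {
		(*c)[i]()
	}
}

func main() {
	var teardown cleanupStack
	teardown.add(func() { fmt.Println("remove data dir") })      // created first, removed last
	teardown.add(func() { fmt.Println("close net store") })
	teardown.add(func() { fmt.Println("close intervals store") })
	teardown.add(func() { fmt.Println("close streamer") })
	teardown.add(func() { fmt.Println("stop protocol tester") }) // created last, stopped first
	teardown.run()
}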
err = waitForPeers(streamer, 10*time.Second, 1)
|
err = waitForPeers(streamer, 10*time.Second, 1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
teardown()
|
teardown()
|
||||||
@ -228,24 +229,37 @@ func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// not used in this context, only to fulfill ChunkStore interface
|
// not used in this context, only to fulfill ChunkStore interface
|
||||||
func (rrs *roundRobinStore) Has(ctx context.Context, addr storage.Address) bool {
|
func (rrs *roundRobinStore) Has(_ context.Context, _ storage.Address) (bool, error) {
|
||||||
panic("RoundRobinStor doesn't support HasChunk")
|
return false, errors.New("roundRobinStore doesn't support Has")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rrs *roundRobinStore) Get(ctx context.Context, addr storage.Address) (storage.Chunk, error) {
|
func (rrs *roundRobinStore) Get(_ context.Context, _ chunk.ModeGet, _ storage.Address) (storage.Chunk, error) {
|
||||||
return nil, errors.New("get not well defined on round robin store")
|
return nil, errors.New("roundRobinStore doesn't support Get")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rrs *roundRobinStore) Put(ctx context.Context, chunk storage.Chunk) error {
|
func (rrs *roundRobinStore) Put(ctx context.Context, mode chunk.ModePut, ch storage.Chunk) (bool, error) {
|
||||||
i := atomic.AddUint32(&rrs.index, 1)
|
i := atomic.AddUint32(&rrs.index, 1)
|
||||||
idx := int(i) % len(rrs.stores)
|
idx := int(i) % len(rrs.stores)
|
||||||
return rrs.stores[idx].Put(ctx, chunk)
|
return rrs.stores[idx].Put(ctx, mode, ch)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rrs *roundRobinStore) Close() {
|
func (rrs *roundRobinStore) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
|
||||||
|
return errors.New("roundRobinStore doesn't support Set")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rrs *roundRobinStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
|
||||||
|
return 0, errors.New("roundRobinStore doesn't support LastPullSubscriptionBinID")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rrs *roundRobinStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rrs *roundRobinStore) Close() error {
|
||||||
for _, store := range rrs.stores {
|
for _, store := range rrs.stores {
|
||||||
store.Close()
|
store.Close()
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
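The reworked roundRobinStore above only supports Put, rotating it over its backing stores, while the read-side methods return errors. A standalone sketch of just the rotation logic, with a deliberately simplified interface instead of the real chunk store API:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// putter is a simplified stand-in for the chunk store interface.
type putter interface {
	Put(data []byte) error
}

type memStore struct{ name string }

func (m *memStore) Put(data []byte) error {
	fmt.Printf("%s stored %d bytes\n", m.name, len(data))
	return nil
}

// roundRobin spreads writes over its backing stores, like the test helper in the diff.
type roundRobin struct {
	index  uint32
	stores []putter
}

func (r *roundRobin) Put(data []byte) error {
	if len(r.stores) == 0 {
		return errors.New("no backing stores")
	}
	i := atomic.AddUint32(&r.index, 1)
	return r.stores[int(i)%len(r.stores)].Put(data)
}

func main() {
	rr := &roundRobin{stores: []putter{&memStore{"a"}, &memStore{"b"}, &memStore{"c"}}}
	for i := 0; i < 6; i++ {
		_ = rr.Put([]byte("chunk"))
	}
}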
|
|
||||||
func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) {
|
func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) {
|
||||||
@ -311,24 +325,28 @@ func generateRandomFile() (string, error) {
|
|||||||
return string(b), nil
|
return string(b), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//create a local store for the given node
|
func newTestLocalStore(id enode.ID, addr *network.BzzAddr, globalStore mock.GlobalStorer) (localStore *localstore.DB, cleanup func(), err error) {
|
||||||
func createTestLocalStorageForID(id enode.ID, addr *network.BzzAddr) (storage.ChunkStore, string, error) {
|
dir, err := ioutil.TempDir("", "swarm-stream-")
|
||||||
var datadir string
|
|
||||||
var err error
|
|
||||||
datadir, err = ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString()))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
var store storage.ChunkStore
|
cleanup = func() {
|
||||||
params := storage.NewDefaultLocalStoreParams()
|
os.RemoveAll(dir)
|
||||||
params.ChunkDbPath = datadir
|
}
|
||||||
params.BaseKey = addr.Over()
|
|
||||||
store, err = storage.NewTestLocalStoreForAddr(params)
|
var mockStore *mock.NodeStore
|
||||||
|
if globalStore != nil {
|
||||||
|
mockStore = globalStore.NewNodeStore(common.BytesToAddress(id.Bytes()))
|
||||||
|
}
|
||||||
|
|
||||||
|
localStore, err = localstore.New(dir, addr.Over(), &localstore.Options{
|
||||||
|
MockStore: mockStore,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
os.RemoveAll(datadir)
|
cleanup()
|
||||||
return nil, "", err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
return store, datadir, nil
|
return localStore, cleanup, nil
|
||||||
}
|
}
|
||||||
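newTestLocalStore above defines its cleanup closure immediately after creating the temp directory so every later error path can reuse it. A hedged usage sketch of that shape built around localstore.New as it appears in this diff; the directory prefix is illustrative and the mock-store option is left out:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// newTempLocalStore mirrors the shape of newTestLocalStore in the diff: the
// cleanup closure is defined right after the temp dir is created, so every
// later error path can simply call cleanup() and return.
func newTempLocalStore(baseKey []byte) (*localstore.DB, func(), error) {
	dir, err := ioutil.TempDir("", "swarm-localstore-sketch-")
	if err != nil {
		return nil, nil, err
	}
	cleanup := func() {
		os.RemoveAll(dir)
	}
	db, err := localstore.New(dir, baseKey, nil) // nil options: no mock store
	if err != nil {
		cleanup()
		return nil, nil, err
	}
	return db, cleanup, nil
}

func main() {
	db, cleanup, err := newTempLocalStore(make([]byte, 32))
	if err != nil {
		panic(err)
	}
	defer cleanup()
	defer db.Close()
	fmt.Println("local store ready")
}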
|
|
||||||
// watchDisconnections receives simulation peer events in a new goroutine and sets atomic value
|
// watchDisconnections receives simulation peer events in a new goroutine and sets atomic value
|
||||||
|
@ -20,9 +20,11 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
"github.com/ethereum/go-ethereum/swarm/network"
|
"github.com/ethereum/go-ethereum/swarm/network"
|
||||||
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
||||||
@ -32,11 +34,6 @@ import (
|
|||||||
olog "github.com/opentracing/opentracing-go/log"
|
olog "github.com/opentracing/opentracing-go/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
swarmChunkServerStreamName = "RETRIEVE_REQUEST"
|
|
||||||
deliveryCap = 32
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil)
|
processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil)
|
||||||
handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil)
|
handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil)
|
||||||
@ -44,93 +41,25 @@ var (
|
|||||||
|
|
||||||
requestFromPeersCount = metrics.NewRegisteredCounter("network.stream.request_from_peers.count", nil)
|
requestFromPeersCount = metrics.NewRegisteredCounter("network.stream.request_from_peers.count", nil)
|
||||||
requestFromPeersEachCount = metrics.NewRegisteredCounter("network.stream.request_from_peers_each.count", nil)
|
requestFromPeersEachCount = metrics.NewRegisteredCounter("network.stream.request_from_peers_each.count", nil)
|
||||||
|
|
||||||
|
lastReceivedChunksMsg = metrics.GetOrRegisterGauge("network.stream.received_chunks", nil)
|
||||||
)
|
)
|
||||||
|
|
||||||
type Delivery struct {
|
type Delivery struct {
|
||||||
chunkStore storage.SyncChunkStore
|
netStore *storage.NetStore
|
||||||
kad *network.Kademlia
|
kad *network.Kademlia
|
||||||
getPeer func(enode.ID) *Peer
|
getPeer func(enode.ID) *Peer
|
||||||
|
quit chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDelivery(kad *network.Kademlia, chunkStore storage.SyncChunkStore) *Delivery {
|
func NewDelivery(kad *network.Kademlia, netStore *storage.NetStore) *Delivery {
|
||||||
return &Delivery{
|
return &Delivery{
|
||||||
chunkStore: chunkStore,
|
netStore: netStore,
|
||||||
kad: kad,
|
kad: kad,
|
||||||
|
quit: make(chan struct{}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
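Delivery now carries its own quit channel (closed by the Close method added later in this file) so request-handling goroutines can stop when the component shuts down. A self-contained sketch of that pattern with placeholder types, not the real Delivery API:

package main

import (
	"context"
	"fmt"
	"time"
)

// delivery is a stripped-down stand-in for the Delivery type in the diff: it
// owns a quit channel that Close() closes, and request handlers tie their
// context cancellation to it.
type delivery struct {
	quit chan struct{}
}

func newDelivery() *delivery {
	return &delivery{quit: make(chan struct{})}
}

func (d *delivery) Close() { close(d.quit) }

// handleRequest cancels its work either when the caller's context is done or
// when the whole delivery component shuts down.
func (d *delivery) handleRequest(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	go func() {
		select {
		case <-ctx.Done():
		case <-d.quit:
		}
		cancel()
	}()

	select {
	case <-ctx.Done():
		fmt.Println("request aborted:", ctx.Err())
	case <-time.After(50 * time.Millisecond):
		fmt.Println("request served")
	}
}

func main() {
	d := newDelivery()
	go func() {
		time.Sleep(10 * time.Millisecond)
		d.Close() // shutting down cancels in-flight requests
	}()
	d.handleRequest(context.Background())
}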
|
|
||||||
// SwarmChunkServer implements Server
|
|
||||||
type SwarmChunkServer struct {
|
|
||||||
deliveryC chan []byte
|
|
||||||
batchC chan []byte
|
|
||||||
chunkStore storage.ChunkStore
|
|
||||||
currentLen uint64
|
|
||||||
quit chan struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSwarmChunkServer is SwarmChunkServer constructor
|
|
||||||
func NewSwarmChunkServer(chunkStore storage.ChunkStore) *SwarmChunkServer {
|
|
||||||
s := &SwarmChunkServer{
|
|
||||||
deliveryC: make(chan []byte, deliveryCap),
|
|
||||||
batchC: make(chan []byte),
|
|
||||||
chunkStore: chunkStore,
|
|
||||||
quit: make(chan struct{}),
|
|
||||||
}
|
|
||||||
go s.processDeliveries()
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// processDeliveries handles delivered chunk hashes
|
|
||||||
func (s *SwarmChunkServer) processDeliveries() {
|
|
||||||
var hashes []byte
|
|
||||||
var batchC chan []byte
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-s.quit:
|
|
||||||
return
|
|
||||||
case hash := <-s.deliveryC:
|
|
||||||
hashes = append(hashes, hash...)
|
|
||||||
batchC = s.batchC
|
|
||||||
case batchC <- hashes:
|
|
||||||
hashes = nil
|
|
||||||
batchC = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SessionIndex returns zero in all cases for SwarmChunkServer.
|
|
||||||
func (s *SwarmChunkServer) SessionIndex() (uint64, error) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNextBatch
|
|
||||||
func (s *SwarmChunkServer) SetNextBatch(_, _ uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error) {
|
|
||||||
select {
|
|
||||||
case hashes = <-s.batchC:
|
|
||||||
case <-s.quit:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
from = s.currentLen
|
|
||||||
s.currentLen += uint64(len(hashes))
|
|
||||||
to = s.currentLen
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close needs to be called on a stream server
|
|
||||||
func (s *SwarmChunkServer) Close() {
|
|
||||||
close(s.quit)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetData retrieves chunk data from db store
|
|
||||||
func (s *SwarmChunkServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
|
|
||||||
chunk, err := s.chunkStore.Get(ctx, storage.Address(key))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return chunk.Data(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RetrieveRequestMsg is the protocol msg for chunk retrieve requests
|
// RetrieveRequestMsg is the protocol msg for chunk retrieve requests
|
||||||
type RetrieveRequestMsg struct {
|
type RetrieveRequestMsg struct {
|
||||||
Addr storage.Address
|
Addr storage.Address
|
||||||
@ -149,12 +78,6 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
|
|||||||
|
|
||||||
osp.LogFields(olog.String("ref", req.Addr.String()))
|
osp.LogFields(olog.String("ref", req.Addr.String()))
|
||||||
|
|
||||||
s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", true))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
streamer := s.Server.(*SwarmChunkServer)
|
|
||||||
|
|
||||||
var cancel func()
|
var cancel func()
|
||||||
// TODO: do something with this hardcoded timeout, maybe use TTL in the future
|
// TODO: do something with this hardcoded timeout, maybe use TTL in the future
|
||||||
ctx = context.WithValue(ctx, "peer", sp.ID().String())
|
ctx = context.WithValue(ctx, "peer", sp.ID().String())
|
||||||
@ -164,36 +87,26 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
|
|||||||
go func() {
|
go func() {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
case <-streamer.quit:
|
case <-d.quit:
|
||||||
}
|
}
|
||||||
cancel()
|
cancel()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
defer osp.Finish()
|
defer osp.Finish()
|
||||||
chunk, err := d.chunkStore.Get(ctx, req.Addr)
|
ch, err := d.netStore.Get(ctx, chunk.ModeGetRequest, req.Addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
retrieveChunkFail.Inc(1)
|
retrieveChunkFail.Inc(1)
|
||||||
log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "hopcount", req.HopCount, "err", err)
|
log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "hopcount", req.HopCount, "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if req.SkipCheck {
|
syncing := false
|
||||||
syncing := false
|
|
||||||
osp.LogFields(olog.Bool("skipCheck", true))
|
|
||||||
|
|
||||||
err = sp.Deliver(ctx, chunk, s.priority, syncing)
|
err = sp.Deliver(ctx, ch, Top, syncing)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warn("ERROR in handleRetrieveRequestMsg", "err", err)
|
log.Warn("ERROR in handleRetrieveRequestMsg", "err", err)
|
||||||
}
|
|
||||||
osp.LogFields(olog.Bool("delivered", true))
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
osp.LogFields(olog.Bool("skipCheck", false))
|
osp.LogFields(olog.Bool("delivered", true))
|
||||||
select {
|
|
||||||
case streamer.deliveryC <- chunk.Address()[:]:
|
|
||||||
case <-streamer.quit:
|
|
||||||
}
|
|
||||||
|
|
||||||
}()
|
}()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
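With the SwarmChunkServer removed, handleRetrieveRequestMsg above answers every retrieve request by fetching from the NetStore and delivering the chunk directly; the old SkipCheck/offered-hashes branch is gone. A simplified, standalone sketch of that flow with stand-in interfaces (none of these types exist in the repository):

package main

import (
	"context"
	"errors"
	"fmt"
)

type chunkData struct {
	addr []byte
	data []byte
}

type getter interface {
	Get(ctx context.Context, addr []byte) (*chunkData, error)
}

type peer interface {
	Deliver(ctx context.Context, ch *chunkData, syncing bool) error
}

// handleRetrieveRequest gets the chunk and delivers it straight back;
// there is no offered-hashes detour any more.
func handleRetrieveRequest(ctx context.Context, store getter, p peer, addr []byte) error {
	ch, err := store.Get(ctx, addr)
	if err != nil {
		// the real handler only logs and bumps a failure metric here
		return fmt.Errorf("cannot retrieve chunk: %v", err)
	}
	// syncing=false: this delivery answers a retrieve request, not a sync batch
	return p.Deliver(ctx, ch, false)
}

type memGetter map[string][]byte

func (m memGetter) Get(_ context.Context, addr []byte) (*chunkData, error) {
	data, ok := m[string(addr)]
	if !ok {
		return nil, errors.New("not found")
	}
	return &chunkData{addr: addr, data: data}, nil
}

type printPeer struct{}

func (printPeer) Deliver(_ context.Context, ch *chunkData, syncing bool) error {
	fmt.Printf("delivered %d bytes (syncing=%v)\n", len(ch.data), syncing)
	return nil
}

func main() {
	store := memGetter{"k": []byte("hello")}
	if err := handleRetrieveRequest(context.Background(), store, printPeer{}, []byte("k")); err != nil {
		fmt.Println(err)
	}
}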
@ -216,7 +129,7 @@ type ChunkDeliveryMsgRetrieval ChunkDeliveryMsg
|
|||||||
type ChunkDeliveryMsgSyncing ChunkDeliveryMsg
|
type ChunkDeliveryMsgSyncing ChunkDeliveryMsg
|
||||||
|
|
||||||
// chunk delivery msg is response to retrieverequest msg
|
// chunk delivery msg is response to retrieverequest msg
|
||||||
func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *ChunkDeliveryMsg) error {
|
func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req interface{}) error {
|
||||||
var osp opentracing.Span
|
var osp opentracing.Span
|
||||||
ctx, osp = spancontext.StartSpan(
|
ctx, osp = spancontext.StartSpan(
|
||||||
ctx,
|
ctx,
|
||||||
@ -224,36 +137,58 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *Ch
|
|||||||
|
|
||||||
processReceivedChunksCount.Inc(1)
|
processReceivedChunksCount.Inc(1)
|
||||||
|
|
||||||
// retrieve the span for the originating retrieverequest
|
// record the last time we received a chunk delivery message
|
||||||
spanId := fmt.Sprintf("stream.send.request.%v.%v", sp.ID(), req.Addr)
|
lastReceivedChunksMsg.Update(time.Now().UnixNano())
|
||||||
span := tracing.ShiftSpanByKey(spanId)
|
|
||||||
|
|
||||||
log.Trace("handle.chunk.delivery", "ref", req.Addr, "from peer", sp.ID())
|
var msg *ChunkDeliveryMsg
|
||||||
|
var mode chunk.ModePut
|
||||||
|
switch r := req.(type) {
|
||||||
|
case *ChunkDeliveryMsgRetrieval:
|
||||||
|
msg = (*ChunkDeliveryMsg)(r)
|
||||||
|
peerPO := chunk.Proximity(sp.BzzAddr.Over(), msg.Addr)
|
||||||
|
po := chunk.Proximity(d.kad.BaseAddr(), msg.Addr)
|
||||||
|
depth := d.kad.NeighbourhoodDepth()
|
||||||
|
// chunks within the area of responsibility should always sync
|
||||||
|
// https://github.com/ethersphere/go-ethereum/pull/1282#discussion_r269406125
|
||||||
|
if po >= depth || peerPO < po {
|
||||||
|
mode = chunk.ModePutSync
|
||||||
|
} else {
|
||||||
|
// do not sync if the peer that is sending us a chunk is closer to the chunk than we are
|
||||||
|
mode = chunk.ModePutRequest
|
||||||
|
}
|
||||||
|
case *ChunkDeliveryMsgSyncing:
|
||||||
|
msg = (*ChunkDeliveryMsg)(r)
|
||||||
|
mode = chunk.ModePutSync
|
||||||
|
case *ChunkDeliveryMsg:
|
||||||
|
msg = r
|
||||||
|
mode = chunk.ModePutSync
|
||||||
|
}
|
||||||
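The type switch above picks the put mode from chunk proximities, as the inline comment explains: chunks within our area of responsibility, or coming from a peer farther from the chunk than we are, get stored in sync mode. A self-contained sketch of that rule with a simplified proximity function (leading matching bits, an approximation of chunk.Proximity); addresses and depth are made up for illustration:

package main

import "fmt"

type putMode int

const (
	modePutRequest putMode = iota // cached for the requester, not synced further
	modePutSync                   // stored and offered onward via syncing
)

func (m putMode) String() string {
	if m == modePutSync {
		return "ModePutSync"
	}
	return "ModePutRequest"
}

// proximity counts leading equal bits of two equal-length byte slices,
// a simplified version of chunk.Proximity used by the delivery handler.
func proximity(a, b []byte) int {
	for i := 0; i < len(a) && i < len(b); i++ {
		x := a[i] ^ b[i]
		if x == 0 {
			continue
		}
		po := i * 8
		for mask := byte(0x80); mask > 0 && x&mask == 0; mask >>= 1 {
			po++
		}
		return po
	}
	return 8 * len(a)
}

// putModeFor reproduces the rule from handleChunkDeliveryMsg: chunks that fall
// within our area of responsibility (po >= depth), or that come from a peer
// farther from the chunk than we are (peerPO < po), are stored in sync mode.
func putModeFor(base, peerAddr, chunkAddr []byte, depth int) putMode {
	peerPO := proximity(peerAddr, chunkAddr)
	po := proximity(base, chunkAddr)
	if po >= depth || peerPO < po {
		return modePutSync
	}
	return modePutRequest
}

func main() {
	base := []byte{0x00, 0x00}
	peer := []byte{0x80, 0x00} // differs from our base in the first bit
	near := []byte{0x00, 0x01} // very close to our base address
	far := []byte{0xc0, 0x00}  // closer to the peer than to us

	fmt.Println("near chunk:", putModeFor(base, peer, near, 4)) // within depth -> sync
	fmt.Println("far chunk: ", putModeFor(base, peer, far, 4))  // peer is closer -> request
}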
|
|
||||||
|
log.Trace("handle.chunk.delivery", "ref", msg.Addr, "from peer", sp.ID())
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
defer osp.Finish()
|
defer osp.Finish()
|
||||||
|
|
||||||
if span != nil {
|
msg.peer = sp
|
||||||
span.LogFields(olog.String("finish", "from handleChunkDeliveryMsg"))
|
log.Trace("handle.chunk.delivery", "put", msg.Addr)
|
||||||
defer span.Finish()
|
_, err := d.netStore.Put(ctx, mode, storage.NewChunk(msg.Addr, msg.SData))
|
||||||
}
|
|
||||||
|
|
||||||
req.peer = sp
|
|
||||||
log.Trace("handle.chunk.delivery", "put", req.Addr)
|
|
||||||
err := d.chunkStore.Put(ctx, storage.NewChunk(req.Addr, req.SData))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == storage.ErrChunkInvalid {
|
if err == storage.ErrChunkInvalid {
|
||||||
// we removed this log because it spams the logs
|
// we removed this log because it spams the logs
|
||||||
// TODO: Enable this log line
|
// TODO: Enable this log line
|
||||||
// log.Warn("invalid chunk delivered", "peer", sp.ID(), "chunk", req.Addr, )
|
// log.Warn("invalid chunk delivered", "peer", sp.ID(), "chunk", msg.Addr, )
|
||||||
req.peer.Drop(err)
|
msg.peer.Drop()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.Trace("handle.chunk.delivery", "done put", req.Addr, "err", err)
|
log.Trace("handle.chunk.delivery", "done put", msg.Addr, "err", err)
|
||||||
}()
|
}()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Delivery) Close() {
|
||||||
|
close(d.quit)
|
||||||
|
}
|
||||||
|
|
||||||
// RequestFromPeers sends a chunk retrieve request to a peer
|
// RequestFromPeers sends a chunk retrieve request to a peer
|
||||||
// The most eligible peer that hasn't already been sent to is chosen
|
// The most eligible peer that hasn't already been sent to is chosen
|
||||||
// TODO: define "eligible"
|
// TODO: define "eligible"
|
||||||
|
@ -31,6 +31,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/p2p/protocols"
|
"github.com/ethereum/go-ethereum/p2p/protocols"
|
||||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
"github.com/ethereum/go-ethereum/swarm/network"
|
"github.com/ethereum/go-ethereum/swarm/network"
|
||||||
pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
|
pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
|
||||||
@ -40,64 +41,11 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/swarm/testutil"
|
"github.com/ethereum/go-ethereum/swarm/testutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
//Tests initializing a retrieve request
|
|
||||||
func TestStreamerRetrieveRequest(t *testing.T) {
|
|
||||||
regOpts := &RegistryOptions{
|
|
||||||
Retrieval: RetrievalClientOnly,
|
|
||||||
Syncing: SyncingDisabled,
|
|
||||||
}
|
|
||||||
tester, streamer, _, teardown, err := newStreamerTester(regOpts)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer teardown()
|
|
||||||
|
|
||||||
node := tester.Nodes[0]
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
req := network.NewRequest(
|
|
||||||
storage.Address(hash0[:]),
|
|
||||||
true,
|
|
||||||
&sync.Map{},
|
|
||||||
)
|
|
||||||
streamer.delivery.RequestFromPeers(ctx, req)
|
|
||||||
|
|
||||||
stream := NewStream(swarmChunkServerStreamName, "", true)
|
|
||||||
|
|
||||||
err = tester.TestExchanges(p2ptest.Exchange{
|
|
||||||
Label: "RetrieveRequestMsg",
|
|
||||||
Expects: []p2ptest.Expect{
|
|
||||||
{ //start expecting a subscription for RETRIEVE_REQUEST due to `RetrievalClientOnly`
|
|
||||||
Code: 4,
|
|
||||||
Msg: &SubscribeMsg{
|
|
||||||
Stream: stream,
|
|
||||||
History: nil,
|
|
||||||
Priority: Top,
|
|
||||||
},
|
|
||||||
Peer: node.ID(),
|
|
||||||
},
|
|
||||||
{ //expect a retrieve request message for the given hash
|
|
||||||
Code: 5,
|
|
||||||
Msg: &RetrieveRequestMsg{
|
|
||||||
Addr: hash0[:],
|
|
||||||
SkipCheck: true,
|
|
||||||
},
|
|
||||||
Peer: node.ID(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Expected no error, got %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
//Test requesting a chunk from a peer then issuing an "empty" OfferedHashesMsg (no hashes available yet)
|
//Test requesting a chunk from a peer then issuing an "empty" OfferedHashesMsg (no hashes available yet)
|
||||||
//Should time out as the peer does not have the chunk (no syncing happened previously)
|
//Should time out as the peer does not have the chunk (no syncing happened previously)
|
||||||
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
|
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
|
||||||
tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
|
tester, _, _, teardown, err := newStreamerTester(&RegistryOptions{
|
||||||
Retrieval: RetrievalEnabled,
|
Syncing: SyncingDisabled, //do no syncing
|
||||||
Syncing: SyncingDisabled, //do no syncing
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -108,30 +56,8 @@ func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
|
|||||||
|
|
||||||
chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
|
chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
|
||||||
|
|
||||||
peer := streamer.getPeer(node.ID())
|
|
||||||
|
|
||||||
stream := NewStream(swarmChunkServerStreamName, "", true)
|
|
||||||
//simulate pre-subscription to RETRIEVE_REQUEST stream on peer
|
|
||||||
peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
|
|
||||||
Stream: stream,
|
|
||||||
History: nil,
|
|
||||||
Priority: Top,
|
|
||||||
})
|
|
||||||
|
|
||||||
//test the exchange
|
//test the exchange
|
||||||
err = tester.TestExchanges(p2ptest.Exchange{
|
err = tester.TestExchanges(p2ptest.Exchange{
|
||||||
Expects: []p2ptest.Expect{
|
|
||||||
{ //first expect a subscription to the RETRIEVE_REQUEST stream
|
|
||||||
Code: 4,
|
|
||||||
Msg: &SubscribeMsg{
|
|
||||||
Stream: stream,
|
|
||||||
History: nil,
|
|
||||||
Priority: Top,
|
|
||||||
},
|
|
||||||
Peer: node.ID(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}, p2ptest.Exchange{
|
|
||||||
Label: "RetrieveRequestMsg",
|
Label: "RetrieveRequestMsg",
|
||||||
Triggers: []p2ptest.Trigger{
|
Triggers: []p2ptest.Trigger{
|
||||||
{ //then the actual RETRIEVE_REQUEST....
|
{ //then the actual RETRIEVE_REQUEST....
|
||||||
@ -158,7 +84,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
|
|||||||
|
|
||||||
//should fail with a timeout as the peer we are requesting
|
//should fail with a timeout as the peer we are requesting
|
||||||
//the chunk from does not have the chunk
|
//the chunk from does not have the chunk
|
||||||
expectedError := `exchange #1 "RetrieveRequestMsg": timed out`
|
expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
|
||||||
if err == nil || err.Error() != expectedError {
|
if err == nil || err.Error() != expectedError {
|
||||||
t.Fatalf("Expected error %v, got %v", expectedError, err)
|
t.Fatalf("Expected error %v, got %v", expectedError, err)
|
||||||
}
|
}
|
||||||
@ -167,9 +93,8 @@ func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
|
|||||||
// upstream request server receives a retrieve Request and responds with
|
// upstream request server receives a retrieve Request and responds with
|
||||||
// offered hashes or delivery if skipHash is set to true
|
// offered hashes or delivery if skipHash is set to true
|
||||||
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
|
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
|
||||||
tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
|
tester, _, localStore, teardown, err := newStreamerTester(&RegistryOptions{
|
||||||
Retrieval: RetrievalEnabled,
|
Syncing: SyncingDisabled,
|
||||||
Syncing: SyncingDisabled,
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -178,36 +103,14 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
|
|||||||
|
|
||||||
node := tester.Nodes[0]
|
node := tester.Nodes[0]
|
||||||
|
|
||||||
peer := streamer.getPeer(node.ID())
|
hash := storage.Address(hash1[:])
|
||||||
|
ch := storage.NewChunk(hash, hash1[:])
|
||||||
stream := NewStream(swarmChunkServerStreamName, "", true)
|
_, err = localStore.Put(context.TODO(), chunk.ModePutUpload, ch)
|
||||||
|
|
||||||
peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
|
|
||||||
Stream: stream,
|
|
||||||
History: nil,
|
|
||||||
Priority: Top,
|
|
||||||
})
|
|
||||||
|
|
||||||
hash := storage.Address(hash0[:])
|
|
||||||
chunk := storage.NewChunk(hash, hash)
|
|
||||||
err = localStore.Put(context.TODO(), chunk)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Expected no err got %v", err)
|
t.Fatalf("Expected no err got %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = tester.TestExchanges(p2ptest.Exchange{
|
err = tester.TestExchanges(p2ptest.Exchange{
|
||||||
Expects: []p2ptest.Expect{
|
|
||||||
{
|
|
||||||
Code: 4,
|
|
||||||
Msg: &SubscribeMsg{
|
|
||||||
Stream: stream,
|
|
||||||
History: nil,
|
|
||||||
Priority: Top,
|
|
||||||
},
|
|
||||||
Peer: node.ID(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}, p2ptest.Exchange{
|
|
||||||
Label: "RetrieveRequestMsg",
|
Label: "RetrieveRequestMsg",
|
||||||
Triggers: []p2ptest.Trigger{
|
Triggers: []p2ptest.Trigger{
|
||||||
{
|
{
|
||||||
@ -218,53 +121,12 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
|
|||||||
Peer: node.ID(),
|
Peer: node.ID(),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Expects: []p2ptest.Expect{
|
|
||||||
{
|
|
||||||
Code: 1,
|
|
||||||
Msg: &OfferedHashesMsg{
|
|
||||||
HandoverProof: &HandoverProof{
|
|
||||||
Handover: &Handover{},
|
|
||||||
},
|
|
||||||
Hashes: hash,
|
|
||||||
From: 0,
|
|
||||||
// TODO: why is this 32???
|
|
||||||
To: 32,
|
|
||||||
Stream: stream,
|
|
||||||
},
|
|
||||||
Peer: node.ID(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
hash = storage.Address(hash1[:])
|
|
||||||
chunk = storage.NewChunk(hash, hash1[:])
|
|
||||||
err = localStore.Put(context.TODO(), chunk)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Expected no err got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = tester.TestExchanges(p2ptest.Exchange{
|
|
||||||
Label: "RetrieveRequestMsg",
|
|
||||||
Triggers: []p2ptest.Trigger{
|
|
||||||
{
|
|
||||||
Code: 5,
|
|
||||||
Msg: &RetrieveRequestMsg{
|
|
||||||
Addr: hash,
|
|
||||||
SkipCheck: true,
|
|
||||||
},
|
|
||||||
Peer: node.ID(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Expects: []p2ptest.Expect{
|
Expects: []p2ptest.Expect{
|
||||||
{
|
{
|
||||||
Code: 6,
|
Code: 6,
|
||||||
Msg: &ChunkDeliveryMsg{
|
Msg: &ChunkDeliveryMsg{
|
||||||
Addr: hash,
|
Addr: ch.Address(),
|
||||||
SData: hash,
|
SData: ch.Data(),
|
||||||
},
|
},
|
||||||
Peer: node.ID(),
|
Peer: node.ID(),
|
||||||
},
|
},
|
||||||
@ -294,7 +156,7 @@ func TestRequestFromPeers(t *testing.T) {
|
|||||||
|
|
||||||
// an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
|
// an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
|
||||||
sp := &Peer{
|
sp := &Peer{
|
||||||
Peer: protocolsPeer,
|
BzzPeer: &network.BzzPeer{Peer: protocolsPeer, BzzAddr: addr},
|
||||||
pq: pq.New(int(PriorityQueue), PriorityQueueCap),
|
pq: pq.New(int(PriorityQueue), PriorityQueueCap),
|
||||||
streamer: r,
|
streamer: r,
|
||||||
}
|
}
|
||||||
@ -334,7 +196,7 @@ func TestRequestFromPeersWithLightNode(t *testing.T) {
|
|||||||
r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
|
r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
|
||||||
// an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
|
// an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
|
||||||
sp := &Peer{
|
sp := &Peer{
|
||||||
Peer: protocolsPeer,
|
BzzPeer: &network.BzzPeer{Peer: protocolsPeer, BzzAddr: addr},
|
||||||
pq: pq.New(int(PriorityQueue), PriorityQueueCap),
|
pq: pq.New(int(PriorityQueue), PriorityQueueCap),
|
||||||
streamer: r,
|
streamer: r,
|
||||||
}
|
}
|
||||||
@ -358,8 +220,7 @@ func TestRequestFromPeersWithLightNode(t *testing.T) {
|
|||||||
|
|
||||||
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
|
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
|
||||||
tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
|
tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
|
||||||
Retrieval: RetrievalDisabled,
|
Syncing: SyncingDisabled,
|
||||||
Syncing: SyncingDisabled,
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -420,14 +281,14 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
|
|||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
// wait for the chunk to get stored
|
// wait for the chunk to get stored
|
||||||
storedChunk, err := localStore.Get(ctx, chunkKey)
|
storedChunk, err := localStore.Get(ctx, chunk.ModeGetRequest, chunkKey)
|
||||||
for err != nil {
|
for err != nil {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
|
t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
storedChunk, err = localStore.Get(ctx, chunkKey)
|
storedChunk, err = localStore.Get(ctx, chunk.ModeGetRequest, chunkKey)
|
||||||
time.Sleep(50 * time.Millisecond)
|
time.Sleep(50 * time.Millisecond)
|
||||||
}
|
}
|
||||||
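The loop above polls the local store with ModeGetRequest until the delivered chunk appears or the context times out. A standalone sketch of that poll-until-timeout pattern with a dummy store standing in for the localstore:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

var errNotFound = errors.New("not found")

// slowStore pretends the value only becomes visible after a few lookups,
// standing in for the localstore while a delivered chunk is being persisted.
type slowStore struct{ calls int32 }

func (s *slowStore) Get(key string) (string, error) {
	if atomic.AddInt32(&s.calls, 1) < 4 {
		return "", errNotFound
	}
	return "chunk-data", nil
}

// waitForKey mirrors the test's loop: keep retrying with a short sleep until
// the value appears or the context deadline is hit.
func waitForKey(ctx context.Context, store *slowStore, key string) (string, error) {
	for {
		v, err := store.Get(key)
		if err == nil {
			return v, nil
		}
		select {
		case <-ctx.Done():
			return "", fmt.Errorf("key not in store after timeout: %v", err)
		case <-time.After(50 * time.Millisecond):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	v, err := waitForKey(ctx, &slowStore{}, "chunkKey")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("stored:", v)
}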
|
|
||||||
@ -471,7 +332,6 @@ func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool)
|
|||||||
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
SkipCheck: skipCheck,
|
SkipCheck: skipCheck,
|
||||||
Syncing: SyncingDisabled,
|
Syncing: SyncingDisabled,
|
||||||
Retrieval: RetrievalEnabled,
|
|
||||||
}, nil)
|
}, nil)
|
||||||
bucket.Store(bucketKeyRegistry, r)
|
bucket.Store(bucketKeyRegistry, r)
|
||||||
|
|
||||||
@ -520,7 +380,7 @@ func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool)
|
|||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
//...which then gets passed to the round-robin file store
|
//...which then gets passed to the round-robin file store
|
||||||
roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
|
roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams(), chunk.NewTags())
|
||||||
//now we can actually upload a (random) file to the round-robin store
|
//now we can actually upload a (random) file to the round-robin store
|
||||||
size := chunkCount * chunkSize
|
size := chunkCount * chunkSize
|
||||||
log.Debug("Storing data to file store")
|
log.Debug("Storing data to file store")
|
||||||
@ -622,7 +482,6 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck b
|
|||||||
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
SkipCheck: skipCheck,
|
SkipCheck: skipCheck,
|
||||||
Syncing: SyncingDisabled,
|
Syncing: SyncingDisabled,
|
||||||
Retrieval: RetrievalDisabled,
|
|
||||||
SyncUpdateDelay: 0,
|
SyncUpdateDelay: 0,
|
||||||
}, nil)
|
}, nil)
|
||||||
bucket.Store(bucketKeyRegistry, r)
|
bucket.Store(bucketKeyRegistry, r)
|
||||||
@ -700,7 +559,7 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck b
|
|||||||
errs := make(chan error)
|
errs := make(chan error)
|
||||||
for _, hash := range hashes {
|
for _, hash := range hashes {
|
||||||
go func(h storage.Address) {
|
go func(h storage.Address) {
|
||||||
_, err := netStore.Get(ctx, h)
|
_, err := netStore.Get(ctx, chunk.ModeGetRequest, h)
|
||||||
log.Warn("test check netstore get", "hash", h, "err", err)
|
log.Warn("test check netstore get", "hash", h, "err", err)
|
||||||
errs <- err
|
errs <- err
|
||||||
}(hash)
|
}(hash)
|
||||||
|
@ -66,7 +66,6 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
Retrieval: RetrievalDisabled,
|
|
||||||
Syncing: SyncingRegisterOnly,
|
Syncing: SyncingRegisterOnly,
|
||||||
SkipCheck: skipCheck,
|
SkipCheck: skipCheck,
|
||||||
}, nil)
|
}, nil)
|
||||||
@ -287,20 +286,20 @@ func enableNotifications(r *Registry, peerID enode.ID, s Stream) error {
|
|||||||
|
|
||||||
type testExternalClient struct {
|
type testExternalClient struct {
|
||||||
hashes chan []byte
|
hashes chan []byte
|
||||||
store storage.SyncChunkStore
|
netStore *storage.NetStore
|
||||||
enableNotificationsC chan struct{}
|
enableNotificationsC chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTestExternalClient(store storage.SyncChunkStore) *testExternalClient {
|
func newTestExternalClient(netStore *storage.NetStore) *testExternalClient {
|
||||||
return &testExternalClient{
|
return &testExternalClient{
|
||||||
hashes: make(chan []byte),
|
hashes: make(chan []byte),
|
||||||
store: store,
|
netStore: netStore,
|
||||||
enableNotificationsC: make(chan struct{}),
|
enableNotificationsC: make(chan struct{}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func(context.Context) error {
|
func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func(context.Context) error {
|
||||||
wait := c.store.FetchFunc(ctx, storage.Address(hash))
|
wait := c.netStore.FetchFunc(ctx, storage.Address(hash))
|
||||||
if wait == nil {
|
if wait == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -21,95 +21,11 @@ import (
|
|||||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
// This test checks the default behavior of the server, that is
|
|
||||||
// when it is serving Retrieve requests.
|
|
||||||
func TestLigthnodeRetrieveRequestWithRetrieve(t *testing.T) {
|
|
||||||
registryOptions := &RegistryOptions{
|
|
||||||
Retrieval: RetrievalClientOnly,
|
|
||||||
Syncing: SyncingDisabled,
|
|
||||||
}
|
|
||||||
tester, _, _, teardown, err := newStreamerTester(registryOptions)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer teardown()
|
|
||||||
|
|
||||||
node := tester.Nodes[0]
|
|
||||||
|
|
||||||
stream := NewStream(swarmChunkServerStreamName, "", false)
|
|
||||||
|
|
||||||
err = tester.TestExchanges(p2ptest.Exchange{
|
|
||||||
Label: "SubscribeMsg",
|
|
||||||
Triggers: []p2ptest.Trigger{
|
|
||||||
{
|
|
||||||
Code: 4,
|
|
||||||
Msg: &SubscribeMsg{
|
|
||||||
Stream: stream,
|
|
||||||
},
|
|
||||||
Peer: node.ID(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Got %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = tester.TestDisconnected(&p2ptest.Disconnect{Peer: node.ID()})
|
|
||||||
if err == nil || err.Error() != "timed out waiting for peers to disconnect" {
|
|
||||||
t.Fatalf("Expected no disconnect, got %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This test checks the Lightnode behavior of server, when serving Retrieve
|
|
||||||
// requests are disabled
|
|
||||||
func TestLigthnodeRetrieveRequestWithoutRetrieve(t *testing.T) {
|
|
||||||
registryOptions := &RegistryOptions{
|
|
||||||
Retrieval: RetrievalDisabled,
|
|
||||||
Syncing: SyncingDisabled,
|
|
||||||
}
|
|
||||||
tester, _, _, teardown, err := newStreamerTester(registryOptions)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer teardown()
|
|
||||||
|
|
||||||
node := tester.Nodes[0]
|
|
||||||
|
|
||||||
stream := NewStream(swarmChunkServerStreamName, "", false)
|
|
||||||
|
|
||||||
err = tester.TestExchanges(
|
|
||||||
p2ptest.Exchange{
|
|
||||||
Label: "SubscribeMsg",
|
|
||||||
Triggers: []p2ptest.Trigger{
|
|
||||||
{
|
|
||||||
Code: 4,
|
|
||||||
Msg: &SubscribeMsg{
|
|
||||||
Stream: stream,
|
|
||||||
},
|
|
||||||
Peer: node.ID(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Expects: []p2ptest.Expect{
|
|
||||||
{
|
|
||||||
Code: 7,
|
|
||||||
Msg: &SubscribeErrorMsg{
|
|
||||||
Error: "stream RETRIEVE_REQUEST not registered",
|
|
||||||
},
|
|
||||||
Peer: node.ID(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Got %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This test checks the default behavior of the server, that is
|
// This test checks the default behavior of the server, that is
|
||||||
// when syncing is enabled.
|
// when syncing is enabled.
|
||||||
func TestLigthnodeRequestSubscriptionWithSync(t *testing.T) {
|
func TestLigthnodeRequestSubscriptionWithSync(t *testing.T) {
|
||||||
registryOptions := &RegistryOptions{
|
registryOptions := &RegistryOptions{
|
||||||
Retrieval: RetrievalDisabled,
|
Syncing: SyncingRegisterOnly,
|
||||||
Syncing: SyncingRegisterOnly,
|
|
||||||
}
|
}
|
||||||
tester, _, _, teardown, err := newStreamerTester(registryOptions)
|
tester, _, _, teardown, err := newStreamerTester(registryOptions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -153,8 +69,7 @@ func TestLigthnodeRequestSubscriptionWithSync(t *testing.T) {
|
|||||||
// when syncing is disabled.
|
// when syncing is disabled.
|
||||||
func TestLigthnodeRequestSubscriptionWithoutSync(t *testing.T) {
|
func TestLigthnodeRequestSubscriptionWithoutSync(t *testing.T) {
|
||||||
registryOptions := &RegistryOptions{
|
registryOptions := &RegistryOptions{
|
||||||
Retrieval: RetrievalDisabled,
|
Syncing: SyncingDisabled,
|
||||||
Syncing: SyncingDisabled,
|
|
||||||
}
|
}
|
||||||
tester, _, _, teardown, err := newStreamerTester(registryOptions)
|
tester, _, _, teardown, err := newStreamerTester(registryOptions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -24,9 +24,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
|
bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
|
||||||
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||||
"github.com/opentracing/opentracing-go"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var syncBatchTimeout = 30 * time.Second
|
var syncBatchTimeout = 30 * time.Second
|
||||||
@ -175,7 +173,11 @@ type QuitMsg struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *Peer) handleQuitMsg(req *QuitMsg) error {
|
func (p *Peer) handleQuitMsg(req *QuitMsg) error {
|
||||||
return p.removeClient(req.Stream)
|
err := p.removeClient(req.Stream)
|
||||||
|
if _, ok := err.(*notFoundError); ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
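handleQuitMsg now treats a missing client as a no-op by checking for *notFoundError. A minimal sketch of distinguishing that typed error with a type assertion; the helper names here are hypothetical:

package main

import "fmt"

// notFoundError marks "nothing to remove" conditions that callers may ignore,
// mirroring how handleQuitMsg now swallows *notFoundError from removeClient.
type notFoundError struct{ what string }

func (e *notFoundError) Error() string { return e.what + " not found" }

func removeClient(known bool) error {
	if !known {
		return &notFoundError{what: "client"}
	}
	return nil
}

// handleQuit returns nil when the client was already gone and propagates
// every other error unchanged.
func handleQuit(known bool) error {
	err := removeClient(known)
	if _, ok := err.(*notFoundError); ok {
		return nil
	}
	return err
}

func main() {
	fmt.Println("known client:  ", handleQuit(true))
	fmt.Println("unknown client:", handleQuit(false))
}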
|
|
||||||
// OfferedHashesMsg is the protocol msg for offering to hand over a
|
// OfferedHashesMsg is the protocol msg for offering to hand over a
|
||||||
@ -197,12 +199,6 @@ func (m OfferedHashesMsg) String() string {
|
|||||||
func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error {
|
func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error {
|
||||||
metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)
|
metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)
|
||||||
|
|
||||||
var sp opentracing.Span
|
|
||||||
ctx, sp = spancontext.StartSpan(
|
|
||||||
ctx,
|
|
||||||
"handle.offered.hashes")
|
|
||||||
defer sp.Finish()
|
|
||||||
|
|
||||||
c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
|
c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -219,6 +215,9 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
|
|||||||
return fmt.Errorf("error initiaising bitvector of length %v: %v", lenHashes/HashSize, err)
|
return fmt.Errorf("error initiaising bitvector of length %v: %v", lenHashes/HashSize, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var wantDelaySet bool
|
||||||
|
var wantDelay time.Time
|
||||||
|
|
||||||
ctr := 0
|
ctr := 0
|
||||||
errC := make(chan error)
|
errC := make(chan error)
|
||||||
ctx, cancel := context.WithTimeout(ctx, syncBatchTimeout)
|
ctx, cancel := context.WithTimeout(ctx, syncBatchTimeout)
|
||||||
@ -230,6 +229,13 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
|
|||||||
if wait := c.NeedData(ctx, hash); wait != nil {
|
if wait := c.NeedData(ctx, hash); wait != nil {
|
||||||
ctr++
|
ctr++
|
||||||
want.Set(i/HashSize, true)
|
want.Set(i/HashSize, true)
|
||||||
|
|
||||||
|
// measure how long it takes before we mark chunks for retrieval, and actually send the request
|
||||||
|
if !wantDelaySet {
|
||||||
|
wantDelaySet = true
|
||||||
|
wantDelay = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
// create request and wait until the chunk data arrives and is stored
|
// create request and wait until the chunk data arrives and is stored
|
||||||
go func(w func(context.Context) error) {
|
go func(w func(context.Context) error) {
|
||||||
select {
|
select {
|
||||||
@ -247,7 +253,7 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
|
|||||||
case err := <-errC:
|
case err := <-errC:
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debug("client.handleOfferedHashesMsg() error waiting for chunk, dropping peer", "peer", p.ID(), "err", err)
|
log.Debug("client.handleOfferedHashesMsg() error waiting for chunk, dropping peer", "peer", p.ID(), "err", err)
|
||||||
p.Drop(err)
|
p.Drop()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
@ -283,28 +289,34 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
|
|||||||
From: from,
|
From: from,
|
||||||
To: to,
|
To: to,
|
||||||
}
|
}
|
||||||
go func() {
|
|
||||||
log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
|
log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
|
||||||
select {
|
select {
|
||||||
case err := <-c.next:
|
case err := <-c.next:
|
||||||
if err != nil {
|
|
||||||
log.Warn("c.next error dropping peer", "err", err)
|
|
||||||
p.Drop(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case <-c.quit:
|
|
||||||
log.Debug("client.handleOfferedHashesMsg() quit")
|
|
||||||
return
|
|
||||||
case <-ctx.Done():
|
|
||||||
log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
|
|
||||||
err := p.SendPriority(ctx, msg, c.priority)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warn("SendPriority error", "err", err)
|
log.Warn("c.next error dropping peer", "err", err)
|
||||||
|
p.Drop()
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
}()
|
case <-c.quit:
|
||||||
|
log.Debug("client.handleOfferedHashesMsg() quit")
|
||||||
|
return nil
|
||||||
|
case <-ctx.Done():
|
||||||
|
log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
|
||||||
|
|
||||||
|
// record want delay
|
||||||
|
if wantDelaySet {
|
||||||
|
metrics.GetOrRegisterResettingTimer("handleoffered.wantdelay", nil).UpdateSince(wantDelay)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = p.SendPriority(ctx, msg, c.priority)
|
||||||
|
if err != nil {
|
||||||
|
log.Warn("SendPriority error", "err", err)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
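The wantDelay bookkeeping added above measures the time from marking the first wanted hash until the WantedHashesMsg is actually sent, and feeds it into a resetting timer metric. A self-contained sketch of that remember-first-timestamp pattern, with a plain print instead of the metrics registry:

package main

import (
	"fmt"
	"time"
)

// wantBatch mimics the bookkeeping added to handleOfferedHashesMsg: the first
// time a hash is marked as wanted we remember the timestamp, and once the
// batch request is sent we report how long the marking phase took.
type wantBatch struct {
	wanted       []string
	wantDelaySet bool
	wantDelay    time.Time
}

func (w *wantBatch) markWanted(hash string) {
	w.wanted = append(w.wanted, hash)
	if !w.wantDelaySet {
		w.wantDelaySet = true
		w.wantDelay = time.Now()
	}
}

func (w *wantBatch) send() {
	if w.wantDelaySet {
		// the real code feeds this duration into a resetting timer metric
		fmt.Printf("want delay for %d hashes: %v\n", len(w.wanted), time.Since(w.wantDelay))
	}
	fmt.Println("WantedHashesMsg sent")
}

func main() {
	b := &wantBatch{}
	for _, h := range []string{"h1", "h2", "h3"} {
		b.markWanted(h)
		time.Sleep(5 * time.Millisecond) // simulate per-hash work
	}
	b.send()
}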
|
|
||||||
|
@ -24,8 +24,10 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/p2p/protocols"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/network"
|
||||||
pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
|
pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
|
||||||
"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
|
"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
|
||||||
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
||||||
@ -54,7 +56,7 @@ var ErrMaxPeerServers = errors.New("max peer servers")
|
|||||||
|
|
||||||
// Peer is the Peer extension for the streaming protocol
|
// Peer is the Peer extension for the streaming protocol
|
||||||
type Peer struct {
|
type Peer struct {
|
||||||
*protocols.Peer
|
*network.BzzPeer
|
||||||
streamer *Registry
|
streamer *Registry
|
||||||
pq *pq.PriorityQueue
|
pq *pq.PriorityQueue
|
||||||
serverMu sync.RWMutex
|
serverMu sync.RWMutex
|
||||||
@ -74,9 +76,9 @@ type WrappedPriorityMsg struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewPeer is the constructor for Peer
|
// NewPeer is the constructor for Peer
|
||||||
func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
|
func NewPeer(peer *network.BzzPeer, streamer *Registry) *Peer {
|
||||||
p := &Peer{
|
p := &Peer{
|
||||||
Peer: peer,
|
BzzPeer: peer,
|
||||||
pq: pq.New(int(PriorityQueue), PriorityQueueCap),
|
pq: pq.New(int(PriorityQueue), PriorityQueueCap),
|
||||||
streamer: streamer,
|
streamer: streamer,
|
||||||
servers: make(map[Stream]*server),
|
servers: make(map[Stream]*server),
|
||||||
@ -90,7 +92,7 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
|
|||||||
err := p.Send(wmsg.Context, wmsg.Msg)
|
err := p.Send(wmsg.Context, wmsg.Msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Message send error, dropping peer", "peer", p.ID(), "err", err)
|
log.Error("Message send error, dropping peer", "peer", p.ID(), "err", err)
|
||||||
p.Drop(err)
|
p.Drop()
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -134,7 +136,7 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
|
|||||||
func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8, syncing bool) error {
|
func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8, syncing bool) error {
|
||||||
var msg interface{}
|
var msg interface{}
|
||||||
|
|
||||||
spanName := "send.chunk.delivery"
|
metrics.GetOrRegisterCounter("peer.deliver", nil).Inc(1)
|
||||||
|
|
||||||
//we send different types of messages if delivery is for syncing or retrievals,
|
//we send different types of messages if delivery is for syncing or retrievals,
|
||||||
//even if handling and content of the message are the same,
|
//even if handling and content of the message are the same,
|
||||||
@ -144,16 +146,13 @@ func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8,
|
|||||||
Addr: chunk.Address(),
|
Addr: chunk.Address(),
|
||||||
SData: chunk.Data(),
|
SData: chunk.Data(),
|
||||||
}
|
}
|
||||||
spanName += ".syncing"
|
|
||||||
} else {
|
} else {
|
||||||
msg = &ChunkDeliveryMsgRetrieval{
|
msg = &ChunkDeliveryMsgRetrieval{
|
||||||
Addr: chunk.Address(),
|
Addr: chunk.Address(),
|
||||||
SData: chunk.Data(),
|
SData: chunk.Data(),
|
||||||
}
|
}
|
||||||
spanName += ".retrieval"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx = context.WithValue(ctx, "stream_send_tag", nil)
|
|
||||||
return p.SendPriority(ctx, msg, priority)
|
return p.SendPriority(ctx, msg, priority)
|
||||||
}
|
}
|
||||||
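Deliver above still sends two distinct message types for syncing and retrieval even though their fields are identical, so the receiving side can tell the cases apart (see the type switch in delivery.go earlier in this diff). A small sketch of that named-type trick and of converting both back to the shared form:

package main

import "fmt"

// chunkDelivery is the shared payload; the two named types below let a
// protocol register them under different message codes while the handler
// converts both back to the common form, as delivery.go now does.
type chunkDelivery struct {
	Addr  []byte
	SData []byte
}

type chunkDeliveryRetrieval chunkDelivery
type chunkDeliverySyncing chunkDelivery

func handle(msg interface{}) {
	var d *chunkDelivery
	var kind string
	switch m := msg.(type) {
	case *chunkDeliveryRetrieval:
		d = (*chunkDelivery)(m)
		kind = "retrieval"
	case *chunkDeliverySyncing:
		d = (*chunkDelivery)(m)
		kind = "syncing"
	default:
		fmt.Println("unknown delivery message")
		return
	}
	fmt.Printf("%s delivery of %d bytes for %x\n", kind, len(d.SData), d.Addr)
}

func main() {
	handle(&chunkDeliveryRetrieval{Addr: []byte{0x01}, SData: []byte("data")})
	handle(&chunkDeliverySyncing{Addr: []byte{0x02}, SData: []byte("data")})
}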
|
|
||||||
@ -416,7 +415,174 @@ func (p *Peer) removeClientParams(s Stream) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *Peer) close() {
|
func (p *Peer) close() {
|
||||||
|
p.serverMu.Lock()
|
||||||
|
defer p.serverMu.Unlock()
|
||||||
|
|
||||||
for _, s := range p.servers {
|
for _, s := range p.servers {
|
||||||
s.Close()
|
s.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
p.servers = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// runUpdateSyncing is a long running function that creates the initial
|
||||||
|
// syncing subscriptions to the peer and waits for neighbourhood depth change
|
||||||
|
// to create new ones or quit existing ones based on the new neighbourhood depth
|
||||||
|
// and whether the peer enters or leaves the nearest neighbourhood, using
|
||||||
|
// syncSubscriptionsDiff and updateSyncSubscriptions functions.
|
||||||
|
func (p *Peer) runUpdateSyncing() {
|
||||||
|
timer := time.NewTimer(p.streamer.syncUpdateDelay)
|
||||||
|
defer timer.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-timer.C:
|
||||||
|
case <-p.streamer.quit:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
kad := p.streamer.delivery.kad
|
||||||
|
po := chunk.Proximity(p.BzzAddr.Over(), kad.BaseAddr())
|
||||||
|
|
||||||
|
depth := kad.NeighbourhoodDepth()
|
||||||
|
|
||||||
|
log.Debug("update syncing subscriptions: initial", "peer", p.ID(), "po", po, "depth", depth)
|
||||||
|
|
||||||
|
// initial subscriptions
|
||||||
|
p.updateSyncSubscriptions(syncSubscriptionsDiff(po, -1, depth, kad.MaxProxDisplay))
|
||||||
|
|
||||||
|
depthChangeSignal, unsubscribeDepthChangeSignal := kad.SubscribeToNeighbourhoodDepthChange()
|
||||||
|
defer unsubscribeDepthChangeSignal()
|
||||||
|
|
||||||
|
prevDepth := depth
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case _, ok := <-depthChangeSignal:
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// update subscriptions for this peer when depth changes
|
||||||
|
depth := kad.NeighbourhoodDepth()
|
||||||
|
log.Debug("update syncing subscriptions", "peer", p.ID(), "po", po, "depth", depth)
|
||||||
|
p.updateSyncSubscriptions(syncSubscriptionsDiff(po, prevDepth, depth, kad.MaxProxDisplay))
|
||||||
|
prevDepth = depth
|
||||||
|
case <-p.streamer.quit:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Debug("update syncing subscriptions: exiting", "peer", p.ID())
|
||||||
|
}
|
||||||
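runUpdateSyncing above waits out the initial sync-update delay, requests the initial subscriptions, then reacts to neighbourhood depth changes until the registry quits. A standalone sketch of that signal-loop skeleton; plain channels stand in for the kademlia depth-change subscription:

package main

import (
	"fmt"
	"time"
)

// updateLoop is a skeleton of runUpdateSyncing: wait out the initial delay,
// do the initial subscription work, then react to every depth change until
// either the signal channel or the quit channel closes.
func updateLoop(delay time.Duration, depthChange <-chan int, quit <-chan struct{}) {
	select {
	case <-time.After(delay):
	case <-quit:
		return
	}

	prevDepth := -1 // -1: no previous depth, request the full initial range
	fmt.Println("initial subscriptions, prevDepth =", prevDepth)

	for {
		select {
		case depth, ok := <-depthChange:
			if !ok {
				return
			}
			fmt.Printf("depth changed %d -> %d: update subscriptions\n", prevDepth, depth)
			prevDepth = depth
		case <-quit:
			return
		}
	}
}

func main() {
	depthChange := make(chan int)
	quit := make(chan struct{})

	go func() {
		depthChange <- 3
		depthChange <- 5
		close(quit)
	}()

	updateLoop(10*time.Millisecond, depthChange, quit)
}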
|
|
||||||
|
// updateSyncSubscriptions accepts two slices of integers, the first one
|
||||||
|
// representing proximity order bins for required syncing subscriptions
|
||||||
|
// and the second one representing bins for syncing subscriptions that
|
||||||
|
// need to be removed. This function sends request for subscription
|
||||||
|
// messages and quit messages for provided bins.
|
||||||
|
func (p *Peer) updateSyncSubscriptions(subBins, quitBins []int) {
|
||||||
|
if p.streamer.getPeer(p.ID()) == nil {
|
||||||
|
log.Debug("update syncing subscriptions", "peer not found", p.ID())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Debug("update syncing subscriptions", "peer", p.ID(), "subscribe", subBins, "quit", quitBins)
|
||||||
|
for _, po := range subBins {
|
||||||
|
p.subscribeSync(po)
|
||||||
|
}
|
||||||
|
for _, po := range quitBins {
|
||||||
|
p.quitSync(po)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// subscribeSync send the request for syncing subscriptions to the peer
|
||||||
|
// using subscriptionFunc. This function is used to request syncing subscriptions
|
||||||
|
// when new peer is added to the registry and on neighbourhood depth change.
|
||||||
|
func (p *Peer) subscribeSync(po int) {
|
||||||
|
err := subscriptionFunc(p.streamer, p.ID(), uint8(po))
|
||||||
|
if err != nil {
|
||||||
|
log.Error("subscription", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// quitSync sends the quit message for live and history syncing streams to the peer.
|
||||||
|
// This function is used in runUpdateSyncing indirectly over updateSyncSubscriptions
|
||||||
|
// to remove unneeded syncing subscriptions on neighbourhood depth change.
|
||||||
|
func (p *Peer) quitSync(po int) {
|
||||||
|
live := NewStream("SYNC", FormatSyncBinKey(uint8(po)), true)
|
||||||
|
history := getHistoryStream(live)
|
||||||
|
err := p.streamer.Quit(p.ID(), live)
|
||||||
|
if err != nil && err != p2p.ErrShuttingDown {
|
||||||
|
log.Error("quit", "err", err, "peer", p.ID(), "stream", live)
|
||||||
|
}
|
||||||
|
err = p.streamer.Quit(p.ID(), history)
|
||||||
|
if err != nil && err != p2p.ErrShuttingDown {
|
||||||
|
log.Error("quit", "err", err, "peer", p.ID(), "stream", history)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = p.removeServer(live)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("remove server", "err", err, "peer", p.ID(), "stream", live)
|
||||||
|
}
|
||||||
|
err = p.removeServer(history)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("remove server", "err", err, "peer", p.ID(), "stream", live)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// syncSubscriptionsDiff calculates to which proximity order bins a peer
|
||||||
|
// (with po peerPO) needs to be subscribed after kademlia neighbourhood depth
|
||||||
|
// change from prevDepth to newDepth. Max argument limits the number of
|
||||||
|
// proximity order bins. Returned values are slices of integers which represent
|
||||||
|
// proximity order bins, the first one to which additional subscriptions need to
|
||||||
|
// be requested and the second one which subscriptions need to be quit. Argument
|
||||||
|
// prevDepth with value less then 0 represents no previous depth, used for
|
||||||
|
// initial syncing subscriptions.
|
||||||
|
func syncSubscriptionsDiff(peerPO, prevDepth, newDepth, max int) (subBins, quitBins []int) {
|
||||||
|
newStart, newEnd := syncBins(peerPO, newDepth, max)
|
||||||
|
if prevDepth < 0 {
|
||||||
|
// no previous depth, return the complete range
|
||||||
|
// for subscriptions requests and nothing for quitting
|
||||||
|
return intRange(newStart, newEnd), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
prevStart, prevEnd := syncBins(peerPO, prevDepth, max)
|
||||||
|
|
||||||
|
if newStart < prevStart {
|
||||||
|
subBins = append(subBins, intRange(newStart, prevStart)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if prevStart < newStart {
|
||||||
|
quitBins = append(quitBins, intRange(prevStart, newStart)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if newEnd < prevEnd {
|
||||||
|
quitBins = append(quitBins, intRange(newEnd, prevEnd)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if prevEnd < newEnd {
|
||||||
|
subBins = append(subBins, intRange(prevEnd, newEnd)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return subBins, quitBins
|
||||||
|
}
|
||||||
|
|
||||||
|
// syncBins returns the range to which proximity order bins syncing
|
||||||
|
// subscriptions need to be requested, based on peer proximity and
|
||||||
|
// kademlia neighbourhood depth. Returned range is [start,end), inclusive for
|
||||||
|
// start and exclusive for end.
|
||||||
|
func syncBins(peerPO, depth, max int) (start, end int) {
|
||||||
|
if peerPO < depth {
|
||||||
|
// subscribe only to peerPO bin if it is not
|
||||||
|
// in the nearest neighbourhood
|
||||||
|
return peerPO, peerPO + 1
|
||||||
|
}
|
||||||
|
// subscribe from depth to max bin if the peer
|
||||||
|
// is in the nearest neighbourhood
|
||||||
|
return depth, max + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// intRange returns the slice of integers [start,end). The start
|
||||||
|
// is inclusive and the end is not.
|
||||||
|
func intRange(start, end int) (r []int) {
|
||||||
|
for i := start; i < end; i++ {
|
||||||
|
r = append(r, i)
|
||||||
|
}
|
||||||
|
return r
|
||||||
}
|
}
|
||||||
|
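To make the bin arithmetic above concrete, the following standalone program re-implements syncBins, intRange and syncSubscriptionsDiff purely for illustration and evaluates one depth change; a MaxProxDisplay of 16 is assumed, matching the expectations in the new peer_test.go added below.

package main

import "fmt"

// syncBins mirrors the helper above: a peer shallower than the neighbourhood
// depth syncs only its own bin; a peer inside the nearest neighbourhood syncs
// every bin from the depth up to max.
func syncBins(peerPO, depth, max int) (start, end int) {
    if peerPO < depth {
        return peerPO, peerPO + 1
    }
    return depth, max + 1
}

// intRange returns the integers in [start, end).
func intRange(start, end int) (r []int) {
    for i := start; i < end; i++ {
        r = append(r, i)
    }
    return r
}

// syncSubscriptionsDiff mirrors the logic above: compare the old and new bin
// ranges and report which bins to subscribe to and which to quit.
func syncSubscriptionsDiff(peerPO, prevDepth, newDepth, max int) (subBins, quitBins []int) {
    newStart, newEnd := syncBins(peerPO, newDepth, max)
    if prevDepth < 0 {
        return intRange(newStart, newEnd), nil
    }
    prevStart, prevEnd := syncBins(peerPO, prevDepth, max)
    if newStart < prevStart {
        subBins = append(subBins, intRange(newStart, prevStart)...)
    }
    if prevStart < newStart {
        quitBins = append(quitBins, intRange(prevStart, newStart)...)
    }
    if newEnd < prevEnd {
        quitBins = append(quitBins, intRange(newEnd, prevEnd)...)
    }
    if prevEnd < newEnd {
        subBins = append(subBins, intRange(prevEnd, newEnd)...)
    }
    return subBins, quitBins
}

func main() {
    // A peer at proximity order 4: when the depth grows from 0 to 5 the peer
    // leaves the nearest neighbourhood, so only bin 4 remains subscribed and
    // bins 0-3 and 5-16 are quit.
    sub, quit := syncSubscriptionsDiff(4, 0, 5, 16)
    fmt.Println(sub, quit) // [] [0 1 2 3 5 6 7 8 9 10 11 12 13 14 15 16]
}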
309
swarm/network/stream/peer_test.go
Normal file
@ -0,0 +1,309 @@
|
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package stream
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/node"
|
||||||
|
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||||
|
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/network"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/network/simulation"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/state"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestSyncSubscriptionsDiff validates the output of syncSubscriptionsDiff
|
||||||
|
// function for various arguments.
|
||||||
|
func TestSyncSubscriptionsDiff(t *testing.T) {
|
||||||
|
max := network.NewKadParams().MaxProxDisplay
|
||||||
|
for _, tc := range []struct {
|
||||||
|
po, prevDepth, newDepth int
|
||||||
|
subBins, quitBins []int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
po: 0, prevDepth: -1, newDepth: 0,
|
||||||
|
subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 1, prevDepth: -1, newDepth: 0,
|
||||||
|
subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 2, prevDepth: -1, newDepth: 0,
|
||||||
|
subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 0, prevDepth: -1, newDepth: 1,
|
||||||
|
subBins: []int{0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 1, prevDepth: -1, newDepth: 1,
|
||||||
|
subBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 2, prevDepth: -1, newDepth: 2,
|
||||||
|
subBins: []int{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 3, prevDepth: -1, newDepth: 2,
|
||||||
|
subBins: []int{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 1, prevDepth: -1, newDepth: 2,
|
||||||
|
subBins: []int{1},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 0, prevDepth: 0, newDepth: 0, // 0-16 -> 0-16
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 1, prevDepth: 0, newDepth: 0, // 0-16 -> 0-16
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 0, prevDepth: 0, newDepth: 1, // 0-16 -> 0
|
||||||
|
quitBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 0, prevDepth: 0, newDepth: 2, // 0-16 -> 0
|
||||||
|
quitBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 1, prevDepth: 0, newDepth: 1, // 0-16 -> 1-16
|
||||||
|
quitBins: []int{0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 1, prevDepth: 1, newDepth: 0, // 1-16 -> 0-16
|
||||||
|
subBins: []int{0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 4, prevDepth: 0, newDepth: 1, // 0-16 -> 1-16
|
||||||
|
quitBins: []int{0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 4, prevDepth: 0, newDepth: 4, // 0-16 -> 4-16
|
||||||
|
quitBins: []int{0, 1, 2, 3},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 4, prevDepth: 0, newDepth: 5, // 0-16 -> 4
|
||||||
|
quitBins: []int{0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 4, prevDepth: 5, newDepth: 0, // 4 -> 0-16
|
||||||
|
subBins: []int{0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
po: 4, prevDepth: 5, newDepth: 6, // 4 -> 4
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
subBins, quitBins := syncSubscriptionsDiff(tc.po, tc.prevDepth, tc.newDepth, max)
|
||||||
|
if fmt.Sprint(subBins) != fmt.Sprint(tc.subBins) {
|
||||||
|
t.Errorf("po: %v, prevDepth: %v, newDepth: %v: got subBins %v, want %v", tc.po, tc.prevDepth, tc.newDepth, subBins, tc.subBins)
|
||||||
|
}
|
||||||
|
if fmt.Sprint(quitBins) != fmt.Sprint(tc.quitBins) {
|
||||||
|
t.Errorf("po: %v, prevDepth: %v, newDepth: %v: got quitBins %v, want %v", tc.po, tc.prevDepth, tc.newDepth, quitBins, tc.quitBins)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestUpdateSyncingSubscriptions validates that syncing subscriptions are correctly
|
||||||
|
// made on initial node connections and that subscriptions are correctly changed
|
||||||
|
// when kademlia neighbourhood depth is changed by connecting more nodes.
|
||||||
|
func TestUpdateSyncingSubscriptions(t *testing.T) {
|
||||||
|
sim := simulation.New(map[string]simulation.ServiceFunc{
|
||||||
|
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
|
||||||
|
addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
|
SyncUpdateDelay: 100 * time.Millisecond,
|
||||||
|
Syncing: SyncingAutoSubscribe,
|
||||||
|
}, nil)
|
||||||
|
cleanup = func() {
|
||||||
|
r.Close()
|
||||||
|
clean()
|
||||||
|
}
|
||||||
|
bucket.Store("bzz-address", addr)
|
||||||
|
return r, cleanup, nil
|
||||||
|
},
|
||||||
|
})
|
||||||
|
defer sim.Close()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
|
||||||
|
// initial nodes, first one as pivot center of the start
|
||||||
|
ids, err := sim.AddNodesAndConnectStar(10)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// pivot values
|
||||||
|
pivotRegistryID := ids[0]
|
||||||
|
pivotRegistry := sim.Service("streamer", pivotRegistryID).(*Registry)
|
||||||
|
pivotKademlia := pivotRegistry.delivery.kad
|
||||||
|
// nodes proximities from the pivot node
|
||||||
|
nodeProximities := make(map[string]int)
|
||||||
|
for _, id := range ids[1:] {
|
||||||
|
bzzAddr, ok := sim.NodeItem(id, "bzz-address")
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("no bzz address for node")
|
||||||
|
}
|
||||||
|
nodeProximities[id.String()] = chunk.Proximity(pivotKademlia.BaseAddr(), bzzAddr.(*network.BzzAddr).Over())
|
||||||
|
}
|
||||||
|
// wait until sync subscriptions are done for all nodes
|
||||||
|
waitForSubscriptions(t, pivotRegistry, ids[1:]...)
|
||||||
|
|
||||||
|
// check initial sync streams
|
||||||
|
err = checkSyncStreamsWithRetry(pivotRegistry, nodeProximities)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// add more nodes until the depth is changed
|
||||||
|
prevDepth := pivotKademlia.NeighbourhoodDepth()
|
||||||
|
var noDepthChangeChecked bool // true it there was a check when no depth is changed
|
||||||
|
for {
|
||||||
|
ids, err := sim.AddNodes(5)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// add new nodes to sync subscriptions check
|
||||||
|
for _, id := range ids {
|
||||||
|
bzzAddr, ok := sim.NodeItem(id, "bzz-address")
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("no bzz address for node")
|
||||||
|
}
|
||||||
|
nodeProximities[id.String()] = chunk.Proximity(pivotKademlia.BaseAddr(), bzzAddr.(*network.BzzAddr).Over())
|
||||||
|
}
|
||||||
|
err = sim.Net.ConnectNodesStar(ids, pivotRegistryID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
waitForSubscriptions(t, pivotRegistry, ids...)
|
||||||
|
|
||||||
|
newDepth := pivotKademlia.NeighbourhoodDepth()
|
||||||
|
// depth is not changed, check if streams are still correct
|
||||||
|
if newDepth == prevDepth {
|
||||||
|
err = checkSyncStreamsWithRetry(pivotRegistry, nodeProximities)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
noDepthChangeChecked = true
|
||||||
|
}
|
||||||
|
// do the final check when depth is changed and
|
||||||
|
// there has been at least one check
|
||||||
|
// for the case when depth is not changed
|
||||||
|
if newDepth != prevDepth && noDepthChangeChecked {
|
||||||
|
// check sync streams for changed depth
|
||||||
|
return checkSyncStreamsWithRetry(pivotRegistry, nodeProximities)
|
||||||
|
}
|
||||||
|
prevDepth = newDepth
|
||||||
|
}
|
||||||
|
})
|
||||||
|
if result.Error != nil {
|
||||||
|
t.Fatal(result.Error)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// waitForSubscriptions is a test helper function that blocks until
|
||||||
|
// stream server subscriptions are established on the provided registry
|
||||||
|
// to the nodes with provided IDs.
|
||||||
|
func waitForSubscriptions(t *testing.T, r *Registry, ids ...enode.ID) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
for retries := 0; retries < 100; retries++ {
|
||||||
|
subs := r.api.GetPeerServerSubscriptions()
|
||||||
|
if allSubscribed(subs, ids) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
}
|
||||||
|
t.Fatalf("missing subscriptions")
|
||||||
|
}
|
||||||
|
|
||||||
|
// allSubscribed returns true if nodes with ids have subscriptions
|
||||||
|
// in provided subs map.
|
||||||
|
func allSubscribed(subs map[string][]string, ids []enode.ID) bool {
|
||||||
|
for _, id := range ids {
|
||||||
|
if s, ok := subs[id.String()]; !ok || len(s) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkSyncStreamsWithRetry is calling checkSyncStreams with retries.
|
||||||
|
func checkSyncStreamsWithRetry(r *Registry, nodeProximities map[string]int) (err error) {
|
||||||
|
for retries := 0; retries < 5; retries++ {
|
||||||
|
err = checkSyncStreams(r, nodeProximities)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
time.Sleep(500 * time.Millisecond)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkSyncStreams validates that registry contains expected sync
|
||||||
|
// subscriptions to nodes with proximities in a map nodeProximities.
|
||||||
|
func checkSyncStreams(r *Registry, nodeProximities map[string]int) error {
|
||||||
|
depth := r.delivery.kad.NeighbourhoodDepth()
|
||||||
|
maxPO := r.delivery.kad.MaxProxDisplay
|
||||||
|
for id, po := range nodeProximities {
|
||||||
|
wantStreams := syncStreams(po, depth, maxPO)
|
||||||
|
gotStreams := nodeStreams(r, id)
|
||||||
|
|
||||||
|
if r.getPeer(enode.HexID(id)) == nil {
|
||||||
|
// ignore removed peer
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(gotStreams, wantStreams) {
|
||||||
|
return fmt.Errorf("node %s got streams %v, want %v", id, gotStreams, wantStreams)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// syncStreams returns expected sync streams that need to be
|
||||||
|
// established between a node with kademlia neighbourhood depth
|
||||||
|
// and a node with proximity order po.
|
||||||
|
func syncStreams(po, depth, maxPO int) (streams []string) {
|
||||||
|
start, end := syncBins(po, depth, maxPO)
|
||||||
|
for bin := start; bin < end; bin++ {
|
||||||
|
streams = append(streams, NewStream("SYNC", FormatSyncBinKey(uint8(bin)), false).String())
|
||||||
|
streams = append(streams, NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true).String())
|
||||||
|
}
|
||||||
|
return streams
|
||||||
|
}
|
||||||
|
|
||||||
|
// nodeStreams returns stream server subscriptions on a registry
|
||||||
|
// to the peer with provided id.
|
||||||
|
func nodeStreams(r *Registry, id string) []string {
|
||||||
|
streams := r.api.GetPeerServerSubscriptions()[id]
|
||||||
|
sort.Strings(streams)
|
||||||
|
return streams
|
||||||
|
}
|
@ -25,6 +25,7 @@ import (
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/network/simulation"
 	"github.com/ethereum/go-ethereum/swarm/state"
@ -118,7 +119,6 @@ var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
 		}
 
 		r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
-			Retrieval:       RetrievalEnabled,
 			Syncing:         SyncingAutoSubscribe,
 			SyncUpdateDelay: syncUpdateDelay,
 		}, nil)
@ -278,8 +278,8 @@ func runRetrievalTest(t *testing.T, chunkCount int, nodeCount int) error {
 		if !ok {
 			return fmt.Errorf("No localstore")
 		}
-		lstore := item.(*storage.LocalStore)
-		conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
+		store := item.(chunk.Store)
+		conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, store)
 		if err != nil {
 			return err
 		}
@ -31,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/simulations"
 	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/network"
 	"github.com/ethereum/go-ethereum/swarm/network/simulation"
 	"github.com/ethereum/go-ethereum/swarm/pot"
@ -117,7 +118,6 @@ var simServiceMap = map[string]simulation.ServiceFunc{
 		store := state.NewInmemoryStore()
 
 		r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
-			Retrieval:       RetrievalDisabled,
 			Syncing:         SyncingAutoSubscribe,
 			SyncUpdateDelay: 3 * time.Second,
 		}, nil)
@ -190,10 +190,10 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
 	node := sim.Net.GetRandomUpNode()
 	item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
 	if !ok {
-		return fmt.Errorf("No localstore")
+		return errors.New("no store in simulation bucket")
 	}
-	lstore := item.(*storage.LocalStore)
-	hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
+	store := item.(chunk.Store)
+	hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, store)
 	if err != nil {
 		return err
 	}
@ -221,25 +221,25 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
 			localChunks := conf.idToChunksMap[id]
 			for _, ch := range localChunks {
 				//get the real chunk by the index in the index array
-				chunk := conf.hashes[ch]
-				log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
+				ch := conf.hashes[ch]
+				log.Trace("node has chunk", "address", ch)
 				//check if the expected chunk is indeed in the localstore
 				var err error
 				if *useMockStore {
 					//use the globalStore if the mockStore should be used; in that case,
 					//the complete localStore stack is bypassed for getting the chunk
-					_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
+					_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), ch)
 				} else {
 					//use the actual localstore
 					item, ok := sim.NodeItem(id, bucketKeyStore)
 					if !ok {
-						return fmt.Errorf("Error accessing localstore")
+						return errors.New("no store in simulation bucket")
 					}
-					lstore := item.(*storage.LocalStore)
-					_, err = lstore.Get(ctx, chunk)
+					store := item.(chunk.Store)
+					_, err = store.Get(ctx, chunk.ModeGetLookup, ch)
 				}
 				if err != nil {
-					log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
+					log.Debug("chunk not found", "address", ch.Hex(), "node", id)
 					// Do not get crazy with logging the warn message
 					time.Sleep(500 * time.Millisecond)
 					continue REPEAT
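The tests now read chunks through the chunk.Store interface and pass an explicit get mode such as chunk.ModeGetLookup. The sketch below imitates that shape with hypothetical local types to show why an access mode accompanies a read; it does not reproduce the real swarm/chunk API.

package main

import (
    "context"
    "errors"
    "fmt"
)

// ModeGet is a hypothetical stand-in for the access modes used above: the
// mode states why a chunk is being read, so a real store can update access
// metadata (for example garbage-collection indexes) differently per use case.
type ModeGet int

const (
    ModeGetLookup  ModeGet = iota // plain existence/lookup check, as in the test
    ModeGetRequest                // read on behalf of a retrieve request
    ModeGetSync                   // read as part of syncing
)

type Address []byte

type Store map[string][]byte

// Get returns the chunk data for addr and logs the mode it was read with.
func (s Store) Get(_ context.Context, mode ModeGet, addr Address) ([]byte, error) {
    data, ok := s[string(addr)]
    if !ok {
        return nil, errors.New("chunk not found")
    }
    fmt.Printf("get %x (mode %d)\n", addr, mode)
    return data, nil
}

func main() {
    store := Store{"\x01": []byte("payload")}
    if _, err := store.Get(context.Background(), ModeGetLookup, Address{0x01}); err != nil {
        panic(err)
    }
}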
@ -247,10 +247,10 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
 				evt := &simulations.Event{
 					Type: EventTypeChunkArrived,
 					Node: sim.Net.GetNode(id),
-					Data: chunk.String(),
+					Data: ch.String(),
 				}
 				sim.Net.Events().Send(evt)
-				log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
+				log.Trace("chunk found", "address", ch.Hex(), "node", id)
 			}
 		}
 		return nil
@ -296,9 +296,9 @@ func mapKeysToNodes(conf *synctestConfig) {
 }
 
 //upload a file(chunks) to a single local node store
-func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
+func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, store chunk.Store) ([]storage.Address, error) {
 	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
-	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
+	fileStore := storage.NewFileStore(store, storage.NewFileStoreParams(), chunk.NewTags())
 	size := chunkSize
 	var rootAddrs []storage.Address
 	for i := 0; i < chunkCount; i++ {
@ -18,7 +18,6 @@ package stream
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"math"
 	"reflect"
@ -49,7 +48,6 @@ const (
 
 // Enumerate options for syncing and retrieval
 type SyncingOption int
-type RetrievalOption int
 
 // Syncing options
 const (
@ -61,17 +59,6 @@
 	SyncingAutoSubscribe
 )
 
-const (
-	// Retrieval disabled. Used mostly for tests to isolate syncing features (i.e. syncing only)
-	RetrievalDisabled RetrievalOption = iota
-	// Only the client side of the retrieve request is registered.
-	// (light nodes do not serve retrieve requests)
-	// once the client is registered, subscription to retrieve request stream is always sent
-	RetrievalClientOnly
-	// Both client and server funcs are registered, subscribe sent automatically
-	RetrievalEnabled
-)
-
 // subscriptionFunc is used to determine what to do in order to perform subscriptions
 // usually we would start to really subscribe to nodes, but for tests other functionality may be needed
 // (see TestRequestPeerSubscriptions in streamer_test.go)
@ -79,59 +66,58 @@ var subscriptionFunc = doRequestSubscription
 
 // Registry registry for outgoing and incoming streamer constructors
 type Registry struct {
 	addr           enode.ID
 	api            *API
 	skipCheck      bool
 	clientMu       sync.RWMutex
 	serverMu       sync.RWMutex
 	peersMu        sync.RWMutex
 	serverFuncs    map[string]func(*Peer, string, bool) (Server, error)
 	clientFuncs    map[string]func(*Peer, string, bool) (Client, error)
 	peers          map[enode.ID]*Peer
 	delivery       *Delivery
 	intervalsStore state.Store
-	autoRetrieval  bool // automatically subscribe to retrieve request stream
-	maxPeerServers int
-	spec           *protocols.Spec   //this protocol's spec
-	balance        protocols.Balance //implements protocols.Balance, for accounting
-	prices         protocols.Prices  //implements protocols.Prices, provides prices to accounting
-	quit           chan struct{}     // terminates registry goroutines
+	maxPeerServers  int
+	spec            *protocols.Spec   //this protocol's spec
+	balance         protocols.Balance //implements protocols.Balance, for accounting
+	prices          protocols.Prices  //implements protocols.Prices, provides prices to accounting
+	quit            chan struct{}     // terminates registry goroutines
+	syncMode        SyncingOption
+	syncUpdateDelay time.Duration
 }
 
 // RegistryOptions holds optional values for NewRegistry constructor.
 type RegistryOptions struct {
 	SkipCheck       bool
 	Syncing         SyncingOption // Defines syncing behavior
-	Retrieval       RetrievalOption // Defines retrieval behavior
 	SyncUpdateDelay time.Duration
 	MaxPeerServers  int // The limit of servers for each peer in registry
 }
 
 // NewRegistry is Streamer constructor
-func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry {
+func NewRegistry(localID enode.ID, delivery *Delivery, netStore *storage.NetStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry {
 	if options == nil {
 		options = &RegistryOptions{}
 	}
 	if options.SyncUpdateDelay <= 0 {
 		options.SyncUpdateDelay = 15 * time.Second
 	}
-	// check if retrieval has been disabled
-	retrieval := options.Retrieval != RetrievalDisabled
 
 	quit := make(chan struct{})
 
 	streamer := &Registry{
 		addr:           localID,
 		skipCheck:      options.SkipCheck,
 		serverFuncs:    make(map[string]func(*Peer, string, bool) (Server, error)),
 		clientFuncs:    make(map[string]func(*Peer, string, bool) (Client, error)),
 		peers:          make(map[enode.ID]*Peer),
 		delivery:       delivery,
 		intervalsStore: intervalsStore,
-		autoRetrieval:  retrieval,
-		maxPeerServers: options.MaxPeerServers,
-		balance:        balance,
-		quit:           quit,
+		maxPeerServers:  options.MaxPeerServers,
+		balance:         balance,
+		quit:            quit,
+		syncUpdateDelay: options.SyncUpdateDelay,
+		syncMode:        options.Syncing,
 	}
 
 	streamer.setupSpec()
@ -139,124 +125,10 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
|
|||||||
streamer.api = NewAPI(streamer)
|
streamer.api = NewAPI(streamer)
|
||||||
delivery.getPeer = streamer.getPeer
|
delivery.getPeer = streamer.getPeer
|
||||||
|
|
||||||
// if retrieval is enabled, register the server func, so that retrieve requests will be served (non-light nodes only)
|
|
||||||
if options.Retrieval == RetrievalEnabled {
|
|
||||||
streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, live bool) (Server, error) {
|
|
||||||
if !live {
|
|
||||||
return nil, errors.New("only live retrieval requests supported")
|
|
||||||
}
|
|
||||||
return NewSwarmChunkServer(delivery.chunkStore), nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// if retrieval is not disabled, register the client func (both light nodes and normal nodes can issue retrieve requests)
|
|
||||||
if options.Retrieval != RetrievalDisabled {
|
|
||||||
streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
|
|
||||||
return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// If syncing is not disabled, the syncing functions are registered (both client and server)
|
// If syncing is not disabled, the syncing functions are registered (both client and server)
|
||||||
if options.Syncing != SyncingDisabled {
|
if options.Syncing != SyncingDisabled {
|
||||||
RegisterSwarmSyncerServer(streamer, syncChunkStore)
|
RegisterSwarmSyncerServer(streamer, netStore)
|
||||||
RegisterSwarmSyncerClient(streamer, syncChunkStore)
|
RegisterSwarmSyncerClient(streamer, netStore)
|
||||||
}
|
|
||||||
|
|
||||||
// if syncing is set to automatically subscribe to the syncing stream, start the subscription process
|
|
||||||
if options.Syncing == SyncingAutoSubscribe {
|
|
||||||
// latestIntC function ensures that
|
|
||||||
// - receiving from the in chan is not blocked by processing inside the for loop
|
|
||||||
// - the latest int value is delivered to the loop after the processing is done
|
|
||||||
// In context of NeighbourhoodDepthC:
|
|
||||||
// after the syncing is done updating inside the loop, we do not need to update on the intermediate
|
|
||||||
// depth changes, only to the latest one
|
|
||||||
latestIntC := func(in <-chan int) <-chan int {
|
|
||||||
out := make(chan int, 1)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
defer close(out)
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case i, ok := <-in:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case <-out:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
out <- i
|
|
||||||
case <-quit:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
kad := streamer.delivery.kad
|
|
||||||
// get notification channels from Kademlia before returning
|
|
||||||
// from this function to avoid race with Close method and
|
|
||||||
// the goroutine created below
|
|
||||||
depthC := latestIntC(kad.NeighbourhoodDepthC())
|
|
||||||
addressBookSizeC := latestIntC(kad.AddrCountC())
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
// wait for kademlia table to be healthy
|
|
||||||
// but return if Registry is closed before
|
|
||||||
select {
|
|
||||||
case <-time.After(options.SyncUpdateDelay):
|
|
||||||
case <-quit:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// initial requests for syncing subscription to peers
|
|
||||||
streamer.updateSyncing()
|
|
||||||
|
|
||||||
for depth := range depthC {
|
|
||||||
log.Debug("Kademlia neighbourhood depth change", "depth", depth)
|
|
||||||
|
|
||||||
// Prevent too early sync subscriptions by waiting until there are no
|
|
||||||
// new peers connecting. Sync streams updating will be done after no
|
|
||||||
// peers are connected for at least SyncUpdateDelay period.
|
|
||||||
timer := time.NewTimer(options.SyncUpdateDelay)
|
|
||||||
// Hard limit to sync update delay, preventing long delays
|
|
||||||
// on a very dynamic network
|
|
||||||
maxTimer := time.NewTimer(3 * time.Minute)
|
|
||||||
loop:
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-maxTimer.C:
|
|
||||||
// force syncing update when a hard timeout is reached
|
|
||||||
log.Trace("Sync subscriptions update on hard timeout")
|
|
||||||
// request for syncing subscription to new peers
|
|
||||||
streamer.updateSyncing()
|
|
||||||
break loop
|
|
||||||
case <-timer.C:
|
|
||||||
// start syncing as no new peers has been added to kademlia
|
|
||||||
// for some time
|
|
||||||
log.Trace("Sync subscriptions update")
|
|
||||||
// request for syncing subscription to new peers
|
|
||||||
streamer.updateSyncing()
|
|
||||||
break loop
|
|
||||||
case size := <-addressBookSizeC:
|
|
||||||
log.Trace("Kademlia address book size changed on depth change", "size", size)
|
|
||||||
// new peers has been added to kademlia,
|
|
||||||
// reset the timer to prevent early sync subscriptions
|
|
||||||
if !timer.Stop() {
|
|
||||||
<-timer.C
|
|
||||||
}
|
|
||||||
timer.Reset(options.SyncUpdateDelay)
|
|
||||||
case <-quit:
|
|
||||||
break loop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
timer.Stop()
|
|
||||||
maxTimer.Stop()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return streamer
|
return streamer
|
||||||
@ -381,7 +253,7 @@ func (r *Registry) Subscribe(peerId enode.ID, s Stream, h *Range, priority uint8
 	}
 	log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h)
 
-	return peer.SendPriority(context.TODO(), msg, priority)
+	return peer.Send(context.TODO(), msg)
 }
 
 func (r *Registry) Unsubscribe(peerId enode.ID, s Stream) error {
@ -422,8 +294,7 @@ func (r *Registry) Quit(peerId enode.ID, s Stream) error {
 func (r *Registry) Close() error {
 	// Stop sending neighborhood depth change and address count
 	// change from Kademlia that were initiated in NewRegistry constructor.
-	r.delivery.kad.CloseNeighbourhoodDepthC()
-	r.delivery.kad.CloseAddrCountC()
+	r.delivery.Close()
 	close(r.quit)
 	return r.intervalsStore.Close()
 }
@ -438,6 +309,7 @@ func (r *Registry) getPeer(peerId enode.ID) *Peer {
 func (r *Registry) setPeer(peer *Peer) {
 	r.peersMu.Lock()
 	r.peers[peer.ID()] = peer
+	metrics.GetOrRegisterCounter("registry.setpeer", nil).Inc(1)
 	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
 	r.peersMu.Unlock()
 }
@ -445,6 +317,7 @@ func (r *Registry) setPeer(peer *Peer) {
 func (r *Registry) deletePeer(peer *Peer) {
 	r.peersMu.Lock()
 	delete(r.peers, peer.ID())
+	metrics.GetOrRegisterCounter("registry.deletepeer", nil).Inc(1)
 	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
 	r.peersMu.Unlock()
 }
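The two hunks above add a counter per peer event next to the existing peer-count gauge. The following standalone snippet shows the same go-ethereum metrics calls in isolation; it assumes the default (nil) registry and that metrics collection is enabled, which is how the calls above behave when the node runs with metrics on.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/metrics"
)

func main() {
    // Mirrors the instrumentation added in setPeer/deletePeer: a counter per
    // event plus a gauge tracking the current peer count.
    metrics.Enabled = true

    peers := map[string]struct{}{}

    add := func(id string) {
        peers[id] = struct{}{}
        metrics.GetOrRegisterCounter("registry.setpeer", nil).Inc(1)
        metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(peers)))
    }

    add("peer-1")
    add("peer-2")

    fmt.Println(metrics.GetOrRegisterCounter("registry.setpeer", nil).Count()) // 2
    fmt.Println(metrics.GetOrRegisterGauge("registry.peers", nil).Value())     // 2
}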
@ -458,132 +331,31 @@ func (r *Registry) peersCount() (c int) {
|
|||||||
|
|
||||||
// Run protocol run function
|
// Run protocol run function
|
||||||
func (r *Registry) Run(p *network.BzzPeer) error {
|
func (r *Registry) Run(p *network.BzzPeer) error {
|
||||||
sp := NewPeer(p.Peer, r)
|
sp := NewPeer(p, r)
|
||||||
r.setPeer(sp)
|
r.setPeer(sp)
|
||||||
|
|
||||||
|
if r.syncMode == SyncingAutoSubscribe {
|
||||||
|
go sp.runUpdateSyncing()
|
||||||
|
}
|
||||||
|
|
||||||
defer r.deletePeer(sp)
|
defer r.deletePeer(sp)
|
||||||
defer close(sp.quit)
|
defer close(sp.quit)
|
||||||
defer sp.close()
|
defer sp.close()
|
||||||
|
|
||||||
if r.autoRetrieval && !p.LightNode {
|
|
||||||
err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", true), nil, Top)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return sp.Run(sp.HandleMsg)
|
return sp.Run(sp.HandleMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateSyncing subscribes to SYNC streams by iterating over the
|
|
||||||
// kademlia connections and bins. If there are existing SYNC streams
|
|
||||||
// and they are no longer required after iteration, request to Quit
|
|
||||||
// them will be send to appropriate peers.
|
|
||||||
func (r *Registry) updateSyncing() {
|
|
||||||
kad := r.delivery.kad
|
|
||||||
// map of all SYNC streams for all peers
|
|
||||||
// used at the and of the function to remove servers
|
|
||||||
// that are not needed anymore
|
|
||||||
subs := make(map[enode.ID]map[Stream]struct{})
|
|
||||||
r.peersMu.RLock()
|
|
||||||
for id, peer := range r.peers {
|
|
||||||
peer.serverMu.RLock()
|
|
||||||
for stream := range peer.servers {
|
|
||||||
if stream.Name == "SYNC" {
|
|
||||||
if _, ok := subs[id]; !ok {
|
|
||||||
subs[id] = make(map[Stream]struct{})
|
|
||||||
}
|
|
||||||
subs[id][stream] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
peer.serverMu.RUnlock()
|
|
||||||
}
|
|
||||||
r.peersMu.RUnlock()
|
|
||||||
|
|
||||||
// start requesting subscriptions from peers
|
|
||||||
r.requestPeerSubscriptions(kad, subs)
|
|
||||||
|
|
||||||
// remove SYNC servers that do not need to be subscribed
|
|
||||||
for id, streams := range subs {
|
|
||||||
if len(streams) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
peer := r.getPeer(id)
|
|
||||||
if peer == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for stream := range streams {
|
|
||||||
log.Debug("Remove sync server", "peer", id, "stream", stream)
|
|
||||||
err := r.Quit(peer.ID(), stream)
|
|
||||||
if err != nil && err != p2p.ErrShuttingDown {
|
|
||||||
log.Error("quit", "err", err, "peer", peer.ID(), "stream", stream)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// requestPeerSubscriptions calls on each live peer in the kademlia table
|
|
||||||
// and sends a `RequestSubscription` to peers according to their bin
|
|
||||||
// and their relationship with kademlia's depth.
|
|
||||||
// Also check `TestRequestPeerSubscriptions` in order to understand the
|
|
||||||
// expected behavior.
|
|
||||||
// The function expects:
|
|
||||||
// * the kademlia
|
|
||||||
// * a map of subscriptions
|
|
||||||
// * the actual function to subscribe
|
|
||||||
// (in case of the test, it doesn't do real subscriptions)
|
|
||||||
func (r *Registry) requestPeerSubscriptions(kad *network.Kademlia, subs map[enode.ID]map[Stream]struct{}) {
|
|
||||||
|
|
||||||
var startPo int
|
|
||||||
var endPo int
|
|
||||||
var ok bool
|
|
||||||
|
|
||||||
// kademlia's depth
|
|
||||||
kadDepth := kad.NeighbourhoodDepth()
|
|
||||||
// request subscriptions for all nodes and bins
|
|
||||||
// nil as base takes the node's base; we need to pass 255 as `EachConn` runs
|
|
||||||
// from deepest bins backwards
|
|
||||||
kad.EachConn(nil, 255, func(p *network.Peer, po int) bool {
|
|
||||||
// nodes that do not provide stream protocol
|
|
||||||
// should not be subscribed, e.g. bootnodes
|
|
||||||
if !p.HasCap("stream") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
//if the peer's bin is shallower than the kademlia depth,
|
|
||||||
//only the peer's bin should be subscribed
|
|
||||||
if po < kadDepth {
|
|
||||||
startPo = po
|
|
||||||
endPo = po
|
|
||||||
} else {
|
|
||||||
//if the peer's bin is equal or deeper than the kademlia depth,
|
|
||||||
//each bin from the depth up to k.MaxProxDisplay should be subscribed
|
|
||||||
startPo = kadDepth
|
|
||||||
endPo = kad.MaxProxDisplay
|
|
||||||
}
|
|
||||||
|
|
||||||
for bin := startPo; bin <= endPo; bin++ {
|
|
||||||
//do the actual subscription
|
|
||||||
ok = subscriptionFunc(r, p, uint8(bin), subs)
|
|
||||||
}
|
|
||||||
return ok
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// doRequestSubscription sends the actual RequestSubscription to the peer
|
// doRequestSubscription sends the actual RequestSubscription to the peer
|
||||||
func doRequestSubscription(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
|
func doRequestSubscription(r *Registry, id enode.ID, bin uint8) error {
|
||||||
log.Debug("Requesting subscription by registry:", "registry", r.addr, "peer", p.ID(), "bin", bin)
|
log.Debug("Requesting subscription by registry:", "registry", r.addr, "peer", id, "bin", bin)
|
||||||
// bin is always less then 256 and it is safe to convert it to type uint8
|
// bin is always less then 256 and it is safe to convert it to type uint8
|
||||||
stream := NewStream("SYNC", FormatSyncBinKey(bin), true)
|
stream := NewStream("SYNC", FormatSyncBinKey(bin), true)
|
||||||
if streams, ok := subs[p.ID()]; ok {
|
err := r.RequestSubscription(id, stream, NewRange(0, 0), High)
|
||||||
// delete live and history streams from the map, so that it won't be removed with a Quit request
|
|
||||||
delete(streams, stream)
|
|
||||||
delete(streams, getHistoryStream(stream))
|
|
||||||
}
|
|
||||||
err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
|
log.Debug("Request subscription", "err", err, "peer", id, "stream", stream)
|
||||||
return false
|
return err
|
||||||
}
|
}
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||||
@ -619,24 +391,66 @@ func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
 		return p.handleUnsubscribeMsg(msg)
 
 	case *OfferedHashesMsg:
-		return p.handleOfferedHashesMsg(ctx, msg)
+		go func() {
+			err := p.handleOfferedHashesMsg(ctx, msg)
+			if err != nil {
+				log.Error(err.Error())
+				p.Drop()
+			}
+		}()
+		return nil
 
 	case *TakeoverProofMsg:
-		return p.handleTakeoverProofMsg(ctx, msg)
+		go func() {
+			err := p.handleTakeoverProofMsg(ctx, msg)
+			if err != nil {
+				log.Error(err.Error())
+				p.Drop()
+			}
+		}()
+		return nil
 
 	case *WantedHashesMsg:
-		return p.handleWantedHashesMsg(ctx, msg)
+		go func() {
+			err := p.handleWantedHashesMsg(ctx, msg)
+			if err != nil {
+				log.Error(err.Error())
+				p.Drop()
+			}
+		}()
+		return nil
 
 	case *ChunkDeliveryMsgRetrieval:
 		// handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
-		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
+		go func() {
+			err := p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
+			if err != nil {
+				log.Error(err.Error())
+				p.Drop()
+			}
+		}()
+		return nil
 
 	case *ChunkDeliveryMsgSyncing:
 		// handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
-		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
+		go func() {
+			err := p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
+			if err != nil {
+				log.Error(err.Error())
+				p.Drop()
+			}
+		}()
+		return nil
 
 	case *RetrieveRequestMsg:
-		return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg)
+		go func() {
+			err := p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg)
+			if err != nil {
+				log.Error(err.Error())
+				p.Drop()
+			}
+		}()
+		return nil
 
 	case *RequestSubscriptionMsg:
 		return p.handleRequestSubscription(ctx, msg)
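The hunk above moves the heavyweight handlers off the protocol read loop: each message is processed in its own goroutine and a handler error drops the offending peer instead of propagating up. A minimal, self-contained sketch of that dispatch pattern, with a dummy peer type standing in for the real one:

package main

import (
    "context"
    "errors"
    "fmt"
    "sync"
)

type peer struct {
    id      string
    dropped sync.Once
}

// Drop simulates disconnecting the peer; sync.Once keeps it idempotent even
// when several concurrent handlers fail.
func (p *peer) Drop() {
    p.dropped.Do(func() { fmt.Println("dropping peer", p.id) })
}

// handleAsync runs a handler in its own goroutine so the caller can return
// immediately; any handler error is logged and drops the peer.
func (p *peer) handleAsync(ctx context.Context, handle func(context.Context) error, wg *sync.WaitGroup) {
    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := handle(ctx); err != nil {
            fmt.Println("handler error:", err)
            p.Drop()
        }
    }()
}

func main() {
    p := &peer{id: "enode-1"}
    var wg sync.WaitGroup
    p.handleAsync(context.Background(), func(context.Context) error { return nil }, &wg)
    p.handleAsync(context.Background(), func(context.Context) error { return errors.New("invalid hashes length") }, &wg)
    wg.Wait()
}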
@ -767,7 +581,7 @@ func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error
 		return err
 	}
 
-	if err := p.SendPriority(context.TODO(), tp, c.priority); err != nil {
+	if err := p.Send(context.TODO(), tp); err != nil {
 		return err
 	}
 	if c.to > 0 && tp.Takeover.End >= c.to {
@ -969,15 +783,13 @@ func (api *API) UnsubscribeStream(peerId enode.ID, s Stream) error {
 }
 
 /*
-GetPeerSubscriptions is a API function which allows to query a peer for stream subscriptions it has.
+GetPeerServerSubscriptions is a API function which allows to query a peer for stream subscriptions it has.
 It can be called via RPC.
 It returns a map of node IDs with an array of string representations of Stream objects.
 */
-func (api *API) GetPeerSubscriptions() map[string][]string {
-	//create the empty map
+func (api *API) GetPeerServerSubscriptions() map[string][]string {
 	pstreams := make(map[string][]string)
 
-	//iterate all streamer peers
 	api.streamer.peersMu.RLock()
 	defer api.streamer.peersMu.RUnlock()
 
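The renamed GetPeerServerSubscriptions is documented as callable via RPC. A hypothetical client-side call is sketched below; the IPC path, the "stream" namespace and the method name are assumptions following go-ethereum's usual namespace_method convention and are not shown anywhere in this diff.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/rpc"
)

func main() {
    // Hypothetical: assumes a local node exposing its IPC endpoint at this
    // path and the stream API registered under the "stream" namespace.
    client, err := rpc.Dial("/tmp/bzzd.ipc")
    if err != nil {
        panic(err)
    }
    defer client.Close()

    // The result mirrors the documented return type: node ID -> stream names.
    var subs map[string][]string
    if err := client.Call(&subs, "stream_getPeerServerSubscriptions"); err != nil {
        panic(err)
    }
    for id, streams := range subs {
        fmt.Println(id, streams)
    }
}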
@ -28,9 +28,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/ethereum/go-ethereum/swarm/testutil"
-
-	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p/enode"
@ -39,6 +36,7 @@ import (
 	"github.com/ethereum/go-ethereum/swarm/network"
 	"github.com/ethereum/go-ethereum/swarm/network/simulation"
 	"github.com/ethereum/go-ethereum/swarm/state"
+	"github.com/ethereum/go-ethereum/swarm/testutil"
 	"golang.org/x/crypto/sha3"
 )
@ -539,7 +537,7 @@ func TestStreamerDownstreamCorruptHashesMsgExchange(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	expectedError := errors.New("Message handler error: (msg code 1): error invalid hashes length (len: 40)")
+	expectedError := errors.New("subprotocol error")
 	if err := tester.TestDisconnected(&p2ptest.Disconnect{Peer: node.ID(), Error: expectedError}); err != nil {
 		t.Fatal(err)
 	}
@ -779,7 +777,6 @@ func TestStreamerRequestSubscriptionQuitMsgExchange(t *testing.T) {
 func TestMaxPeerServersWithUnsubscribe(t *testing.T) {
 	var maxPeerServers = 6
 	tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
-		Retrieval:      RetrievalDisabled,
 		Syncing:        SyncingDisabled,
 		MaxPeerServers: maxPeerServers,
 	})
@ -940,8 +937,7 @@ func TestMaxPeerServersWithoutUnsubscribe(t *testing.T) {
 //`Price` interface implementation
 func TestHasPriceImplementation(t *testing.T) {
 	_, r, _, teardown, err := newStreamerTester(&RegistryOptions{
-		Retrieval: RetrievalDisabled,
-		Syncing:   SyncingDisabled,
+		Syncing: SyncingDisabled,
 	})
 	if err != nil {
 		t.Fatal(err)
@ -967,164 +963,8 @@ func TestHasPriceImplementation(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
// TestGetServerSubscriptions is a unit test for the api.GetPeerServerSubscriptions() function
|
||||||
TestRequestPeerSubscriptions is a unit test for stream's pull sync subscriptions.
|
func TestGetServerSubscriptions(t *testing.T) {
|
||||||
|
|
||||||
The test does:
|
|
||||||
* assign each connected peer to a bin map
|
|
||||||
* build up a known kademlia in advance
|
|
||||||
* run the EachConn function, which returns supposed subscription bins
|
|
||||||
* store all supposed bins per peer in a map
|
|
||||||
* check that all peers have the expected subscriptions
|
|
||||||
|
|
||||||
This kad table and its peers are copied from network.TestKademliaCase1,
|
|
||||||
it represents an edge case but for the purpose of testing the
|
|
||||||
syncing subscriptions it is just fine.
|
|
||||||
|
|
||||||
Addresses used in this test are discovered as part of the simulation network
|
|
||||||
in higher level tests for streaming. They were generated randomly.
|
|
||||||
|
|
||||||
The resulting kademlia looks like this:
|
|
||||||
=========================================================================
|
|
||||||
Fri Dec 21 20:02:39 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 7efef1
|
|
||||||
population: 12 (12), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
|
|
||||||
000 2 8196 835f | 2 8196 (0) 835f (0)
|
|
||||||
001 2 2690 28f0 | 2 2690 (0) 28f0 (0)
|
|
||||||
002 2 4d72 4a45 | 2 4d72 (0) 4a45 (0)
|
|
||||||
003 1 646e | 1 646e (0)
|
|
||||||
004 3 769c 76d1 7656 | 3 769c (0) 76d1 (0) 7656 (0)
|
|
||||||
============ DEPTH: 5 ==========================================
|
|
||||||
005 1 7a48 | 1 7a48 (0)
|
|
||||||
006 1 7cbd | 1 7cbd (0)
|
|
||||||
007 0 | 0
|
|
||||||
008 0 | 0
|
|
||||||
009 0 | 0
|
|
||||||
010 0 | 0
|
|
||||||
011 0 | 0
|
|
||||||
012 0 | 0
|
|
||||||
013 0 | 0
|
|
||||||
014 0 | 0
|
|
||||||
015 0 | 0
|
|
||||||
=========================================================================
|
|
||||||
*/
|
|
||||||
func TestRequestPeerSubscriptions(t *testing.T) {
|
|
||||||
// the pivot address; this is the actual kademlia node
|
|
||||||
pivotAddr := "7efef1c41d77f843ad167be95f6660567eb8a4a59f39240000cce2e0d65baf8e"
|
|
||||||
|
|
||||||
// a map of bin number to addresses from the given kademlia
|
|
||||||
binMap := make(map[int][]string)
|
|
||||||
binMap[0] = []string{
|
|
||||||
"835fbbf1d16ba7347b6e2fc552d6e982148d29c624ea20383850df3c810fa8fc",
|
|
||||||
"81968a2d8fb39114342ee1da85254ec51e0608d7f0f6997c2a8354c260a71009",
|
|
||||||
}
|
|
||||||
binMap[1] = []string{
|
|
||||||
"28f0bc1b44658548d6e05dd16d4c2fe77f1da5d48b6774bc4263b045725d0c19",
|
|
||||||
"2690a910c33ee37b91eb6c4e0731d1d345e2dc3b46d308503a6e85bbc242c69e",
|
|
||||||
}
|
|
||||||
binMap[2] = []string{
|
|
||||||
"4a45f1fc63e1a9cb9dfa44c98da2f3d20c2923e5d75ff60b2db9d1bdb0c54d51",
|
|
||||||
"4d72a04ddeb851a68cd197ef9a92a3e2ff01fbbff638e64929dd1a9c2e150112",
|
|
||||||
}
|
|
||||||
binMap[3] = []string{
|
|
||||||
"646e9540c84f6a2f9cf6585d45a4c219573b4fd1b64a3c9a1386fc5cf98c0d4d",
|
|
||||||
}
|
|
||||||
binMap[4] = []string{
|
|
||||||
"7656caccdc79cd8d7ce66d415cc96a718e8271c62fb35746bfc2b49faf3eebf3",
|
|
||||||
"76d1e83c71ca246d042e37ff1db181f2776265fbcfdc890ce230bfa617c9c2f0",
|
|
||||||
"769ce86aa90b518b7ed382f9fdacfbed93574e18dc98fe6c342e4f9f409c2d5a",
|
|
||||||
}
|
|
||||||
binMap[5] = []string{
|
|
||||||
"7a48f75f8ca60487ae42d6f92b785581b40b91f2da551ae73d5eae46640e02e8",
|
|
||||||
}
|
|
||||||
binMap[6] = []string{
|
|
||||||
"7cbd42350bde8e18ae5b955b5450f8e2cef3419f92fbf5598160c60fd78619f0",
|
|
||||||
}
|
|
||||||
|
|
||||||
// create the pivot's kademlia
|
|
||||||
addr := common.FromHex(pivotAddr)
|
|
||||||
k := network.NewKademlia(addr, network.NewKadParams())
|
|
||||||
|
|
||||||
// construct the peers and the kademlia
|
|
||||||
for _, binaddrs := range binMap {
|
|
||||||
for _, a := range binaddrs {
|
|
||||||
addr := common.FromHex(a)
|
|
||||||
k.On(network.NewPeer(&network.BzzPeer{BzzAddr: &network.BzzAddr{OAddr: addr}}, k))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: check kad table is same
|
|
||||||
// currently k.String() prints date so it will never be the same :)
|
|
||||||
// --> implement JSON representation of kad table
|
|
||||||
log.Debug(k.String())
|
|
||||||
|
|
||||||
// simulate that we would do subscriptions: just store the bin numbers
|
|
||||||
fakeSubscriptions := make(map[string][]int)
|
|
||||||
//after the test, we need to reset the subscriptionFunc to the default
|
|
||||||
defer func() { subscriptionFunc = doRequestSubscription }()
|
|
||||||
// define the function which should run for each connection
|
|
||||||
// instead of doing real subscriptions, we just store the bin numbers
|
|
||||||
subscriptionFunc = func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
|
|
||||||
// get the peer ID
|
|
||||||
peerstr := fmt.Sprintf("%x", p.Over())
|
|
||||||
// create the array of bins per peer
|
|
||||||
if _, ok := fakeSubscriptions[peerstr]; !ok {
|
|
||||||
fakeSubscriptions[peerstr] = make([]int, 0)
|
|
||||||
}
|
|
||||||
// store the (fake) bin subscription
|
|
||||||
log.Debug(fmt.Sprintf("Adding fake subscription for peer %s with bin %d", peerstr, bin))
|
|
||||||
fakeSubscriptions[peerstr] = append(fakeSubscriptions[peerstr], int(bin))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// create just a simple Registry object in order to be able to call...
|
|
||||||
r := &Registry{}
|
|
||||||
r.requestPeerSubscriptions(k, nil)
|
|
||||||
// calculate the kademlia depth
|
|
||||||
kdepth := k.NeighbourhoodDepth()
|
|
||||||
|
|
||||||
// now, check that all peers have the expected (fake) subscriptions
|
|
||||||
// iterate the bin map
|
|
||||||
for bin, peers := range binMap {
|
|
||||||
// for every peer...
|
|
||||||
for _, peer := range peers {
|
|
||||||
// ...get its (fake) subscriptions
|
|
||||||
fakeSubsForPeer := fakeSubscriptions[peer]
|
|
||||||
// if the peer's bin is shallower than the kademlia depth...
|
|
||||||
if bin < kdepth {
|
|
||||||
// (iterate all (fake) subscriptions)
|
|
||||||
for _, subbin := range fakeSubsForPeer {
|
|
||||||
// ...only the peer's bin should be "subscribed"
|
|
||||||
// (and thus have only one subscription)
|
|
||||||
if subbin != bin || len(fakeSubsForPeer) != 1 {
|
|
||||||
t.Fatalf("Did not get expected subscription for bin < depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else { //if the peer's bin is equal or higher than the kademlia depth...
|
|
||||||
// (iterate all (fake) subscriptions)
|
|
||||||
for i, subbin := range fakeSubsForPeer {
|
|
||||||
// ...each bin from the peer's bin number up to k.MaxProxDisplay should be "subscribed"
|
|
||||||
// as we start from depth we can use the iteration index to check
|
|
||||||
if subbin != i+kdepth {
|
|
||||||
t.Fatalf("Did not get expected subscription for bin > depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
|
|
||||||
}
|
|
||||||
// the last "subscription" should be k.MaxProxDisplay
|
|
||||||
if i == len(fakeSubsForPeer)-1 && subbin != k.MaxProxDisplay {
|
|
||||||
t.Fatalf("Expected last subscription to be: %d, but is: %d", k.MaxProxDisplay, subbin)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// print some output
|
|
||||||
for p, subs := range fakeSubscriptions {
|
|
||||||
log.Debug(fmt.Sprintf("Peer %s has the following fake subscriptions: ", p))
|
|
||||||
for _, bin := range subs {
|
|
||||||
log.Debug(fmt.Sprintf("%d,", bin))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestGetSubscriptions is a unit test for the api.GetPeerSubscriptions() function
|
|
||||||
func TestGetSubscriptions(t *testing.T) {
|
|
||||||
// create an amount of dummy peers
|
// create an amount of dummy peers
|
||||||
testPeerCount := 8
|
testPeerCount := 8
|
||||||
// every peer will have this amount of dummy servers
|
// every peer will have this amount of dummy servers
|
||||||
@ -1135,7 +975,7 @@ func TestGetSubscriptions(t *testing.T) {
|
|||||||
r := &Registry{}
|
r := &Registry{}
|
||||||
api := NewAPI(r)
|
api := NewAPI(r)
|
||||||
// call once, at this point should be empty
|
// call once, at this point should be empty
|
||||||
regs := api.GetPeerSubscriptions()
|
regs := api.GetPeerServerSubscriptions()
|
||||||
if len(regs) != 0 {
|
if len(regs) != 0 {
|
||||||
t.Fatal("Expected subscription count to be 0, but it is not")
|
t.Fatal("Expected subscription count to be 0, but it is not")
|
||||||
}
|
}
|
||||||
@ -1159,7 +999,7 @@ func TestGetSubscriptions(t *testing.T) {
|
|||||||
r.peers = peerMap
|
r.peers = peerMap
|
||||||
|
|
||||||
// call the subscriptions again
|
// call the subscriptions again
|
||||||
regs = api.GetPeerSubscriptions()
|
regs = api.GetPeerServerSubscriptions()
|
||||||
// count how many (fake) subscriptions there are
|
// count how many (fake) subscriptions there are
|
||||||
cnt := 0
|
cnt := 0
|
||||||
for _, reg := range regs {
|
for _, reg := range regs {
|
||||||
@ -1175,11 +1015,11 @@ func TestGetSubscriptions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
TestGetSubscriptionsRPC sets up a simulation network of `nodeCount` nodes,
|
TestGetServerSubscriptionsRPC sets up a simulation network of `nodeCount` nodes,
|
||||||
starts the simulation, waits for SyncUpdateDelay in order to kick off
|
starts the simulation, waits for SyncUpdateDelay in order to kick off
|
||||||
stream registration, then tests that there are subscriptions.
|
stream registration, then tests that there are subscriptions.
|
||||||
*/
|
*/
|
||||||
func TestGetSubscriptionsRPC(t *testing.T) {
|
func TestGetServerSubscriptionsRPC(t *testing.T) {
|
||||||
|
|
||||||
if testutil.RaceEnabled && os.Getenv("TRAVIS") == "true" {
|
if testutil.RaceEnabled && os.Getenv("TRAVIS") == "true" {
|
||||||
t.Skip("flaky with -race on Travis")
|
t.Skip("flaky with -race on Travis")
|
||||||
@ -1206,15 +1046,13 @@ func TestGetSubscriptionsRPC(t *testing.T) {
|
|||||||
defer func() { subscriptionFunc = doRequestSubscription }()
|
defer func() { subscriptionFunc = doRequestSubscription }()
|
||||||
|
|
||||||
// we use this subscriptionFunc for this test: just increases count and calls the actual subscription
|
// we use this subscriptionFunc for this test: just increases count and calls the actual subscription
|
||||||
subscriptionFunc = func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
|
subscriptionFunc = func(r *Registry, id enode.ID, bin uint8) error {
|
||||||
// syncing starts after syncUpdateDelay and loops after that Duration; we only want to count at the first iteration
|
// syncing starts after syncUpdateDelay and loops after that Duration; we only want to count at the first iteration
|
||||||
// in the first iteration, subs will be empty (no existing subscriptions), thus we can use this check
|
// in the first iteration, subs will be empty (no existing subscriptions), thus we can use this check
|
||||||
// this avoids flakiness
|
// this avoids flakiness
|
||||||
if len(subs) == 0 {
|
expectedMsgCount.inc()
|
||||||
expectedMsgCount.inc()
|
doRequestSubscription(r, id, bin)
|
||||||
}
|
return nil
|
||||||
doRequestSubscription(r, p, bin, subs)
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
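The hunk above narrows the test hook from the old (Registry, *network.Peer, bin, subs map) bool shape to (Registry, enode.ID, bin) error. A standalone sketch of the same counting-hook idea, with placeholder types standing in for the swarm ones so it compiles on its own:

package main

import "fmt"

// Placeholder types standing in for stream.Registry and enode.ID; the real
// signatures live in the swarm/network/stream package.
type registry struct{}
type nodeID string

// doRequestSubscription stands in for the production behaviour:
// actually request the subscription.
func doRequestSubscription(r *registry, id nodeID, bin uint8) error {
	fmt.Printf("subscribing to peer %s, bin %d\n", id, bin)
	return nil
}

func main() {
	// subscriptionFunc is the injection point the test overrides.
	subscriptionFunc := doRequestSubscription

	// Test override: count every requested subscription, then delegate.
	count := 0
	subscriptionFunc = func(r *registry, id nodeID, bin uint8) error {
		count++
		return doRequestSubscription(r, id, bin)
	}

	subscriptionFunc(&registry{}, "peer-1", 3)
	subscriptionFunc(&registry{}, "peer-2", 7)
	fmt.Println("requested subscriptions:", count) // 2
}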
// create a standard sim
|
// create a standard sim
|
||||||
sim := simulation.New(map[string]simulation.ServiceFunc{
|
sim := simulation.New(map[string]simulation.ServiceFunc{
|
||||||
@ -1226,7 +1064,6 @@ func TestGetSubscriptionsRPC(t *testing.T) {
|
|||||||
|
|
||||||
// configure so that sync registrations actually happen
|
// configure so that sync registrations actually happen
|
||||||
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
Retrieval: RetrievalEnabled,
|
|
||||||
Syncing: SyncingAutoSubscribe, //enable sync registrations
|
Syncing: SyncingAutoSubscribe, //enable sync registrations
|
||||||
SyncUpdateDelay: syncUpdateDelay,
|
SyncUpdateDelay: syncUpdateDelay,
|
||||||
}, nil)
|
}, nil)
|
||||||
@ -1321,7 +1158,7 @@ func TestGetSubscriptionsRPC(t *testing.T) {
|
|||||||
|
|
||||||
//ask it for subscriptions
|
//ask it for subscriptions
|
||||||
pstreams := make(map[string][]string)
|
pstreams := make(map[string][]string)
|
||||||
err = client.Call(&pstreams, "stream_getPeerSubscriptions")
|
err = client.Call(&pstreams, "stream_getPeerServerSubscriptions")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("client call stream_getPeerSubscriptions: %v", err)
|
return fmt.Errorf("client call stream_getPeerSubscriptions: %v", err)
|
||||||
}
|
}
|
||||||
|
@ -22,6 +22,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||||
)
|
)
|
||||||
@ -35,27 +36,29 @@ const (
|
|||||||
// * live request delivery with or without checkback
|
// * live request delivery with or without checkback
|
||||||
// * (live/non-live historical) chunk syncing per proximity bin
|
// * (live/non-live historical) chunk syncing per proximity bin
|
||||||
type SwarmSyncerServer struct {
|
type SwarmSyncerServer struct {
|
||||||
po uint8
|
correlateId string //used for logging
|
||||||
store storage.SyncChunkStore
|
po uint8
|
||||||
quit chan struct{}
|
netStore *storage.NetStore
|
||||||
|
quit chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSwarmSyncerServer is constructor for SwarmSyncerServer
|
// NewSwarmSyncerServer is constructor for SwarmSyncerServer
|
||||||
func NewSwarmSyncerServer(po uint8, syncChunkStore storage.SyncChunkStore) (*SwarmSyncerServer, error) {
|
func NewSwarmSyncerServer(po uint8, netStore *storage.NetStore, correlateId string) (*SwarmSyncerServer, error) {
|
||||||
return &SwarmSyncerServer{
|
return &SwarmSyncerServer{
|
||||||
po: po,
|
correlateId: correlateId,
|
||||||
store: syncChunkStore,
|
po: po,
|
||||||
quit: make(chan struct{}),
|
netStore: netStore,
|
||||||
|
quit: make(chan struct{}),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func RegisterSwarmSyncerServer(streamer *Registry, syncChunkStore storage.SyncChunkStore) {
|
func RegisterSwarmSyncerServer(streamer *Registry, netStore *storage.NetStore) {
|
||||||
streamer.RegisterServerFunc("SYNC", func(_ *Peer, t string, _ bool) (Server, error) {
|
streamer.RegisterServerFunc("SYNC", func(p *Peer, t string, _ bool) (Server, error) {
|
||||||
po, err := ParseSyncBinKey(t)
|
po, err := ParseSyncBinKey(t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return NewSwarmSyncerServer(po, syncChunkStore)
|
return NewSwarmSyncerServer(po, netStore, p.ID().String()+"|"+string(po))
|
||||||
})
|
})
|
||||||
// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
|
// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
|
||||||
// return NewOutgoingProvableSwarmSyncer(po, db)
|
// return NewOutgoingProvableSwarmSyncer(po, db)
|
||||||
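The registration above derives a per-server correlation id for logging from the peer id and the sync bin. As a hedged aside, a sketch that renders the bin as decimal digits via strconv (a plain string(po) conversion in Go yields the rune with that code point rather than the digits); the helper name below is made up for illustration:

package main

import (
	"fmt"
	"strconv"
)

// correlateID joins a peer identifier and a proximity-order bin into a
// single string used purely for log correlation.
func correlateID(peerID string, po uint8) string {
	return peerID + "|" + strconv.Itoa(int(po))
}

func main() {
	fmt.Println(correlateID("abcd1234", 5)) // abcd1234|5
}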
@ -69,130 +72,138 @@ func (s *SwarmSyncerServer) Close() {
|
|||||||
|
|
||||||
// GetData retrieves the actual chunk from netstore
|
// GetData retrieves the actual chunk from netstore
|
||||||
func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
|
func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
|
||||||
chunk, err := s.store.Get(ctx, storage.Address(key))
|
ch, err := s.netStore.Get(ctx, chunk.ModeGetSync, storage.Address(key))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return chunk.Data(), nil
|
return ch.Data(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SessionIndex returns current storage bin (po) index.
|
// SessionIndex returns current storage bin (po) index.
|
||||||
func (s *SwarmSyncerServer) SessionIndex() (uint64, error) {
|
func (s *SwarmSyncerServer) SessionIndex() (uint64, error) {
|
||||||
return s.store.BinIndex(s.po), nil
|
return s.netStore.LastPullSubscriptionBinID(s.po)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetBatch retrieves the next batch of hashes from the dbstore
|
// SetNextBatch retrieves the next batch of hashes from the localstore.
|
||||||
|
// It expects a range of bin IDs, both ends inclusive in syncing, and returns
|
||||||
|
// concatenated byte slice of chunk addresses and bin IDs of the first and
|
||||||
|
// the last one in that slice. The batch may have up to BatchSize number of
|
||||||
|
// chunk addresses. If at least one chunk is added to the batch and no new chunks
|
||||||
|
// are added in batchTimeout period, the batch will be returned. This function
|
||||||
|
// will block until new chunks are received from localstore pull subscription.
|
||||||
func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
|
func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
|
||||||
var batch []byte
|
//TODO: maybe add unit test for intervals usage in netstore/localstore together with SwarmSyncerServer?
|
||||||
i := 0
|
if from > 0 {
|
||||||
|
from--
|
||||||
var ticker *time.Ticker
|
|
||||||
defer func() {
|
|
||||||
if ticker != nil {
|
|
||||||
ticker.Stop()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
var wait bool
|
|
||||||
for {
|
|
||||||
if wait {
|
|
||||||
if ticker == nil {
|
|
||||||
ticker = time.NewTicker(1000 * time.Millisecond)
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
case <-s.quit:
|
|
||||||
return nil, 0, 0, nil, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
metrics.GetOrRegisterCounter("syncer.setnextbatch.iterator", nil).Inc(1)
|
|
||||||
err := s.store.Iterator(from, to, s.po, func(key storage.Address, idx uint64) bool {
|
|
||||||
select {
|
|
||||||
case <-s.quit:
|
|
||||||
return false
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
batch = append(batch, key[:]...)
|
|
||||||
i++
|
|
||||||
to = idx
|
|
||||||
return i < BatchSize
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, 0, nil, err
|
|
||||||
}
|
|
||||||
if len(batch) > 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
wait = true
|
|
||||||
}
|
}
|
||||||
|
batchStart := time.Now()
|
||||||
|
descriptors, stop := s.netStore.SubscribePull(context.Background(), s.po, from, to)
|
||||||
|
defer stop()
|
||||||
|
|
||||||
log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.store.BinIndex(s.po))
|
const batchTimeout = 2 * time.Second
|
||||||
return batch, from, to, nil, nil
|
|
||||||
|
var (
|
||||||
|
batch []byte
|
||||||
|
batchSize int
|
||||||
|
batchStartID *uint64
|
||||||
|
batchEndID uint64
|
||||||
|
timer *time.Timer
|
||||||
|
timerC <-chan time.Time
|
||||||
|
)
|
||||||
|
|
||||||
|
defer func(start time.Time) {
|
||||||
|
metrics.GetOrRegisterResettingTimer("syncer.set-next-batch.total-time", nil).UpdateSince(start)
|
||||||
|
metrics.GetOrRegisterCounter("syncer.set-next-batch.batch-size", nil).Inc(int64(batchSize))
|
||||||
|
if timer != nil {
|
||||||
|
timer.Stop()
|
||||||
|
}
|
||||||
|
}(batchStart)
|
||||||
|
|
||||||
|
for iterate := true; iterate; {
|
||||||
|
select {
|
||||||
|
case d, ok := <-descriptors:
|
||||||
|
if !ok {
|
||||||
|
iterate = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
batch = append(batch, d.Address[:]...)
|
||||||
|
// This is the most naive approach to label the chunk as synced
|
||||||
|
// allowing it to be garbage collected. A proper way requires
|
||||||
|
// validating that the chunk is successfully stored by the peer.
|
||||||
|
err := s.netStore.Set(context.Background(), chunk.ModeSetSync, d.Address)
|
||||||
|
if err != nil {
|
||||||
|
metrics.GetOrRegisterCounter("syncer.set-next-batch.set-sync-err", nil).Inc(1)
|
||||||
|
log.Debug("syncer pull subscription - err setting chunk as synced", "correlateId", s.correlateId, "err", err)
|
||||||
|
return nil, 0, 0, nil, err
|
||||||
|
}
|
||||||
|
batchSize++
|
||||||
|
if batchStartID == nil {
|
||||||
|
// set batch start id only if
|
||||||
|
// this is the first iteration
|
||||||
|
batchStartID = &d.BinID
|
||||||
|
}
|
||||||
|
batchEndID = d.BinID
|
||||||
|
if batchSize >= BatchSize {
|
||||||
|
iterate = false
|
||||||
|
metrics.GetOrRegisterCounter("syncer.set-next-batch.full-batch", nil).Inc(1)
|
||||||
|
log.Debug("syncer pull subscription - batch size reached", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
|
||||||
|
}
|
||||||
|
if timer == nil {
|
||||||
|
timer = time.NewTimer(batchTimeout)
|
||||||
|
} else {
|
||||||
|
log.Debug("syncer pull subscription - stopping timer", "correlateId", s.correlateId)
|
||||||
|
if !timer.Stop() {
|
||||||
|
<-timer.C
|
||||||
|
}
|
||||||
|
log.Debug("syncer pull subscription - channel drained, resetting timer", "correlateId", s.correlateId)
|
||||||
|
timer.Reset(batchTimeout)
|
||||||
|
}
|
||||||
|
timerC = timer.C
|
||||||
|
case <-timerC:
|
||||||
|
// return batch if new chunks are not
|
||||||
|
// received after some time
|
||||||
|
iterate = false
|
||||||
|
metrics.GetOrRegisterCounter("syncer.set-next-batch.timer-expire", nil).Inc(1)
|
||||||
|
log.Debug("syncer pull subscription timer expired", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
|
||||||
|
case <-s.quit:
|
||||||
|
iterate = false
|
||||||
|
log.Debug("syncer pull subscription - quit received", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if batchStartID == nil {
|
||||||
|
// if batch start id is not set, return 0
|
||||||
|
batchStartID = new(uint64)
|
||||||
|
}
|
||||||
|
return batch, *batchStartID, batchEndID, nil, nil
|
||||||
}
|
}
|
||||||
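The doc comment and loop above define the batching contract: drain descriptors from the pull subscription, cap the batch at BatchSize, and flush once no new chunk arrives within batchTimeout, taking care to Stop and drain the timer before each Reset. A standalone sketch of that collect-with-inactivity-timeout loop over a plain channel (byte-slice items instead of chunk descriptors):

package main

import (
	"fmt"
	"time"
)

// collectBatch drains items from in until either batchSize items were
// received or no new item arrived within idle. It mirrors the loop above:
// the timer is (re)armed only after an item is appended, and Stop/drain
// is used before Reset so a stale expiry cannot leak into timerC.
func collectBatch(in <-chan []byte, quit <-chan struct{}, batchSize int, idle time.Duration) [][]byte {
	var (
		batch  [][]byte
		timer  *time.Timer
		timerC <-chan time.Time
	)
	defer func() {
		if timer != nil {
			timer.Stop()
		}
	}()
	for iterate := true; iterate; {
		select {
		case item, ok := <-in:
			if !ok {
				iterate = false
				break
			}
			batch = append(batch, item)
			if len(batch) >= batchSize {
				iterate = false
			}
			if timer == nil {
				timer = time.NewTimer(idle)
			} else {
				if !timer.Stop() {
					<-timer.C
				}
				timer.Reset(idle)
			}
			timerC = timer.C
		case <-timerC:
			iterate = false // inactivity: return what we have so far
		case <-quit:
			iterate = false
		}
	}
	return batch
}

func main() {
	in := make(chan []byte, 8)
	for i := 0; i < 3; i++ {
		in <- []byte{byte(i)}
	}
	got := collectBatch(in, nil, 128, 200*time.Millisecond)
	fmt.Println("batch size:", len(got)) // 3, flushed by the idle timeout
}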
|
|
||||||
// SwarmSyncerClient
|
// SwarmSyncerClient
|
||||||
type SwarmSyncerClient struct {
|
type SwarmSyncerClient struct {
|
||||||
store storage.SyncChunkStore
|
netStore *storage.NetStore
|
||||||
peer *Peer
|
peer *Peer
|
||||||
stream Stream
|
stream Stream
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSwarmSyncerClient is a constructor for provable data exchange syncer
|
// NewSwarmSyncerClient is a constructor for provable data exchange syncer
|
||||||
func NewSwarmSyncerClient(p *Peer, store storage.SyncChunkStore, stream Stream) (*SwarmSyncerClient, error) {
|
func NewSwarmSyncerClient(p *Peer, netStore *storage.NetStore, stream Stream) (*SwarmSyncerClient, error) {
|
||||||
return &SwarmSyncerClient{
|
return &SwarmSyncerClient{
|
||||||
store: store,
|
netStore: netStore,
|
||||||
peer: p,
|
peer: p,
|
||||||
stream: stream,
|
stream: stream,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// // NewIncomingProvableSwarmSyncer is a constructor for provable data exchange syncer
|
|
||||||
// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Address, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient {
|
|
||||||
// retrieveC := make(storage.Chunk, chunksCap)
|
|
||||||
// RunChunkRequestor(p, retrieveC)
|
|
||||||
// storeC := make(storage.Chunk, chunksCap)
|
|
||||||
// RunChunkStorer(store, storeC)
|
|
||||||
// s := &SwarmSyncerClient{
|
|
||||||
// po: po,
|
|
||||||
// priority: priority,
|
|
||||||
// sessionAt: sessionAt,
|
|
||||||
// start: index,
|
|
||||||
// end: index,
|
|
||||||
// nextC: make(chan struct{}, 1),
|
|
||||||
// intervals: intervals,
|
|
||||||
// sessionRoot: sessionRoot,
|
|
||||||
// sessionReader: chunker.Join(sessionRoot, retrieveC),
|
|
||||||
// retrieveC: retrieveC,
|
|
||||||
// storeC: storeC,
|
|
||||||
// }
|
|
||||||
// return s
|
|
||||||
// }
|
|
||||||
|
|
||||||
// // StartSyncing is called on the Peer to start the syncing process
|
|
||||||
// // the idea is that it is called only after kademlia is close to healthy
|
|
||||||
// func StartSyncing(s *Streamer, peerId enode.ID, po uint8, nn bool) {
|
|
||||||
// lastPO := po
|
|
||||||
// if nn {
|
|
||||||
// lastPO = maxPO
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// for i := po; i <= lastPO; i++ {
|
|
||||||
// s.Subscribe(peerId, "SYNC", newSyncLabel("LIVE", po), 0, 0, High, true)
|
|
||||||
// s.Subscribe(peerId, "SYNC", newSyncLabel("HISTORY", po), 0, 0, Mid, false)
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
// RegisterSwarmSyncerClient registers the client constructor function for
|
// RegisterSwarmSyncerClient registers the client constructor function for
|
||||||
// handling incoming sync streams
|
// handling incoming sync streams
|
||||||
func RegisterSwarmSyncerClient(streamer *Registry, store storage.SyncChunkStore) {
|
func RegisterSwarmSyncerClient(streamer *Registry, netStore *storage.NetStore) {
|
||||||
streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
|
streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
|
||||||
return NewSwarmSyncerClient(p, store, NewStream("SYNC", t, live))
|
return NewSwarmSyncerClient(p, netStore, NewStream("SYNC", t, live))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// NeedData
|
// NeedData
|
||||||
func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func(context.Context) error) {
|
func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func(context.Context) error) {
|
||||||
return s.store.FetchFunc(ctx, key)
|
return s.netStore.FetchFunc(ctx, key)
|
||||||
}
|
}
|
||||||
|
|
||||||
// BatchDone
|
// BatchDone
|
||||||
|
@ -21,22 +21,20 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math"
|
|
||||||
"os"
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
"github.com/ethereum/go-ethereum/swarm/network"
|
"github.com/ethereum/go-ethereum/swarm/network"
|
||||||
"github.com/ethereum/go-ethereum/swarm/network/simulation"
|
"github.com/ethereum/go-ethereum/swarm/network/simulation"
|
||||||
"github.com/ethereum/go-ethereum/swarm/state"
|
"github.com/ethereum/go-ethereum/swarm/state"
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage/mock"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/testutil"
|
"github.com/ethereum/go-ethereum/swarm/testutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -55,24 +53,6 @@ func TestSyncerSimulation(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
|
|
||||||
address := common.BytesToAddress(id.Bytes())
|
|
||||||
mockStore := globalStore.NewNodeStore(address)
|
|
||||||
params := storage.NewDefaultLocalStoreParams()
|
|
||||||
|
|
||||||
datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
params.Init(datadir)
|
|
||||||
params.BaseKey = addr.Over()
|
|
||||||
lstore, err = storage.NewLocalStore(params, mockStore)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
return lstore, datadir, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, po uint8) {
|
func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, po uint8) {
|
||||||
|
|
||||||
sim := simulation.New(map[string]simulation.ServiceFunc{
|
sim := simulation.New(map[string]simulation.ServiceFunc{
|
||||||
@ -103,7 +83,6 @@ func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, p
|
|||||||
}
|
}
|
||||||
|
|
||||||
r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
|
r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
|
||||||
Retrieval: RetrievalDisabled,
|
|
||||||
Syncing: SyncingAutoSubscribe,
|
Syncing: SyncingAutoSubscribe,
|
||||||
SkipCheck: skipCheck,
|
SkipCheck: skipCheck,
|
||||||
}, nil)
|
}, nil)
|
||||||
@ -181,17 +160,32 @@ func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, p
|
|||||||
if i < nodes-1 {
|
if i < nodes-1 {
|
||||||
hashCounts[i] = hashCounts[i+1]
|
hashCounts[i] = hashCounts[i+1]
|
||||||
}
|
}
|
||||||
item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB)
|
item, ok := sim.NodeItem(nodeIDs[i], bucketKeyStore)
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("No DB")
|
return fmt.Errorf("No DB")
|
||||||
}
|
}
|
||||||
netStore := item.(*storage.NetStore)
|
store := item.(chunk.Store)
|
||||||
netStore.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
|
until, err := store.LastPullSubscriptionBinID(po)
|
||||||
hashes[i] = append(hashes[i], addr)
|
if err != nil {
|
||||||
totalHashes++
|
return err
|
||||||
hashCounts[i]++
|
}
|
||||||
return true
|
if until > 0 {
|
||||||
})
|
c, _ := store.SubscribePull(ctx, po, 0, until)
|
||||||
|
for iterate := true; iterate; {
|
||||||
|
select {
|
||||||
|
case cd, ok := <-c:
|
||||||
|
if !ok {
|
||||||
|
iterate = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
hashes[i] = append(hashes[i], cd.Address)
|
||||||
|
totalHashes++
|
||||||
|
hashCounts[i]++
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
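Instead of iterating the store, the updated check above asks for the last pull-subscription bin ID and then drains SubscribePull from 0 up to that ID, stopping when the channel closes or the context ends. A standalone sketch of that drain loop over a plain descriptor channel (descriptor is a local stand-in for chunk.Descriptor):

package main

import (
	"context"
	"fmt"
	"time"
)

// descriptor is a local stand-in for chunk.Descriptor: an address plus
// the bin ID under which the chunk was stored.
type descriptor struct {
	Address []byte
	BinID   uint64
}

// drain collects addresses from a pull-subscription-style channel until it
// is closed or the context is cancelled, mirroring the test loop above.
func drain(ctx context.Context, c <-chan descriptor) ([][]byte, error) {
	var addrs [][]byte
	for {
		select {
		case d, ok := <-c:
			if !ok {
				return addrs, nil
			}
			addrs = append(addrs, d.Address)
		case <-ctx.Done():
			return addrs, ctx.Err()
		}
	}
}

func main() {
	c := make(chan descriptor, 3)
	for i := uint64(1); i <= 3; i++ {
		c <- descriptor{Address: []byte{byte(i)}, BinID: i}
	}
	close(c)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	addrs, err := drain(ctx, c)
	fmt.Println(len(addrs), err) // 3 <nil>
}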
var total, found int
|
var total, found int
|
||||||
for _, node := range nodeIDs {
|
for _, node := range nodeIDs {
|
||||||
@ -200,12 +194,12 @@ func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, p
|
|||||||
for j := i; j < nodes; j++ {
|
for j := i; j < nodes; j++ {
|
||||||
total += len(hashes[j])
|
total += len(hashes[j])
|
||||||
for _, key := range hashes[j] {
|
for _, key := range hashes[j] {
|
||||||
item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB)
|
item, ok := sim.NodeItem(nodeIDs[j], bucketKeyStore)
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("No DB")
|
return fmt.Errorf("No DB")
|
||||||
}
|
}
|
||||||
db := item.(*storage.NetStore)
|
db := item.(chunk.Store)
|
||||||
_, err := db.Get(ctx, key)
|
_, err := db.Get(ctx, chunk.ModeGetRequest, key)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
found++
|
found++
|
||||||
}
|
}
|
||||||
@ -216,7 +210,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, p
|
|||||||
if total == found && total > 0 {
|
if total == found && total > 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return fmt.Errorf("Total not equallying found: total is %d", total)
|
return fmt.Errorf("Total not equallying found %v: total is %d", found, total)
|
||||||
})
|
})
|
||||||
|
|
||||||
if result.Error != nil {
|
if result.Error != nil {
|
||||||
@ -237,8 +231,7 @@ func TestSameVersionID(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
Retrieval: RetrievalDisabled,
|
Syncing: SyncingAutoSubscribe,
|
||||||
Syncing: SyncingAutoSubscribe,
|
|
||||||
}, nil)
|
}, nil)
|
||||||
bucket.Store(bucketKeyRegistry, r)
|
bucket.Store(bucketKeyRegistry, r)
|
||||||
|
|
||||||
@ -301,8 +294,7 @@ func TestDifferentVersionID(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
Retrieval: RetrievalDisabled,
|
Syncing: SyncingAutoSubscribe,
|
||||||
Syncing: SyncingAutoSubscribe,
|
|
||||||
}, nil)
|
}, nil)
|
||||||
bucket.Store(bucketKeyRegistry, r)
|
bucket.Store(bucketKeyRegistry, r)
|
||||||
|
|
||||||
|
@ -23,11 +23,13 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/sctx"
|
||||||
"github.com/ethereum/go-ethereum/swarm/testutil"
|
"github.com/ethereum/go-ethereum/swarm/testutil"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
@ -416,7 +418,7 @@ func uploadFile(swarm *Swarm) (storage.Address, string, error) {
|
|||||||
// uniqueness is very certain.
|
// uniqueness is very certain.
|
||||||
data := fmt.Sprintf("test content %s %x", time.Now().Round(0), b)
|
data := fmt.Sprintf("test content %s %x", time.Now().Round(0), b)
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
k, wait, err := swarm.api.Put(ctx, data, "text/plain", false)
|
k, wait, err := putString(ctx, swarm.api, data, "text/plain", false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
@ -530,3 +532,31 @@ func retrieve(
|
|||||||
|
|
||||||
return uint64(totalCheckCount) - atomic.LoadUint64(totalFoundCount)
|
return uint64(totalCheckCount) - atomic.LoadUint64(totalFoundCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// putString provides singleton manifest creation on top of api.API
|
||||||
|
func putString(ctx context.Context, a *api.API, content string, contentType string, toEncrypt bool) (k storage.Address, wait func(context.Context) error, err error) {
|
||||||
|
r := strings.NewReader(content)
|
||||||
|
tag, err := a.Tags.New("unnamed-tag", 0)
|
||||||
|
|
||||||
|
log.Trace("created new tag", "uid", tag.Uid)
|
||||||
|
|
||||||
|
cCtx := sctx.SetTag(ctx, tag.Uid)
|
||||||
|
key, waitContent, err := a.Store(cCtx, r, int64(len(content)), toEncrypt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
|
||||||
|
r = strings.NewReader(manifest)
|
||||||
|
key, waitManifest, err := a.Store(cCtx, r, int64(len(manifest)), toEncrypt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
tag.DoneSplit(key)
|
||||||
|
return key, func(ctx context.Context) error {
|
||||||
|
err := waitContent(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return waitManifest(ctx)
|
||||||
|
}, nil
|
||||||
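putString above stores the raw content, wraps its hash in a single-entry manifest, stores that too, and returns a wait function covering both writes. A standalone sketch of just the manifest-wrapping step, built with encoding/json instead of the Sprintf above (same field names, purely illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// manifestEntry mirrors the two fields the helper above emits for its
// single-entry manifest: the content hash and its content type.
type manifestEntry struct {
	Hash        string `json:"hash"`
	ContentType string `json:"contentType"`
}

type manifest struct {
	Entries []manifestEntry `json:"entries"`
}

// singletonManifest builds the JSON body that putString stores as the
// second object, pointing at the already-stored content.
func singletonManifest(contentHash, contentType string) ([]byte, error) {
	return json.Marshal(manifest{
		Entries: []manifestEntry{{Hash: contentHash, ContentType: contentType}},
	})
}

func main() {
	m, err := singletonManifest("1a2b3c4d", "text/plain")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(m))
	// {"entries":[{"hash":"1a2b3c4d","contentType":"text/plain"}]}
}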
|
}
|
||||||
|
@ -28,6 +28,7 @@ import (
|
|||||||
// asymmetrical key exchange between two directly connected peers
|
// asymmetrical key exchange between two directly connected peers
|
||||||
// full address, partial address (8 bytes) and empty address
|
// full address, partial address (8 bytes) and empty address
|
||||||
func TestHandshake(t *testing.T) {
|
func TestHandshake(t *testing.T) {
|
||||||
|
t.Skip("Handshakes have not been maintained for a longer period, and have started to fail. They should be reviewed and possible removed.")
|
||||||
t.Run("32", testHandshake)
|
t.Run("32", testHandshake)
|
||||||
t.Run("8", testHandshake)
|
t.Run("8", testHandshake)
|
||||||
t.Run("0", testHandshake)
|
t.Run("0", testHandshake)
|
||||||
|
@ -5,12 +5,15 @@ import "context"
|
|||||||
type (
|
type (
|
||||||
HTTPRequestIDKey struct{}
|
HTTPRequestIDKey struct{}
|
||||||
requestHostKey struct{}
|
requestHostKey struct{}
|
||||||
|
tagKey struct{}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// SetHost sets the http request host in the context
|
||||||
func SetHost(ctx context.Context, domain string) context.Context {
|
func SetHost(ctx context.Context, domain string) context.Context {
|
||||||
return context.WithValue(ctx, requestHostKey{}, domain)
|
return context.WithValue(ctx, requestHostKey{}, domain)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetHost gets the request host from the context
|
||||||
func GetHost(ctx context.Context) string {
|
func GetHost(ctx context.Context) string {
|
||||||
v, ok := ctx.Value(requestHostKey{}).(string)
|
v, ok := ctx.Value(requestHostKey{}).(string)
|
||||||
if ok {
|
if ok {
|
||||||
@ -18,3 +21,17 @@ func GetHost(ctx context.Context) string {
|
|||||||
}
|
}
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetTag sets the tag unique identifier in the context
|
||||||
|
func SetTag(ctx context.Context, tagId uint32) context.Context {
|
||||||
|
return context.WithValue(ctx, tagKey{}, tagId)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTag gets the tag unique identifier from the context
|
||||||
|
func GetTag(ctx context.Context) uint32 {
|
||||||
|
v, ok := ctx.Value(tagKey{}).(uint32)
|
||||||
|
if ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
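SetTag and GetTag above thread an upload tag's numeric id through a context using an unexported key type, just like the existing host helpers. A minimal standalone sketch of the same pattern (lower-case names to mark it as a copy for illustration):

package main

import (
	"context"
	"fmt"
)

// tagKey is an unexported struct key, so no other package can collide
// with it when storing values in the context.
type tagKey struct{}

func setTag(ctx context.Context, tagID uint32) context.Context {
	return context.WithValue(ctx, tagKey{}, tagID)
}

func getTag(ctx context.Context) uint32 {
	if v, ok := ctx.Value(tagKey{}).(uint32); ok {
		return v
	}
	return 0 // zero means "no tag set"
}

func main() {
	ctx := context.Background()
	fmt.Println(getTag(ctx)) // 0, nothing set yet

	ctx = setTag(ctx, 42)
	fmt.Println(getTag(ctx)) // 42, travels with the context
}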
|
@ -45,16 +45,7 @@ const (
|
|||||||
// It provides a schema functionality to store fields and indexes
|
// It provides a schema functionality to store fields and indexes
|
||||||
// information about naming and types.
|
// information about naming and types.
|
||||||
type DB struct {
|
type DB struct {
|
||||||
ldb *leveldb.DB
|
ldb *leveldb.DB
|
||||||
|
|
||||||
compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction
|
|
||||||
compReadMeter metrics.Meter // Meter for measuring the data read during compaction
|
|
||||||
compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
|
|
||||||
writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
|
|
||||||
writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction
|
|
||||||
diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read
|
|
||||||
diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written
|
|
||||||
|
|
||||||
quit chan struct{} // Quit channel to stop the metrics collection before closing the database
|
quit chan struct{} // Quit channel to stop the metrics collection before closing the database
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -86,13 +77,10 @@ func NewDB(path string, metricsPrefix string) (db *DB, err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configure meters for DB
|
|
||||||
db.configure(metricsPrefix)
|
|
||||||
|
|
||||||
// Create a quit channel for the periodic metrics collector and run it
|
// Create a quit channel for the periodic metrics collector and run it
|
||||||
db.quit = make(chan struct{})
|
db.quit = make(chan struct{})
|
||||||
|
|
||||||
go db.meter(10 * time.Second)
|
go db.meter(metricsPrefix, 10*time.Second)
|
||||||
|
|
||||||
return db, nil
|
return db, nil
|
||||||
}
|
}
|
||||||
@ -169,19 +157,22 @@ func (db *DB) Close() (err error) {
|
|||||||
return db.ldb.Close()
|
return db.ldb.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configure configures the database metrics collectors
|
func (db *DB) meter(prefix string, refresh time.Duration) {
|
||||||
func (db *DB) configure(prefix string) {
|
// Meter for measuring the total time spent in database compaction
|
||||||
// Initialize all the metrics collector at the requested prefix
|
compTimeMeter := metrics.NewRegisteredMeter(prefix+"compact/time", nil)
|
||||||
db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
|
// Meter for measuring the data read during compaction
|
||||||
db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
|
compReadMeter := metrics.NewRegisteredMeter(prefix+"compact/input", nil)
|
||||||
db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
|
// Meter for measuring the data written during compaction
|
||||||
db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
|
compWriteMeter := metrics.NewRegisteredMeter(prefix+"compact/output", nil)
|
||||||
db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
|
// Meter for measuring the write delay number due to database compaction
|
||||||
db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
|
writeDelayMeter := metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
|
||||||
db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)
|
// Meter for measuring the write delay duration due to database compaction
|
||||||
}
|
writeDelayNMeter := metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)
|
||||||
|
// Meter for measuring the effective amount of data read
|
||||||
|
diskReadMeter := metrics.NewRegisteredMeter(prefix+"disk/read", nil)
|
||||||
|
// Meter for measuring the effective amount of data written
|
||||||
|
diskWriteMeter := metrics.NewRegisteredMeter(prefix+"disk/write", nil)
|
||||||
|
|
||||||
func (db *DB) meter(refresh time.Duration) {
|
|
||||||
// Create the counters to store current and previous compaction values
|
// Create the counters to store current and previous compaction values
|
||||||
compactions := make([][]float64, 2)
|
compactions := make([][]float64, 2)
|
||||||
for i := 0; i < 2; i++ {
|
for i := 0; i < 2; i++ {
|
||||||
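The hunk above moves the compaction and I/O meters out of the DB struct and creates them as locals inside meter(prefix, refresh), so the prefix is needed only at call time. A standalone skeleton of that shape using the same go-ethereum metrics calls shown above; the loop body only marks zero deltas where the real code parses the leveldb statistics:

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// meter shows the refactored shape: all meters are locals, created from
// the prefix passed in by the caller, instead of being struct fields.
func meter(prefix string, refresh time.Duration, quit <-chan struct{}) {
	compTimeMeter := metrics.NewRegisteredMeter(prefix+"compact/time", nil)
	diskReadMeter := metrics.NewRegisteredMeter(prefix+"disk/read", nil)
	diskWriteMeter := metrics.NewRegisteredMeter(prefix+"disk/write", nil)

	for {
		select {
		case <-quit:
			return
		case <-time.After(refresh):
			// In the real implementation the leveldb compaction and I/O
			// statistics are parsed here and their deltas are fed into
			// the local meters; zero is marked here as a placeholder.
			compTimeMeter.Mark(0)
			diskReadMeter.Mark(0)
			diskWriteMeter.Mark(0)
		}
	}
}

func main() {
	quit := make(chan struct{})
	go meter("swarm/shed/example/", time.Second, quit)
	time.Sleep(10 * time.Millisecond)
	close(quit)
}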
@ -234,14 +225,14 @@ func (db *DB) meter(refresh time.Duration) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Update all the requested meters
|
// Update all the requested meters
|
||||||
if db.compTimeMeter != nil {
|
if compTimeMeter != nil {
|
||||||
db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
|
compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
|
||||||
}
|
}
|
||||||
if db.compReadMeter != nil {
|
if compReadMeter != nil {
|
||||||
db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
|
compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
|
||||||
}
|
}
|
||||||
if db.compWriteMeter != nil {
|
if compWriteMeter != nil {
|
||||||
db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
|
compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Retrieve the write delay statistic
|
// Retrieve the write delay statistic
|
||||||
@ -265,11 +256,11 @@ func (db *DB) meter(refresh time.Duration) {
|
|||||||
log.Error("Failed to parse delay duration", "err", err)
|
log.Error("Failed to parse delay duration", "err", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if db.writeDelayNMeter != nil {
|
if writeDelayNMeter != nil {
|
||||||
db.writeDelayNMeter.Mark(delayN - delaystats[0])
|
writeDelayNMeter.Mark(delayN - delaystats[0])
|
||||||
}
|
}
|
||||||
if db.writeDelayMeter != nil {
|
if writeDelayMeter != nil {
|
||||||
db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
|
writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
|
||||||
}
|
}
|
||||||
// If a warning that db is performing compaction has been displayed, any subsequent
|
// If a warning that db is performing compaction has been displayed, any subsequent
|
||||||
// warnings will be withheld for one minute not to overwhelm the user.
|
// warnings will be withheld for one minute not to overwhelm the user.
|
||||||
@ -300,11 +291,11 @@ func (db *DB) meter(refresh time.Duration) {
|
|||||||
log.Error("Bad syntax of write entry", "entry", parts[1])
|
log.Error("Bad syntax of write entry", "entry", parts[1])
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if db.diskReadMeter != nil {
|
if diskReadMeter != nil {
|
||||||
db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
|
diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
|
||||||
}
|
}
|
||||||
if db.diskWriteMeter != nil {
|
if diskWriteMeter != nil {
|
||||||
db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
|
diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
|
||||||
}
|
}
|
||||||
iostats[0], iostats[1] = nRead, nWrite
|
iostats[0], iostats[1] = nRead, nWrite
|
||||||
|
|
||||||
|
@ -40,9 +40,7 @@ type Item struct {
|
|||||||
Data []byte
|
Data []byte
|
||||||
AccessTimestamp int64
|
AccessTimestamp int64
|
||||||
StoreTimestamp int64
|
StoreTimestamp int64
|
||||||
// UseMockStore is a pointer to identify
|
BinID uint64
|
||||||
// an unset state of the field in Join function.
|
|
||||||
UseMockStore *bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Merge is a helper method to construct a new
|
// Merge is a helper method to construct a new
|
||||||
@ -61,8 +59,8 @@ func (i Item) Merge(i2 Item) (new Item) {
|
|||||||
if i.StoreTimestamp == 0 {
|
if i.StoreTimestamp == 0 {
|
||||||
i.StoreTimestamp = i2.StoreTimestamp
|
i.StoreTimestamp = i2.StoreTimestamp
|
||||||
}
|
}
|
||||||
if i.UseMockStore == nil {
|
if i.BinID == 0 {
|
||||||
i.UseMockStore = i2.UseMockStore
|
i.BinID = i2.BinID
|
||||||
}
|
}
|
||||||
return i
|
return i
|
||||||
}
|
}
|
||||||
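Merge above treats zero values as unset and fills them from the second Item, and with UseMockStore gone a zero BinID is now the unset marker. A standalone sketch of that rule on a trimmed-down struct (field set reduced for illustration):

package main

import "fmt"

// item is a trimmed-down stand-in for shed.Item, keeping just enough
// fields to show the merge rule: zero values are treated as unset.
type item struct {
	Address         []byte
	Data            []byte
	AccessTimestamp int64
	StoreTimestamp  int64
	BinID           uint64
}

// merge fills every unset (zero) field of i from i2, mirroring Item.Merge.
func merge(i, i2 item) item {
	if i.Address == nil {
		i.Address = i2.Address
	}
	if i.Data == nil {
		i.Data = i2.Data
	}
	if i.AccessTimestamp == 0 {
		i.AccessTimestamp = i2.AccessTimestamp
	}
	if i.StoreTimestamp == 0 {
		i.StoreTimestamp = i2.StoreTimestamp
	}
	if i.BinID == 0 {
		i.BinID = i2.BinID
	}
	return i
}

func main() {
	a := item{Address: []byte{0x01}, StoreTimestamp: 42}
	b := item{Data: []byte("payload"), BinID: 7}
	fmt.Printf("%+v\n", merge(a, b))
	// {Address:[1] Data:[112 97 121 108 111 97 100] AccessTimestamp:0 StoreTimestamp:42 BinID:7}
}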
|
@ -52,7 +52,7 @@ type indexSpec struct {
|
|||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// schemaFieldKey retrives the complete LevelDB key for
|
// schemaFieldKey retrieves the complete LevelDB key for
|
||||||
// a particular field from the schema definition.
|
// a particular field from the schema definition.
|
||||||
func (db *DB) schemaFieldKey(name, fieldType string) (key []byte, err error) {
|
func (db *DB) schemaFieldKey(name, fieldType string) (key []byte, err error) {
|
||||||
if name == "" {
|
if name == "" {
|
||||||
|
@ -24,6 +24,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/testutil"
|
"github.com/ethereum/go-ethereum/swarm/testutil"
|
||||||
"golang.org/x/crypto/sha3"
|
"golang.org/x/crypto/sha3"
|
||||||
)
|
)
|
||||||
@ -42,8 +43,10 @@ type chunkerTester struct {
|
|||||||
t test
|
t test
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var mockTag = chunk.NewTag(0, "mock-tag", 0)
|
||||||
|
|
||||||
func newTestHasherStore(store ChunkStore, hash string) *hasherStore {
|
func newTestHasherStore(store ChunkStore, hash string) *hasherStore {
|
||||||
return NewHasherStore(store, MakeHashFunc(hash), false)
|
return NewHasherStore(store, MakeHashFunc(hash), false, chunk.NewTag(0, "test-tag", 0))
|
||||||
}
|
}
|
||||||
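The chunker tests now pass a chunk.NewTag(...) through the hasher store and the pyramid splitter so each upload's progress can be accounted against a tag. The stand-in below only illustrates what such a progress tag tracks (atomic counters per upload); it is not the chunk.Tag API:

package main

import (
	"fmt"
	"sync/atomic"
)

// uploadTag is an illustrative stand-in for a per-upload progress tag:
// every chunk produced by the splitter increments split, every chunk
// confirmed by the store increments stored.
type uploadTag struct {
	Uid    uint32
	Name   string
	split  int64
	stored int64
}

func (t *uploadTag) IncSplit()  { atomic.AddInt64(&t.split, 1) }
func (t *uploadTag) IncStored() { atomic.AddInt64(&t.stored, 1) }

func (t *uploadTag) Progress() (split, stored int64) {
	return atomic.LoadInt64(&t.split), atomic.LoadInt64(&t.stored)
}

func main() {
	tag := &uploadTag{Uid: 1, Name: "test-tag"}
	for i := 0; i < 4; i++ {
		tag.IncSplit()
	}
	tag.IncStored()
	split, stored := tag.Progress()
	fmt.Printf("split=%d stored=%d\n", split, stored) // split=4 stored=1
}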
|
|
||||||
func testRandomBrokenData(n int, tester *chunkerTester) {
|
func testRandomBrokenData(n int, tester *chunkerTester) {
|
||||||
@ -91,7 +94,7 @@ func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester)
|
|||||||
var err error
|
var err error
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
if usePyramid {
|
if usePyramid {
|
||||||
addr, wait, err = PyramidSplit(ctx, data, putGetter, putGetter)
|
addr, wait, err = PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
|
||||||
} else {
|
} else {
|
||||||
addr, wait, err = TreeSplit(ctx, data, int64(n), putGetter)
|
addr, wait, err = TreeSplit(ctx, data, int64(n), putGetter)
|
||||||
}
|
}
|
||||||
@ -188,7 +191,7 @@ func TestDataAppend(t *testing.T) {
|
|||||||
putGetter := newTestHasherStore(store, SHA3Hash)
|
putGetter := newTestHasherStore(store, SHA3Hash)
|
||||||
|
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
addr, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
|
addr, wait, err := PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
tester.t.Fatalf(err.Error())
|
tester.t.Fatalf(err.Error())
|
||||||
}
|
}
|
||||||
@ -208,7 +211,7 @@ func TestDataAppend(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
putGetter = newTestHasherStore(store, SHA3Hash)
|
putGetter = newTestHasherStore(store, SHA3Hash)
|
||||||
newAddr, wait, err := PyramidAppend(ctx, addr, appendData, putGetter, putGetter)
|
newAddr, wait, err := PyramidAppend(ctx, addr, appendData, putGetter, putGetter, mockTag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
tester.t.Fatalf(err.Error())
|
tester.t.Fatalf(err.Error())
|
||||||
}
|
}
|
||||||
@ -278,7 +281,7 @@ func benchmarkSplitJoin(n int, t *testing.B) {
|
|||||||
|
|
||||||
putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)
|
putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
|
key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf(err.Error())
|
t.Fatalf(err.Error())
|
||||||
}
|
}
|
||||||
@ -335,7 +338,7 @@ func benchmarkSplitPyramidBMT(n int, t *testing.B) {
|
|||||||
putGetter := newTestHasherStore(&FakeChunkStore{}, BMTHash)
|
putGetter := newTestHasherStore(&FakeChunkStore{}, BMTHash)
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
_, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
|
_, wait, err := PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf(err.Error())
|
t.Fatalf(err.Error())
|
||||||
}
|
}
|
||||||
@ -353,7 +356,7 @@ func benchmarkSplitPyramidSHA3(n int, t *testing.B) {
|
|||||||
putGetter := newTestHasherStore(&FakeChunkStore{}, SHA3Hash)
|
putGetter := newTestHasherStore(&FakeChunkStore{}, SHA3Hash)
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
_, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
|
_, wait, err := PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf(err.Error())
|
t.Fatalf(err.Error())
|
||||||
}
|
}
|
||||||
@ -374,7 +377,7 @@ func benchmarkSplitAppendPyramid(n, m int, t *testing.B) {
|
|||||||
putGetter := newTestHasherStore(store, SHA3Hash)
|
putGetter := newTestHasherStore(store, SHA3Hash)
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
|
key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf(err.Error())
|
t.Fatalf(err.Error())
|
||||||
}
|
}
|
||||||
@ -384,7 +387,7 @@ func benchmarkSplitAppendPyramid(n, m int, t *testing.B) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
putGetter = newTestHasherStore(store, SHA3Hash)
|
putGetter = newTestHasherStore(store, SHA3Hash)
|
||||||
_, wait, err = PyramidAppend(ctx, key, data1, putGetter, putGetter)
|
_, wait, err = PyramidAppend(ctx, key, data1, putGetter, putGetter, mockTag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf(err.Error())
|
t.Fatalf(err.Error())
|
||||||
}
|
}
|
||||||
|
@ -22,8 +22,6 @@ import (
|
|||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@ -59,30 +57,6 @@ func brokenLimitReader(data io.Reader, size int, errAt int) *brokenLimitedReader
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newLDBStore(t *testing.T) (*LDBStore, func()) {
|
|
||||||
dir, err := ioutil.TempDir("", "bzz-storage-test")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
log.Trace("memstore.tempdir", "dir", dir)
|
|
||||||
|
|
||||||
ldbparams := NewLDBStoreParams(NewDefaultStoreParams(), dir)
|
|
||||||
db, err := NewLDBStore(ldbparams)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cleanup := func() {
|
|
||||||
db.Close()
|
|
||||||
err := os.RemoveAll(dir)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return db, cleanup
|
|
||||||
}
|
|
||||||
|
|
||||||
func mputRandomChunks(store ChunkStore, n int) ([]Chunk, error) {
|
func mputRandomChunks(store ChunkStore, n int) ([]Chunk, error) {
|
||||||
return mput(store, n, GenerateRandomChunk)
|
return mput(store, n, GenerateRandomChunk)
|
||||||
}
|
}
|
||||||
@ -94,14 +68,15 @@ func mput(store ChunkStore, n int, f func(i int64) Chunk) (hs []Chunk, err error
|
|||||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
|
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
for i := int64(0); i < int64(n); i++ {
|
for i := int64(0); i < int64(n); i++ {
|
||||||
chunk := f(chunk.DefaultSize)
|
ch := f(chunk.DefaultSize)
|
||||||
go func() {
|
go func() {
|
||||||
|
_, err := store.Put(ctx, chunk.ModePutUpload, ch)
|
||||||
select {
|
select {
|
||||||
case errc <- store.Put(ctx, chunk):
|
case errc <- err:
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
hs = append(hs, chunk)
|
hs = append(hs, ch)
|
||||||
}
|
}
|
||||||
|
|
||||||
// wait for all chunks to be stored
|
// wait for all chunks to be stored
|
||||||
@ -123,13 +98,13 @@ func mget(store ChunkStore, hs []Address, f func(h Address, chunk Chunk) error)
|
|||||||
go func(h Address) {
|
go func(h Address) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
// TODO: write timeout with context
|
// TODO: write timeout with context
|
||||||
chunk, err := store.Get(context.TODO(), h)
|
ch, err := store.Get(context.TODO(), chunk.ModeGetRequest, h)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errc <- err
|
errc <- err
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if f != nil {
|
if f != nil {
|
||||||
err = f(h, chunk)
|
err = f(h, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errc <- err
|
errc <- err
|
||||||
return
|
return
|
||||||
@ -250,14 +225,15 @@ func NewMapChunkStore() *MapChunkStore {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MapChunkStore) Put(_ context.Context, ch Chunk) error {
|
func (m *MapChunkStore) Put(_ context.Context, _ chunk.ModePut, ch Chunk) (bool, error) {
|
||||||
m.mu.Lock()
|
m.mu.Lock()
|
||||||
defer m.mu.Unlock()
|
defer m.mu.Unlock()
|
||||||
|
_, exists := m.chunks[ch.Address().Hex()]
|
||||||
m.chunks[ch.Address().Hex()] = ch
|
m.chunks[ch.Address().Hex()] = ch
|
||||||
return nil
|
return exists, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MapChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
|
func (m *MapChunkStore) Get(_ context.Context, _ chunk.ModeGet, ref Address) (Chunk, error) {
|
||||||
m.mu.RLock()
|
m.mu.RLock()
|
||||||
defer m.mu.RUnlock()
|
defer m.mu.RUnlock()
|
||||||
chunk := m.chunks[ref.Hex()]
|
chunk := m.chunks[ref.Hex()]
|
||||||
@ -268,15 +244,28 @@ func (m *MapChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Need to implement Has from SyncChunkStore
|
// Need to implement Has from SyncChunkStore
|
||||||
func (m *MapChunkStore) Has(ctx context.Context, ref Address) bool {
|
func (m *MapChunkStore) Has(ctx context.Context, ref Address) (has bool, err error) {
|
||||||
m.mu.RLock()
|
m.mu.RLock()
|
||||||
defer m.mu.RUnlock()
|
defer m.mu.RUnlock()
|
||||||
|
|
||||||
_, has := m.chunks[ref.Hex()]
|
_, has = m.chunks[ref.Hex()]
|
||||||
return has
|
return has, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MapChunkStore) Close() {
|
func (m *MapChunkStore) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MapChunkStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MapChunkStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MapChunkStore) Close() error {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
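The extended test double above makes Put report whether the chunk was already stored and stubs the pull-subscription surface of the wider store interface. A standalone sketch of the Put-returns-exists contract on a minimal map-backed store (plain string addresses, no swarm imports):

package main

import (
	"fmt"
	"sync"
)

// memStore is a minimal map-backed chunk store illustrating the new Put
// contract: it reports whether the chunk was already present, which lets
// callers count duplicate uploads without a separate Has round trip.
type memStore struct {
	mu     sync.RWMutex
	chunks map[string][]byte
}

func newMemStore() *memStore {
	return &memStore{chunks: make(map[string][]byte)}
}

func (m *memStore) Put(addr string, data []byte) (exists bool, err error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	_, exists = m.chunks[addr]
	m.chunks[addr] = data
	return exists, nil
}

func (m *memStore) Has(addr string) (bool, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	_, has := m.chunks[addr]
	return has, nil
}

func main() {
	s := newMemStore()
	fmt.Println(s.Put("c1", []byte("x"))) // false <nil> - first store
	fmt.Println(s.Put("c1", []byte("x"))) // true <nil>  - already present
	fmt.Println(s.Has("c2"))              // false <nil>
}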
func chunkAddresses(chunks []Chunk) []Address {
|
func chunkAddresses(chunks []Chunk) []Address {
|
||||||
|
@ -1,82 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

// this is a clone of an earlier state of the ethereum ethdb/database
// no need for queueing/caching

import (
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

const openFileLimit = 128

type LDBDatabase struct {
	db *leveldb.DB
}

func NewLDBDatabase(file string) (*LDBDatabase, error) {
	// Open the db
	db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: openFileLimit})
	if err != nil {
		return nil, err
	}

	database := &LDBDatabase{db: db}

	return database, nil
}

func (db *LDBDatabase) Put(key []byte, value []byte) error {
	metrics.GetOrRegisterCounter("ldbdatabase.put", nil).Inc(1)

	return db.db.Put(key, value, nil)
}

func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
	metrics.GetOrRegisterCounter("ldbdatabase.get", nil).Inc(1)

	dat, err := db.db.Get(key, nil)
	if err != nil {
		return nil, err
	}
	return dat, nil
}

func (db *LDBDatabase) Delete(key []byte) error {
	return db.db.Delete(key, nil)
}

func (db *LDBDatabase) NewIterator() iterator.Iterator {
	metrics.GetOrRegisterCounter("ldbdatabase.newiterator", nil).Inc(1)

	return db.db.NewIterator(nil, nil)
}

func (db *LDBDatabase) Write(batch *leveldb.Batch) error {
	metrics.GetOrRegisterCounter("ldbdatabase.write", nil).Inc(1)

	return db.db.Write(batch, nil)
}

func (db *LDBDatabase) Close() {
	// Close the leveldb database
	db.db.Close()
}
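Everywhere the deleted LDBDatabase/LDBStore pair was used, the remaining hunks in this diff construct storage through the new swarm/storage/localstore package instead. The following is a minimal sketch of that replacement wiring, not part of the commit; it reuses only the localstore.New call and the mode-aware Get that appear later in this diff, and the directory path and zero base key are placeholder values.

package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

func main() {
	// localstore.New(path, baseKey, mockStore) stands in for the removed
	// wrapper; nil means no mock-store backend.
	db, err := localstore.New("/tmp/swarm-chunks", make([]byte, 32), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The returned store speaks the mode-aware chunk store API used in the
	// hunks below; a miss is expected on an empty store.
	if _, err := db.Get(context.Background(), chunk.ModeGetRequest, chunk.Address(make([]byte, 32))); err != nil {
		log.Println("lookup failed as expected:", err)
	}
}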
@ -24,6 +24,8 @@ import (
 	"fmt"
 	"sync"
 
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+
 	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
 
 	"github.com/ethereum/go-ethereum/swarm/log"

@ -189,7 +191,7 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error)
 	ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout)
 	defer cancel()
 
-	chunk, err := h.chunkStore.Get(ctx, id.Addr())
+	ch, err := h.chunkStore.Get(ctx, chunk.ModeGetLookup, id.Addr())
 	if err != nil {
 		if err == context.DeadlineExceeded { // chunk not found
 			return nil, nil

@ -198,7 +200,7 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error)
 	}
 
 	var request Request
-	if err := request.fromChunk(chunk); err != nil {
+	if err := request.fromChunk(ch); err != nil {
 		return nil, nil
 	}
 	if request.Time <= timeLimit {

@ -257,14 +259,14 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Ad
 		return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist")
 	}
 
-	chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
+	ch, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
 	if err != nil {
 		return nil, err
 	}
 
 	// send the chunk
-	h.chunkStore.Put(ctx, chunk)
-	log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data())
+	h.chunkStore.Put(ctx, chunk.ModePutUpload, ch)
+	log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", ch.Data())
 	// update our feed updates map cache entry if the new update is older than the one we have, if we have it.
 	if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) {
 		feedUpdate.Epoch = r.Epoch
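The change running through these hunks is that chunk store reads and writes now carry an explicit mode. A short sketch of the calling convention, assuming only what the hunks above show (a store satisfying the chunk.Store interface, ModeGetLookup for lookup-driven reads, ModePutUpload for locally created updates); this is illustration, not code from the commit.

package feedexample

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/swarm/chunk"
)

// lookupThenStore mirrors Handler.Lookup and Handler.Update above: reads made
// while resolving a feed lookup pass chunk.ModeGetLookup, and locally
// originated writes pass chunk.ModePutUpload. Put also reports whether the
// chunk was already present.
func lookupThenStore(ctx context.Context, store chunk.Store, ch chunk.Chunk) error {
	if _, err := store.Get(ctx, chunk.ModeGetLookup, ch.Address()); err != nil {
		log.Println("lookup miss, treating update as new:", err)
	}
	seen, err := store.Put(ctx, chunk.ModePutUpload, ch)
	if err != nil {
		return err
	}
	if seen {
		log.Println("chunk was already stored")
	}
	return nil
}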
@ -31,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
 )
 
 var (

@ -400,9 +401,7 @@ func TestValidatorInStore(t *testing.T) {
 	}
 	defer os.RemoveAll(datadir)
 
-	handlerParams := storage.NewDefaultLocalStoreParams()
-	handlerParams.Init(datadir)
-	store, err := storage.NewLocalStore(handlerParams, nil)
+	localstore, err := localstore.New(datadir, make([]byte, 32), nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@ -410,7 +409,7 @@ func TestValidatorInStore(t *testing.T) {
 	// set up Swarm feeds handler and add is as a validator to the localstore
 	fhParams := &HandlerParams{}
 	fh := NewHandler(fhParams)
-	store.Validators = append(store.Validators, fh)
+	store := chunk.NewValidatorStore(localstore, fh)
 
 	// create content addressed chunks, one good, one faulty
 	chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2)

@ -447,15 +446,15 @@ func TestValidatorInStore(t *testing.T) {
 	}
 
 	// put the chunks in the store and check their error status
-	err = store.Put(context.Background(), goodChunk)
+	_, err = store.Put(context.Background(), chunk.ModePutUpload, goodChunk)
 	if err == nil {
 		t.Fatal("expected error on good content address chunk with feed update validator only, but got nil")
 	}
-	err = store.Put(context.Background(), badChunk)
+	_, err = store.Put(context.Background(), chunk.ModePutUpload, badChunk)
 	if err == nil {
 		t.Fatal("expected error on bad content address chunk with feed update validator only, but got nil")
 	}
-	err = store.Put(context.Background(), uglyChunk)
+	_, err = store.Put(context.Background(), chunk.ModePutUpload, uglyChunk)
 	if err != nil {
 		t.Fatalf("expected no error on feed update chunk with feed update validator only, but got: %s", err)
 	}
@ -18,12 +18,13 @@ package feed
 
 import (
 	"context"
-	"fmt"
 	"path/filepath"
 	"sync"
 
 	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/storage"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
 )
 
 const (

@ -53,14 +54,14 @@ func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetF
 func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) {
 	path := filepath.Join(datadir, testDbDirName)
 	fh := NewHandler(params)
-	localstoreparams := storage.NewDefaultLocalStoreParams()
-	localstoreparams.Init(path)
-	localStore, err := storage.NewLocalStore(localstoreparams, nil)
+	db, err := localstore.New(filepath.Join(path, "chunks"), make([]byte, 32), nil)
 	if err != nil {
-		return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err)
+		return nil, err
 	}
-	localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm)))
-	localStore.Validators = append(localStore.Validators, fh)
+	localStore := chunk.NewValidatorStore(db, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm)), fh)
 
 	netStore, err := storage.NewNetStore(localStore, nil)
 	if err != nil {
 		return nil, err
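The test helper above no longer appends to LocalStore.Validators; it wraps the raw localstore in chunk.NewValidatorStore and lists the validators up front. Below is a hedged sketch of the same composition outside the feeds package, using only constructors that appear in this diff; the data directory and zero base key are placeholders.

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// newValidatingStore builds a chunk store whose Put calls are checked by the
// content-address validator before anything reaches disk.
func newValidatingStore(datadir string) (chunk.Store, error) {
	db, err := localstore.New(datadir, make([]byte, 32), nil)
	if err != nil {
		return nil, err
	}
	return chunk.NewValidatorStore(
		db,
		storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)),
	), nil
}

func main() {
	store, err := newValidatingStore("/tmp/swarm-chunks")
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()
}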
@ -21,6 +21,9 @@ import (
 	"io"
 	"sort"
 	"sync"
 
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
 )
 
 /*

@ -44,6 +47,7 @@ const (
 type FileStore struct {
 	ChunkStore
 	hashFunc SwarmHasher
+	tags     *chunk.Tags
 }
 
 type FileStoreParams struct {

@ -57,22 +61,20 @@ func NewFileStoreParams() *FileStoreParams {
 }
 
 // for testing locally
-func NewLocalFileStore(datadir string, basekey []byte) (*FileStore, error) {
-	params := NewDefaultLocalStoreParams()
-	params.Init(datadir)
-	localStore, err := NewLocalStore(params, nil)
+func NewLocalFileStore(datadir string, basekey []byte, tags *chunk.Tags) (*FileStore, error) {
+	localStore, err := localstore.New(datadir, basekey, nil)
 	if err != nil {
 		return nil, err
 	}
-	localStore.Validators = append(localStore.Validators, NewContentAddressValidator(MakeHashFunc(DefaultHash)))
-	return NewFileStore(localStore, NewFileStoreParams()), nil
+	return NewFileStore(chunk.NewValidatorStore(localStore, NewContentAddressValidator(MakeHashFunc(DefaultHash))), NewFileStoreParams(), tags), nil
 }
 
-func NewFileStore(store ChunkStore, params *FileStoreParams) *FileStore {
+func NewFileStore(store ChunkStore, params *FileStoreParams, tags *chunk.Tags) *FileStore {
 	hashFunc := MakeHashFunc(params.Hash)
 	return &FileStore{
 		ChunkStore: store,
 		hashFunc:   hashFunc,
+		tags:       tags,
 	}
 }

@ -83,7 +85,11 @@ func NewFileStore(store ChunkStore, params *FileStoreParams) *FileStore {
 // It returns a reader with the chunk data and whether the content was encrypted
 func (f *FileStore) Retrieve(ctx context.Context, addr Address) (reader *LazyChunkReader, isEncrypted bool) {
 	isEncrypted = len(addr) > f.hashFunc().Size()
-	getter := NewHasherStore(f.ChunkStore, f.hashFunc, isEncrypted)
+	tag, err := f.tags.GetFromContext(ctx)
+	if err != nil {
+		tag = chunk.NewTag(0, "ephemeral-retrieval-tag", 0)
+	}
+	getter := NewHasherStore(f.ChunkStore, f.hashFunc, isEncrypted, tag)
 	reader = TreeJoin(ctx, addr, getter, 0)
 	return
 }

@ -91,8 +97,17 @@ func (f *FileStore) Retrieve(ctx context.Context, addr Address) (reader *LazyChu
 // Store is a public API. Main entry point for document storage directly. Used by the
 // FS-aware API and httpaccess
 func (f *FileStore) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr Address, wait func(context.Context) error, err error) {
-	putter := NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt)
-	return PyramidSplit(ctx, data, putter, putter)
+	tag, err := f.tags.GetFromContext(ctx)
+	if err != nil {
+		// some of the parts of the codebase, namely the manifest trie, do not store the context
+		// of the original request nor the tag with the trie, recalculating the trie hence
+		// loses the tag uid. thus we create an ephemeral tag here for that purpose
+		tag = chunk.NewTag(0, "", 0)
+		//return nil, nil, err
+	}
+	putter := NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt, tag)
+	return PyramidSplit(ctx, data, putter, putter, tag)
 }
 
 func (f *FileStore) HashSize() int {

@ -101,12 +116,14 @@ func (f *FileStore) HashSize() int {
 
 // GetAllReferences is a public API. This endpoint returns all chunk hashes (only) for a given file
 func (f *FileStore) GetAllReferences(ctx context.Context, data io.Reader, toEncrypt bool) (addrs AddressCollection, err error) {
+	tag := chunk.NewTag(0, "ephemeral-tag", 0) //this tag is just a mock ephemeral tag since we don't want to save these results
+
 	// create a special kind of putter, which only will store the references
 	putter := &hashExplorer{
-		hasherStore: NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt),
+		hasherStore: NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt, tag),
 	}
 	// do the actual splitting anyway, no way around it
-	_, wait, err := PyramidSplit(ctx, data, putter, putter)
+	_, wait, err := PyramidSplit(ctx, data, putter, putter, tag)
 	if err != nil {
 		return nil, err
 	}
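FileStore now threads a chunk.Tag through every split so upload progress can be counted, and falls back to an ephemeral tag when the context carries none (the manifest-trie case called out in the comment above). Below is a hedged usage sketch assembled only from constructors visible in this diff; the temporary directory, the payload size and the Hex call on the returned address are assumptions made for the example.

package main

import (
	"bytes"
	"context"
	"io/ioutil"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	dir, err := ioutil.TempDir("", "swarm-filestore-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// NewLocalFileStore now takes the tag registry as its third argument.
	fileStore, err := storage.NewLocalFileStore(dir, make([]byte, 32), chunk.NewTags())
	if err != nil {
		log.Fatal(err)
	}

	// No tag travels with this context, so Store creates an ephemeral one
	// internally, as in the hunk above.
	data := bytes.NewReader(make([]byte, 4096))
	addr, wait, err := fileStore.Store(context.Background(), data, 4096, false)
	if err != nil {
		log.Fatal(err)
	}
	if err := wait(context.Background()); err != nil {
		log.Fatal(err)
	}
	log.Printf("stored under %s", addr.Hex())
}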
@ -22,8 +22,11 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"testing"
 
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
 )

@ -35,21 +38,18 @@ func TestFileStorerandom(t *testing.T) {
 }
 
 func testFileStoreRandom(toEncrypt bool, t *testing.T) {
-	tdb, cleanup, err := newTestDbStore(false, false)
-	defer cleanup()
+	dir, err := ioutil.TempDir("", "swarm-storage-")
 	if err != nil {
-		t.Fatalf("init dbStore failed: %v", err)
+		t.Fatal(err)
 	}
-	db := tdb.LDBStore
-	db.setCapacity(50000)
-	memStore := NewMemStore(NewDefaultStoreParams(), db)
-	localStore := &LocalStore{
-		memStore: memStore,
-		DbStore:  db,
+	defer os.RemoveAll(dir)
+	localStore, err := localstore.New(dir, make([]byte, 32), nil)
+	if err != nil {
+		t.Fatal(err)
 	}
-	fileStore := NewFileStore(localStore, NewFileStoreParams())
-	defer os.RemoveAll("/tmp/bzz")
+	defer localStore.Close()
+
+	fileStore := NewFileStore(localStore, NewFileStoreParams(), chunk.NewTags())
 
 	slice := testutil.RandomBytes(1, testDataSize)
 	ctx := context.TODO()

@ -76,9 +76,8 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) {
 	if !bytes.Equal(slice, resultSlice) {
 		t.Fatalf("Comparison error.")
 	}
-	ioutil.WriteFile("/tmp/slice.bzz.16M", slice, 0666)
-	ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666)
-	localStore.memStore = NewMemStore(NewDefaultStoreParams(), db)
+	ioutil.WriteFile(filepath.Join(dir, "slice.bzz.16M"), slice, 0666)
+	ioutil.WriteFile(filepath.Join(dir, "result.bzz.16M"), resultSlice, 0666)
 	resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key)
 	if isEncrypted != toEncrypt {
 		t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)

@ -104,18 +103,18 @@ func TestFileStoreCapacity(t *testing.T) {
 }
 
 func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
-	tdb, cleanup, err := newTestDbStore(false, false)
-	defer cleanup()
+	dir, err := ioutil.TempDir("", "swarm-storage-")
 	if err != nil {
-		t.Fatalf("init dbStore failed: %v", err)
+		t.Fatal(err)
 	}
-	db := tdb.LDBStore
-	memStore := NewMemStore(NewDefaultStoreParams(), db)
-	localStore := &LocalStore{
-		memStore: memStore,
-		DbStore:  db,
+	defer os.RemoveAll(dir)
+	localStore, err := localstore.New(dir, make([]byte, 32), nil)
+	if err != nil {
+		t.Fatal(err)
 	}
-	fileStore := NewFileStore(localStore, NewFileStoreParams())
+	defer localStore.Close()
+
+	fileStore := NewFileStore(localStore, NewFileStoreParams(), chunk.NewTags())
 	slice := testutil.RandomBytes(1, testDataSize)
 	ctx := context.TODO()
 	key, wait, err := fileStore.Store(ctx, bytes.NewReader(slice), testDataSize, toEncrypt)

@ -141,10 +140,6 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
 	if !bytes.Equal(slice, resultSlice) {
 		t.Fatalf("Comparison error.")
 	}
-	// Clear memStore
-	memStore.setCapacity(0)
-	// check whether it is, indeed, empty
-	fileStore.ChunkStore = memStore
 	resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key)
 	if isEncrypted != toEncrypt {
 		t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)

@ -177,18 +172,18 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
 // TestGetAllReferences only tests that GetAllReferences returns an expected
 // number of references for a given file
 func TestGetAllReferences(t *testing.T) {
-	tdb, cleanup, err := newTestDbStore(false, false)
-	defer cleanup()
+	dir, err := ioutil.TempDir("", "swarm-storage-")
 	if err != nil {
-		t.Fatalf("init dbStore failed: %v", err)
+		t.Fatal(err)
 	}
-	db := tdb.LDBStore
-	memStore := NewMemStore(NewDefaultStoreParams(), db)
-	localStore := &LocalStore{
-		memStore: memStore,
-		DbStore:  db,
+	defer os.RemoveAll(dir)
+	localStore, err := localstore.New(dir, make([]byte, 32), nil)
+	if err != nil {
+		t.Fatal(err)
 	}
-	fileStore := NewFileStore(localStore, NewFileStoreParams())
+	defer localStore.Close()
+
+	fileStore := NewFileStore(localStore, NewFileStoreParams(), chunk.NewTags())
 
 	// testRuns[i] and expectedLen[i] are dataSize and expected length respectively
 	testRuns := []int{1024, 8192, 16000, 30000, 1000000}
@ -28,6 +28,7 @@ import (
 type hasherStore struct {
 	store     ChunkStore
+	tag       *chunk.Tag
 	toEncrypt bool
 	hashFunc  SwarmHasher
 	hashSize  int // content hash size

@ -44,7 +45,7 @@ type hasherStore struct {
 // NewHasherStore creates a hasherStore object, which implements Putter and Getter interfaces.
 // With the HasherStore you can put and get chunk data (which is just []byte) into a ChunkStore
 // and the hasherStore will take core of encryption/decryption of data if necessary
-func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool) *hasherStore {
+func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool, tag *chunk.Tag) *hasherStore {
 	hashSize := hashFunc().Size()
 	refSize := int64(hashSize)
 	if toEncrypt {

@ -53,6 +54,7 @@ func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool) *has
 	h := &hasherStore{
 		store:     store,
+		tag:       tag,
 		toEncrypt: toEncrypt,
 		hashFunc:  hashFunc,
 		hashSize:  hashSize,

@ -93,7 +95,7 @@ func (h *hasherStore) Get(ctx context.Context, ref Reference) (ChunkData, error)
 		return nil, err
 	}
 
-	chunk, err := h.store.Get(ctx, addr)
+	chunk, err := h.store.Get(ctx, chunk.ModeGetRequest, addr)
 	if err != nil {
 		return nil, err
 	}

@ -239,11 +241,16 @@ func (h *hasherStore) newDataEncryption(key encryption.Key) encryption.Encryptio
 	return encryption.New(key, int(chunk.DefaultSize), 0, sha3.NewLegacyKeccak256)
 }
 
-func (h *hasherStore) storeChunk(ctx context.Context, chunk Chunk) {
+func (h *hasherStore) storeChunk(ctx context.Context, ch Chunk) {
 	atomic.AddUint64(&h.nrChunks, 1)
 	go func() {
+		seen, err := h.store.Put(ctx, chunk.ModePutUpload, ch)
+		h.tag.Inc(chunk.StateStored)
+		if seen {
+			h.tag.Inc(chunk.StateSeen)
+		}
 		select {
-		case h.errC <- h.store.Put(ctx, chunk):
+		case h.errC <- err:
 		case <-h.quitC:
 		}
 	}()
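storeChunk now reports per-chunk progress on the tag: every chunk that reaches the store bumps chunk.StateStored, and chunks the store had already seen additionally bump chunk.StateSeen. A tiny sketch of that counting pattern in isolation, using only the tag calls that appear in this diff; the loop and the duplicate are invented for illustration.

package main

import "github.com/ethereum/go-ethereum/swarm/chunk"

func main() {
	// uid 0, a display name, and an unknown total of 0, matching the
	// ephemeral tags created in filestore.go above.
	tag := chunk.NewTag(0, "example-upload", 0)

	// Pretend three chunks were written and one of them was a duplicate.
	for i := 0; i < 3; i++ {
		tag.Inc(chunk.StateStored)
	}
	tag.Inc(chunk.StateSeen)

	// Reading the counters back goes through the tags API, which this diff
	// does not show, so the sketch stops here.
}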
@ -21,9 +21,9 @@ import (
 	"context"
 	"testing"
 
-	"github.com/ethereum/go-ethereum/swarm/storage/encryption"
-
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/storage/encryption"
 )
 
 func TestHasherStore(t *testing.T) {

@ -43,7 +43,7 @@ func TestHasherStore(t *testing.T) {
 	for _, tt := range tests {
 		chunkStore := NewMapChunkStore()
-		hasherStore := NewHasherStore(chunkStore, MakeHashFunc(DefaultHash), tt.toEncrypt)
+		hasherStore := NewHasherStore(chunkStore, MakeHashFunc(DefaultHash), tt.toEncrypt, chunk.NewTag(0, "test-tag", 0))
 
 		// Put two random chunks into the hasherStore
 		chunkData1 := GenerateRandomChunk(int64(tt.chunkLength)).Data()

@ -107,7 +107,7 @@ func TestHasherStore(t *testing.T) {
 	}
 
 	// Check if chunk data in store is encrypted or not
-	chunkInStore, err := chunkStore.Get(ctx, hash1)
+	chunkInStore, err := chunkStore.Get(ctx, chunk.ModeGetRequest, hash1)
 	if err != nil {
 		t.Fatalf("Expected no error got \"%v\"", err)
 	}
File diff suppressed because it is too large
@ -1,788 +0,0 @@
|
|||||||
// Copyright 2016 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/testutil"
|
|
||||||
ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
type testDbStore struct {
|
|
||||||
*LDBStore
|
|
||||||
dir string
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
|
|
||||||
dir, err := ioutil.TempDir("", "bzz-storage-test")
|
|
||||||
if err != nil {
|
|
||||||
return nil, func() {}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var db *LDBStore
|
|
||||||
storeparams := NewDefaultStoreParams()
|
|
||||||
params := NewLDBStoreParams(storeparams, dir)
|
|
||||||
params.Po = testPoFunc
|
|
||||||
|
|
||||||
if mock {
|
|
||||||
globalStore := mem.NewGlobalStore()
|
|
||||||
addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
|
|
||||||
mockStore := globalStore.NewNodeStore(addr)
|
|
||||||
|
|
||||||
db, err = NewMockDbStore(params, mockStore)
|
|
||||||
} else {
|
|
||||||
db, err = NewLDBStore(params)
|
|
||||||
}
|
|
||||||
|
|
||||||
cleanup := func() {
|
|
||||||
if db != nil {
|
|
||||||
db.Close()
|
|
||||||
}
|
|
||||||
err = os.RemoveAll(dir)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("db cleanup failed: %v", err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &testDbStore{db, dir}, cleanup, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func testPoFunc(k Address) (ret uint8) {
|
|
||||||
basekey := make([]byte, 32)
|
|
||||||
return uint8(Proximity(basekey, k[:]))
|
|
||||||
}
|
|
||||||
|
|
||||||
func testDbStoreRandom(n int, mock bool, t *testing.T) {
|
|
||||||
db, cleanup, err := newTestDbStore(mock, true)
|
|
||||||
defer cleanup()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("init dbStore failed: %v", err)
|
|
||||||
}
|
|
||||||
testStoreRandom(db, n, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testDbStoreCorrect(n int, mock bool, t *testing.T) {
|
|
||||||
db, cleanup, err := newTestDbStore(mock, false)
|
|
||||||
defer cleanup()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("init dbStore failed: %v", err)
|
|
||||||
}
|
|
||||||
testStoreCorrect(db, n, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMarkAccessed(t *testing.T) {
|
|
||||||
db, cleanup, err := newTestDbStore(false, true)
|
|
||||||
defer cleanup()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("init dbStore failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
h := GenerateRandomChunk(chunk.DefaultSize)
|
|
||||||
|
|
||||||
db.Put(context.Background(), h)
|
|
||||||
|
|
||||||
var index dpaDBIndex
|
|
||||||
addr := h.Address()
|
|
||||||
idxk := getIndexKey(addr)
|
|
||||||
|
|
||||||
idata, err := db.db.Get(idxk)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
decodeIndex(idata, &index)
|
|
||||||
|
|
||||||
if index.Access != 0 {
|
|
||||||
t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
|
|
||||||
}
|
|
||||||
|
|
||||||
db.MarkAccessed(addr)
|
|
||||||
db.writeCurrentBatch()
|
|
||||||
|
|
||||||
idata, err = db.db.Get(idxk)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
decodeIndex(idata, &index)
|
|
||||||
|
|
||||||
if index.Access != 1 {
|
|
||||||
t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDbStoreRandom_1(t *testing.T) {
|
|
||||||
testDbStoreRandom(1, false, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDbStoreCorrect_1(t *testing.T) {
|
|
||||||
testDbStoreCorrect(1, false, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDbStoreRandom_1k(t *testing.T) {
|
|
||||||
testDbStoreRandom(1000, false, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDbStoreCorrect_1k(t *testing.T) {
|
|
||||||
testDbStoreCorrect(1000, false, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMockDbStoreRandom_1(t *testing.T) {
|
|
||||||
testDbStoreRandom(1, true, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMockDbStoreCorrect_1(t *testing.T) {
|
|
||||||
testDbStoreCorrect(1, true, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMockDbStoreRandom_1k(t *testing.T) {
|
|
||||||
testDbStoreRandom(1000, true, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMockDbStoreCorrect_1k(t *testing.T) {
|
|
||||||
testDbStoreCorrect(1000, true, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testDbStoreNotFound(t *testing.T, mock bool) {
|
|
||||||
db, cleanup, err := newTestDbStore(mock, false)
|
|
||||||
defer cleanup()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("init dbStore failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = db.Get(context.TODO(), ZeroAddr)
|
|
||||||
if err != ErrChunkNotFound {
|
|
||||||
t.Errorf("Expected ErrChunkNotFound, got %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDbStoreNotFound(t *testing.T) {
|
|
||||||
testDbStoreNotFound(t, false)
|
|
||||||
}
|
|
||||||
func TestMockDbStoreNotFound(t *testing.T) {
|
|
||||||
testDbStoreNotFound(t, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testIterator(t *testing.T, mock bool) {
|
|
||||||
var i int
|
|
||||||
var poc uint
|
|
||||||
chunkcount := 32
|
|
||||||
chunkkeys := NewAddressCollection(chunkcount)
|
|
||||||
chunkkeysResults := NewAddressCollection(chunkcount)
|
|
||||||
|
|
||||||
db, cleanup, err := newTestDbStore(mock, false)
|
|
||||||
defer cleanup()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("init dbStore failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
chunks := GenerateRandomChunks(chunk.DefaultSize, chunkcount)
|
|
||||||
|
|
||||||
for i = 0; i < len(chunks); i++ {
|
|
||||||
chunkkeys[i] = chunks[i].Address()
|
|
||||||
err := db.Put(context.TODO(), chunks[i])
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("dbStore.Put failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i = 0; i < len(chunkkeys); i++ {
|
|
||||||
log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
|
|
||||||
}
|
|
||||||
i = 0
|
|
||||||
for poc = 0; poc <= 255; poc++ {
|
|
||||||
err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
|
|
||||||
log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
|
|
||||||
chunkkeysResults[n] = k
|
|
||||||
i++
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Iterator call failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i = 0; i < chunkcount; i++ {
|
|
||||||
if !bytes.Equal(chunkkeys[i], chunkkeysResults[i]) {
|
|
||||||
t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeysResults[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIterator(t *testing.T) {
|
|
||||||
testIterator(t, false)
|
|
||||||
}
|
|
||||||
func TestMockIterator(t *testing.T) {
|
|
||||||
testIterator(t, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchmarkDbStorePut(n int, mock bool, b *testing.B) {
|
|
||||||
db, cleanup, err := newTestDbStore(mock, true)
|
|
||||||
defer cleanup()
|
|
||||||
if err != nil {
|
|
||||||
b.Fatalf("init dbStore failed: %v", err)
|
|
||||||
}
|
|
||||||
benchmarkStorePut(db, n, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchmarkDbStoreGet(n int, mock bool, b *testing.B) {
|
|
||||||
db, cleanup, err := newTestDbStore(mock, true)
|
|
||||||
defer cleanup()
|
|
||||||
if err != nil {
|
|
||||||
b.Fatalf("init dbStore failed: %v", err)
|
|
||||||
}
|
|
||||||
benchmarkStoreGet(db, n, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkDbStorePut_500(b *testing.B) {
|
|
||||||
benchmarkDbStorePut(500, false, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkDbStoreGet_500(b *testing.B) {
|
|
||||||
benchmarkDbStoreGet(500, false, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkMockDbStorePut_500(b *testing.B) {
|
|
||||||
benchmarkDbStorePut(500, true, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkMockDbStoreGet_500(b *testing.B) {
|
|
||||||
benchmarkDbStoreGet(500, true, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
|
|
||||||
// retrieve them, provided we don't hit the garbage collection
|
|
||||||
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
|
|
||||||
capacity := 50
|
|
||||||
n := 10
|
|
||||||
|
|
||||||
ldb, cleanup := newLDBStore(t)
|
|
||||||
ldb.setCapacity(uint64(capacity))
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
chunks, err := mputRandomChunks(ldb, n)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
|
||||||
|
|
||||||
for _, ch := range chunks {
|
|
||||||
ret, err := ldb.Get(context.TODO(), ch.Address())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !bytes.Equal(ret.Data(), ch.Data()) {
|
|
||||||
t.Fatal("expected to get the same data back, but got smth else")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ldb.entryCnt != uint64(n) {
|
|
||||||
t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
|
|
||||||
}
|
|
||||||
|
|
||||||
if ldb.accessCnt != uint64(2*n) {
|
|
||||||
t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and
|
|
||||||
// retrieve only some of them, because garbage collection must have partially cleared the store
|
|
||||||
// Also tests that we can delete chunks and that we can trigger garbage collection
|
|
||||||
func TestLDBStoreCollectGarbage(t *testing.T) {
|
|
||||||
|
|
||||||
// below max ronud
|
|
||||||
initialCap := defaultMaxGCRound / 100
|
|
||||||
cap := initialCap / 2
|
|
||||||
t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
|
|
||||||
|
|
||||||
if testutil.RaceEnabled {
|
|
||||||
t.Skip("only the simplest case run as others are flaky with race")
|
|
||||||
// Note: some tests fail consistently and even locally with `-race`
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
|
|
||||||
|
|
||||||
// at max round
|
|
||||||
cap = initialCap
|
|
||||||
t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
|
|
||||||
t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
|
|
||||||
|
|
||||||
// more than max around, not on threshold
|
|
||||||
cap = initialCap + 500
|
|
||||||
t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
|
|
||||||
t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func testLDBStoreCollectGarbage(t *testing.T) {
|
|
||||||
params := strings.Split(t.Name(), "/")
|
|
||||||
capacity, err := strconv.Atoi(params[2])
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
n, err := strconv.Atoi(params[3])
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ldb, cleanup := newLDBStore(t)
|
|
||||||
ldb.setCapacity(uint64(capacity))
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
// retrieve the gc round target count for the db capacity
|
|
||||||
ldb.startGC(capacity)
|
|
||||||
roundTarget := ldb.gc.target
|
|
||||||
|
|
||||||
// split put counts to gc target count threshold, and wait for gc to finish in between
|
|
||||||
var allChunks []Chunk
|
|
||||||
remaining := n
|
|
||||||
for remaining > 0 {
|
|
||||||
var putCount int
|
|
||||||
if remaining < roundTarget {
|
|
||||||
putCount = remaining
|
|
||||||
} else {
|
|
||||||
putCount = roundTarget
|
|
||||||
}
|
|
||||||
remaining -= putCount
|
|
||||||
chunks, err := mputRandomChunks(ldb, putCount)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err.Error())
|
|
||||||
}
|
|
||||||
allChunks = append(allChunks, chunks...)
|
|
||||||
ldb.lock.RLock()
|
|
||||||
log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
|
|
||||||
ldb.lock.RUnlock()
|
|
||||||
|
|
||||||
waitGc(ldb)
|
|
||||||
}
|
|
||||||
|
|
||||||
// attempt gets on all put chunks
|
|
||||||
var missing int
|
|
||||||
for _, ch := range allChunks {
|
|
||||||
ret, err := ldb.Get(context.TODO(), ch.Address())
|
|
||||||
if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
|
|
||||||
missing++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !bytes.Equal(ret.Data(), ch.Data()) {
|
|
||||||
t.Fatal("expected to get the same data back, but got smth else")
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Trace("got back chunk", "chunk", ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
// all surplus chunks should be missing
|
|
||||||
expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
|
|
||||||
if missing != expectMissing {
|
|
||||||
t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
|
|
||||||
func TestLDBStoreAddRemove(t *testing.T) {
|
|
||||||
ldb, cleanup := newLDBStore(t)
|
|
||||||
ldb.setCapacity(200)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
n := 100
|
|
||||||
chunks, err := mputRandomChunks(ldb, n)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
// delete all even index chunks
|
|
||||||
if i%2 == 0 {
|
|
||||||
ldb.Delete(chunks[i].Address())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
ret, err := ldb.Get(context.TODO(), chunks[i].Address())
|
|
||||||
|
|
||||||
if i%2 == 0 {
|
|
||||||
// expect even chunks to be missing
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected chunk to be missing, but got no error")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// expect odd chunks to be retrieved successfully
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error, but got %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !bytes.Equal(ret.Data(), chunks[i].Data()) {
|
|
||||||
t.Fatal("expected to get the same data back, but got smth else")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
|
|
||||||
t.Skip("flaky with -race flag")
|
|
||||||
|
|
||||||
params := strings.Split(t.Name(), "/")
|
|
||||||
capacity, err := strconv.Atoi(params[2])
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
n, err := strconv.Atoi(params[3])
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ldb, cleanup := newLDBStore(t)
|
|
||||||
defer cleanup()
|
|
||||||
ldb.setCapacity(uint64(capacity))
|
|
||||||
|
|
||||||
// put capacity count number of chunks
|
|
||||||
chunks := make([]Chunk, n)
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
c := GenerateRandomChunk(chunk.DefaultSize)
|
|
||||||
chunks[i] = c
|
|
||||||
log.Trace("generate random chunk", "idx", i, "chunk", c)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
err := ldb.Put(context.TODO(), chunks[i])
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
waitGc(ldb)
|
|
||||||
|
|
||||||
// delete all chunks
|
|
||||||
// (only count the ones actually deleted, the rest will have been gc'd)
|
|
||||||
deletes := 0
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
if ldb.Delete(chunks[i].Address()) == nil {
|
|
||||||
deletes++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
|
||||||
|
|
||||||
if ldb.entryCnt != 0 {
|
|
||||||
t.Fatalf("ldb.entrCnt expected 0 got %v", ldb.entryCnt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// the manual deletes will have increased accesscnt, so we need to add this when we verify the current count
|
|
||||||
expAccessCnt := uint64(n)
|
|
||||||
if ldb.accessCnt != expAccessCnt {
|
|
||||||
t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// retrieve the gc round target count for the db capacity
|
|
||||||
ldb.startGC(capacity)
|
|
||||||
roundTarget := ldb.gc.target
|
|
||||||
|
|
||||||
remaining := n
|
|
||||||
var puts int
|
|
||||||
for remaining > 0 {
|
|
||||||
var putCount int
|
|
||||||
if remaining < roundTarget {
|
|
||||||
putCount = remaining
|
|
||||||
} else {
|
|
||||||
putCount = roundTarget
|
|
||||||
}
|
|
||||||
remaining -= putCount
|
|
||||||
for putCount > 0 {
|
|
||||||
ldb.Put(context.TODO(), chunks[puts])
|
|
||||||
ldb.lock.RLock()
|
|
||||||
log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
|
|
||||||
ldb.lock.RUnlock()
|
|
||||||
puts++
|
|
||||||
putCount--
|
|
||||||
}
|
|
||||||
|
|
||||||
waitGc(ldb)
|
|
||||||
}
|
|
||||||
|
|
||||||
// expect first surplus chunks to be missing, because they have the smallest access value
|
|
||||||
expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
|
|
||||||
for i := 0; i < expectMissing; i++ {
|
|
||||||
_, err := ldb.Get(context.TODO(), chunks[i].Address())
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// expect last chunks to be present, as they have the largest access value
|
|
||||||
for i := expectMissing; i < n; i++ {
|
|
||||||
ret, err := ldb.Get(context.TODO(), chunks[i].Address())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("chunk %v: expected no error, but got %s", i, err)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(ret.Data(), chunks[i].Data()) {
|
|
||||||
t.Fatal("expected to get the same data back, but got smth else")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
|
|
||||||
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
|
|
||||||
|
|
||||||
capacity := defaultMaxGCRound / 100 * 2
|
|
||||||
n := capacity - 1
|
|
||||||
|
|
||||||
ldb, cleanup := newLDBStore(t)
|
|
||||||
ldb.setCapacity(uint64(capacity))
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
chunks, err := mputRandomChunks(ldb, n)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err.Error())
|
|
||||||
}
|
|
||||||
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
|
||||||
|
|
||||||
// set first added capacity/2 chunks to highest accesscount
|
|
||||||
for i := 0; i < capacity/2; i++ {
|
|
||||||
_, err := ldb.Get(context.TODO(), chunks[i].Address())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("fail add chunk #%d - %s: %v", i, chunks[i].Address(), err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_, err = mputRandomChunks(ldb, 2)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// wait for garbage collection to kick in on the responsible actor
|
|
||||||
waitGc(ldb)
|
|
||||||
|
|
||||||
var missing int
|
|
||||||
for i, ch := range chunks[2 : capacity/2] {
|
|
||||||
ret, err := ldb.Get(context.TODO(), ch.Address())
|
|
||||||
if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
|
|
||||||
t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !bytes.Equal(ret.Data(), ch.Data()) {
|
|
||||||
t.Fatal("expected to get the same data back, but got smth else")
|
|
||||||
}
|
|
||||||
log.Trace("got back chunk", "chunk", ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCleanIndex(t *testing.T) {
|
|
||||||
if testutil.RaceEnabled {
|
|
||||||
t.Skip("disabled because it times out with race detector")
|
|
||||||
}
|
|
||||||
|
|
||||||
capacity := 5000
|
|
||||||
n := 3
|
|
||||||
|
|
||||||
ldb, cleanup := newLDBStore(t)
|
|
||||||
ldb.setCapacity(uint64(capacity))
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
chunks, err := mputRandomChunks(ldb, n)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// remove the data of the first chunk
|
|
||||||
po := ldb.po(chunks[0].Address()[:])
|
|
||||||
dataKey := make([]byte, 10)
|
|
||||||
dataKey[0] = keyData
|
|
||||||
dataKey[1] = byte(po)
|
|
||||||
// dataKey[2:10] = first chunk has storageIdx 0 on [2:10]
|
|
||||||
if _, err := ldb.db.Get(dataKey); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := ldb.db.Delete(dataKey); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// remove the gc index row for the first chunk
|
|
||||||
gcFirstCorrectKey := make([]byte, 9)
|
|
||||||
gcFirstCorrectKey[0] = keyGCIdx
|
|
||||||
if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// warp the gc data of the second chunk
|
|
||||||
// this data should be correct again after the clean
|
|
||||||
gcSecondCorrectKey := make([]byte, 9)
|
|
||||||
gcSecondCorrectKey[0] = keyGCIdx
|
|
||||||
binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
|
|
||||||
gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
|
|
||||||
copy(warpedGCVal[1:], gcSecondCorrectVal)
|
|
||||||
if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ldb.CleanGCIndex(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// the index without corresponding data should have been deleted
|
|
||||||
idxKey := make([]byte, 33)
|
|
||||||
idxKey[0] = keyIndex
|
|
||||||
copy(idxKey[1:], chunks[0].Address())
|
|
||||||
if _, err := ldb.db.Get(idxKey); err == nil {
|
|
||||||
t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
// the two other indices should be present
|
|
||||||
copy(idxKey[1:], chunks[1].Address())
|
|
||||||
if _, err := ldb.db.Get(idxKey); err != nil {
|
|
||||||
t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
copy(idxKey[1:], chunks[2].Address())
|
|
||||||
if _, err := ldb.db.Get(idxKey); err != nil {
|
|
||||||
t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
// first gc index should still be gone
|
|
||||||
if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
|
|
||||||
t.Fatalf("expected gc 0 idx to be pruned: %v", idxKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
// second gc index should still be fixed
|
|
||||||
if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
|
|
||||||
t.Fatalf("expected gc 1 idx to be present: %v", idxKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
// third gc index should be unchanged
|
|
||||||
binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
|
|
||||||
if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
|
|
||||||
t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
c, err := ldb.db.Get(keyEntryCnt)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
// entrycount should now be one less
|
|
||||||
entryCount := binary.BigEndian.Uint64(c)
|
|
||||||
if entryCount != 2 {
|
|
||||||
t.Fatalf("expected entrycnt to be 2, was %d", c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// the chunks might accidentally be in the same bin
|
|
||||||
// if so that bin counter will now be 2 - the highest added index.
|
|
||||||
// if not, the total of them will be 3
|
|
||||||
poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
|
|
||||||
if poBins[0] == poBins[1] {
|
|
||||||
poBins = poBins[:1]
|
|
||||||
}
|
|
||||||
|
|
||||||
var binTotal uint64
|
|
||||||
var currentBin [2]byte
|
|
||||||
currentBin[0] = keyDistanceCnt
|
|
||||||
if len(poBins) == 1 {
|
|
||||||
currentBin[1] = poBins[0]
|
|
||||||
c, err := ldb.db.Get(currentBin[:])
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
|
|
||||||
}
|
|
||||||
binCount := binary.BigEndian.Uint64(c)
|
|
||||||
if binCount != 2 {
|
|
||||||
t.Fatalf("expected entrycnt to be 2, was %d", binCount)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for _, bin := range poBins {
|
|
||||||
currentBin[1] = bin
|
|
||||||
c, err := ldb.db.Get(currentBin[:])
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
|
|
||||||
}
|
|
||||||
binCount := binary.BigEndian.Uint64(c)
|
|
||||||
binTotal += binCount
|
|
||||||
|
|
||||||
}
|
|
||||||
if binTotal != 3 {
|
|
||||||
t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// check that the iterator quits properly
|
|
||||||
chunks, err = mputRandomChunks(ldb, 4100)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
po = ldb.po(chunks[4099].Address()[:])
|
|
||||||
dataKey = make([]byte, 10)
|
|
||||||
dataKey[0] = keyData
|
|
||||||
dataKey[1] = byte(po)
|
|
||||||
binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
|
|
||||||
if _, err := ldb.db.Get(dataKey); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := ldb.db.Delete(dataKey); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ldb.CleanGCIndex(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// entrycount should now be one less of added chunks
|
|
||||||
c, err = ldb.db.Get(keyEntryCnt)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
|
|
||||||
}
|
|
||||||
entryCount = binary.BigEndian.Uint64(c)
|
|
||||||
if entryCount != 4099+2 {
|
|
||||||
t.Fatalf("expected entrycnt to be 2, was %d", c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note: waitGc does not guarantee that we wait 1 GC round; it only
|
|
||||||
// guarantees that if the GC is running we wait for that run to finish
|
|
||||||
// ticket: https://github.com/ethersphere/go-ethereum/issues/1151
|
|
||||||
func waitGc(ldb *LDBStore) {
|
|
||||||
<-ldb.gc.runC
|
|
||||||
ldb.gc.runC <- struct{}{}
|
|
||||||
}
|
|
@ -1,251 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
    "context"
    "path/filepath"
    "sync"

    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/swarm/log"
    "github.com/ethereum/go-ethereum/swarm/storage/mock"
)

type LocalStoreParams struct {
    *StoreParams
    ChunkDbPath string
    Validators  []ChunkValidator `toml:"-"`
}

func NewDefaultLocalStoreParams() *LocalStoreParams {
    return &LocalStoreParams{
        StoreParams: NewDefaultStoreParams(),
    }
}

//this can only finally be set after all config options (file, cmd line, env vars)
//have been evaluated
func (p *LocalStoreParams) Init(path string) {
    if p.ChunkDbPath == "" {
        p.ChunkDbPath = filepath.Join(path, "chunks")
    }
}

// LocalStore is a combination of inmemory db over a disk persisted db
// implements a Get/Put with fallback (caching) logic using any 2 ChunkStores
type LocalStore struct {
    Validators []ChunkValidator
    memStore   *MemStore
    DbStore    *LDBStore
    mu         sync.Mutex
}

// This constructor uses MemStore and DbStore as components
func NewLocalStore(params *LocalStoreParams, mockStore *mock.NodeStore) (*LocalStore, error) {
    ldbparams := NewLDBStoreParams(params.StoreParams, params.ChunkDbPath)
    dbStore, err := NewMockDbStore(ldbparams, mockStore)
    if err != nil {
        return nil, err
    }
    return &LocalStore{
        memStore:   NewMemStore(params.StoreParams, dbStore),
        DbStore:    dbStore,
        Validators: params.Validators,
    }, nil
}

func NewTestLocalStoreForAddr(params *LocalStoreParams) (*LocalStore, error) {
    ldbparams := NewLDBStoreParams(params.StoreParams, params.ChunkDbPath)
    dbStore, err := NewLDBStore(ldbparams)
    if err != nil {
        return nil, err
    }
    localStore := &LocalStore{
        memStore:   NewMemStore(params.StoreParams, dbStore),
        DbStore:    dbStore,
        Validators: params.Validators,
    }
    return localStore, nil
}

// isValid returns true if chunk passes any of the LocalStore Validators.
// isValid also returns true if LocalStore has no Validators.
func (ls *LocalStore) isValid(chunk Chunk) bool {
    // by default chunks are valid. if we have 0 validators, then all chunks are valid.
    valid := true

    // ls.Validators contains a list of one validator per chunk type.
    // if one validator succeeds, then the chunk is valid
    for _, v := range ls.Validators {
        if valid = v.Validate(chunk); valid {
            break
        }
    }
    return valid
}

// Put is responsible for doing validation and storage of the chunk
// by using configured ChunkValidators, MemStore and LDBStore.
// If the chunk is not valid, its GetErrored function will
// return ErrChunkInvalid.
// This method will check if the chunk is already in the MemStore
// and it will return it if it is. If there is an error from
// the MemStore.Get, it will be returned by calling GetErrored
// on the chunk.
// This method is responsible for closing Chunk.ReqC channel
// when the chunk is stored in memstore.
// After the LDBStore.Put, it is ensured that the MemStore
// contains the chunk with the same data, but nil ReqC channel.
func (ls *LocalStore) Put(ctx context.Context, chunk Chunk) error {
    if !ls.isValid(chunk) {
        return ErrChunkInvalid
    }

    log.Trace("localstore.put", "key", chunk.Address())
    ls.mu.Lock()
    defer ls.mu.Unlock()

    _, err := ls.memStore.Get(ctx, chunk.Address())
    if err == nil {
        return nil
    }
    if err != nil && err != ErrChunkNotFound {
        return err
    }
    ls.memStore.Put(ctx, chunk)
    err = ls.DbStore.Put(ctx, chunk)
    return err
}

// Has queries the underlying DbStore if a chunk with the given address
// is being stored there.
// Returns true if it is stored, false if not
func (ls *LocalStore) Has(ctx context.Context, addr Address) bool {
    return ls.DbStore.Has(ctx, addr)
}

// Get(chunk *Chunk) looks up a chunk in the local stores
// This method is blocking until the chunk is retrieved
// so additional timeout may be needed to wrap this call if
// ChunkStores are remote and can have long latency
func (ls *LocalStore) Get(ctx context.Context, addr Address) (chunk Chunk, err error) {
    ls.mu.Lock()
    defer ls.mu.Unlock()

    return ls.get(ctx, addr)
}

func (ls *LocalStore) get(ctx context.Context, addr Address) (chunk Chunk, err error) {
    chunk, err = ls.memStore.Get(ctx, addr)

    if err != nil && err != ErrChunkNotFound {
        metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1)
        return nil, err
    }

    if err == nil {
        metrics.GetOrRegisterCounter("localstore.get.cachehit", nil).Inc(1)
        go ls.DbStore.MarkAccessed(addr)
        return chunk, nil
    }

    metrics.GetOrRegisterCounter("localstore.get.cachemiss", nil).Inc(1)
    chunk, err = ls.DbStore.Get(ctx, addr)
    if err != nil {
        metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1)
        return nil, err
    }

    ls.memStore.Put(ctx, chunk)
    return chunk, nil
}

func (ls *LocalStore) FetchFunc(ctx context.Context, addr Address) func(context.Context) error {
    ls.mu.Lock()
    defer ls.mu.Unlock()

    _, err := ls.get(ctx, addr)
    if err == nil {
        return nil
    }
    return func(context.Context) error {
        return err
    }
}

func (ls *LocalStore) BinIndex(po uint8) uint64 {
    return ls.DbStore.BinIndex(po)
}

func (ls *LocalStore) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error {
    return ls.DbStore.SyncIterator(from, to, po, f)
}

// Close the local store
func (ls *LocalStore) Close() {
    ls.DbStore.Close()
}

// Migrate checks the datastore schema vs the runtime schema and runs
// migrations if they don't match
func (ls *LocalStore) Migrate() error {
    actualDbSchema, err := ls.DbStore.GetSchema()
    if err != nil {
        log.Error(err.Error())
        return err
    }

    if actualDbSchema == CurrentDbSchema {
        return nil
    }

    log.Debug("running migrations for", "schema", actualDbSchema, "runtime-schema", CurrentDbSchema)

    if actualDbSchema == DbSchemaNone {
        ls.migrateFromNoneToPurity()
        actualDbSchema = DbSchemaPurity
    }

    if err := ls.DbStore.PutSchema(actualDbSchema); err != nil {
        return err
    }

    if actualDbSchema == DbSchemaPurity {
        if err := ls.migrateFromPurityToHalloween(); err != nil {
            return err
        }
        actualDbSchema = DbSchemaHalloween
    }

    if err := ls.DbStore.PutSchema(actualDbSchema); err != nil {
        return err
    }
    return nil
}

func (ls *LocalStore) migrateFromNoneToPurity() {
    // delete chunks that are not valid, i.e. chunks that do not pass
    // any of the ls.Validators
    ls.DbStore.Cleanup(func(c Chunk) bool {
        return !ls.isValid(c)
    })
}

func (ls *LocalStore) migrateFromPurityToHalloween() error {
    return ls.DbStore.CleanGCIndex()
}
204  swarm/storage/localstore/export.go
@ -0,0 +1,204 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package localstore

import (
    "archive/tar"
    "context"
    "encoding/hex"
    "fmt"
    "io"
    "io/ioutil"
    "sync"

    "github.com/ethereum/go-ethereum/swarm/chunk"
    "github.com/ethereum/go-ethereum/swarm/log"
    "github.com/ethereum/go-ethereum/swarm/shed"
)

const (
    // filename in tar archive that holds the information
    // about exported data format version
    exportVersionFilename = ".swarm-export-version"
    // legacy version for previous LDBStore
    legacyExportVersion = "1"
    // current export format version
    currentExportVersion = "2"
)

// Export writes a tar structured data to the writer of
// all chunks in the retrieval data index. It returns the
// number of chunks exported.
func (db *DB) Export(w io.Writer) (count int64, err error) {
    tw := tar.NewWriter(w)
    defer tw.Close()

    if err := tw.WriteHeader(&tar.Header{
        Name: exportVersionFilename,
        Mode: 0644,
        Size: int64(len(currentExportVersion)),
    }); err != nil {
        return 0, err
    }
    if _, err := tw.Write([]byte(currentExportVersion)); err != nil {
        return 0, err
    }

    err = db.retrievalDataIndex.Iterate(func(item shed.Item) (stop bool, err error) {
        hdr := &tar.Header{
            Name: hex.EncodeToString(item.Address),
            Mode: 0644,
            Size: int64(len(item.Data)),
        }
        if err := tw.WriteHeader(hdr); err != nil {
            return false, err
        }
        if _, err := tw.Write(item.Data); err != nil {
            return false, err
        }
        count++
        return false, nil
    }, nil)

    return count, err
}

// Import reads a tar structured data from the reader and
// stores chunks in the database. It returns the number of
// chunks imported.
func (db *DB) Import(r io.Reader, legacy bool) (count int64, err error) {
    tr := tar.NewReader(r)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    errC := make(chan error)
    doneC := make(chan struct{})
    tokenPool := make(chan struct{}, 100)
    var wg sync.WaitGroup
    go func() {
        var (
            firstFile = true
            // if exportVersionFilename file is not present
            // assume legacy version
            version = legacyExportVersion
        )
        for {
            hdr, err := tr.Next()
            if err != nil {
                if err == io.EOF {
                    break
                }
                select {
                case errC <- err:
                case <-ctx.Done():
                }
            }
            if firstFile {
                firstFile = false
                if hdr.Name == exportVersionFilename {
                    data, err := ioutil.ReadAll(tr)
                    if err != nil {
                        select {
                        case errC <- err:
                        case <-ctx.Done():
                        }
                    }
                    version = string(data)
                    continue
                }
            }

            if len(hdr.Name) != 64 {
                log.Warn("ignoring non-chunk file", "name", hdr.Name)
                continue
            }

            keybytes, err := hex.DecodeString(hdr.Name)
            if err != nil {
                log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err)
                continue
            }

            data, err := ioutil.ReadAll(tr)
            if err != nil {
                select {
                case errC <- err:
                case <-ctx.Done():
                }
            }
            key := chunk.Address(keybytes)

            var ch chunk.Chunk
            switch version {
            case legacyExportVersion:
                // LDBStore Export exported chunk data prefixed with the chunk key.
                // That is not necessary, as the key is in the chunk filename,
                // but backward compatibility needs to be preserved.
                ch = chunk.NewChunk(key, data[32:])
            case currentExportVersion:
                ch = chunk.NewChunk(key, data)
            default:
                select {
                case errC <- fmt.Errorf("unsupported export data version %q", version):
                case <-ctx.Done():
                }
            }
            tokenPool <- struct{}{}
            wg.Add(1)

            go func() {
                _, err := db.Put(ctx, chunk.ModePutUpload, ch)
                select {
                case errC <- err:
                case <-ctx.Done():
                    wg.Done()
                    <-tokenPool
                default:
                    _, err := db.Put(ctx, chunk.ModePutUpload, ch)
                    if err != nil {
                        errC <- err
                    }
                    wg.Done()
                    <-tokenPool
                }
            }()

            count++
        }
        wg.Wait()
        close(doneC)
    }()

    // wait for all chunks to be stored
    for {
        select {
        case err := <-errC:
            if err != nil {
                return count, err
            }
        case <-ctx.Done():
            return count, ctx.Err()
        default:
            select {
            case <-doneC:
                return count, nil
            default:
            }
        }
    }
}
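Since Export takes an io.Writer and Import an io.Reader, the two can be wired together without staging the archive on disk. A minimal sketch under that assumption; migrateChunks, src, and dst are illustrative names, not part of this change:

package main

import (
    "io"

    "github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// migrateChunks streams every chunk from src into dst using the tar-based
// export format defined above. io.Pipe connects the writer and reader ends,
// so the archive is never buffered in full.
func migrateChunks(src, dst *localstore.DB) (int64, error) {
    pr, pw := io.Pipe()

    go func() {
        // Close the pipe with Export's error so Import observes a failure too.
        _, err := src.Export(pw)
        pw.CloseWithError(err)
    }()

    // false: the stream carries the current export version, not the legacy
    // LDBStore format.
    return dst.Import(pr, false)
}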
80  swarm/storage/localstore/export_test.go
@ -0,0 +1,80 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package localstore

import (
    "bytes"
    "context"
    "testing"

    "github.com/ethereum/go-ethereum/swarm/chunk"
)

// TestExportImport constructs two databases, one to put and export
// chunks and another one to import and validate that all chunks are
// imported.
func TestExportImport(t *testing.T) {
    db1, cleanup1 := newTestDB(t, nil)
    defer cleanup1()

    var chunkCount = 100

    chunks := make(map[string][]byte, chunkCount)
    for i := 0; i < chunkCount; i++ {
        ch := generateTestRandomChunk()

        _, err := db1.Put(context.Background(), chunk.ModePutUpload, ch)
        if err != nil {
            t.Fatal(err)
        }
        chunks[string(ch.Address())] = ch.Data()
    }

    var buf bytes.Buffer

    c, err := db1.Export(&buf)
    if err != nil {
        t.Fatal(err)
    }
    wantChunksCount := int64(len(chunks))
    if c != wantChunksCount {
        t.Errorf("got export count %v, want %v", c, wantChunksCount)
    }

    db2, cleanup2 := newTestDB(t, nil)
    defer cleanup2()

    c, err = db2.Import(&buf, false)
    if err != nil {
        t.Fatal(err)
    }
    if c != wantChunksCount {
        t.Errorf("got import count %v, want %v", c, wantChunksCount)
    }

    for a, want := range chunks {
        addr := chunk.Address([]byte(a))
        ch, err := db2.Get(context.Background(), chunk.ModeGetRequest, addr)
        if err != nil {
            t.Fatal(err)
        }
        got := ch.Data()
        if !bytes.Equal(got, want) {
            t.Fatalf("chunk %s: got data %x, want %x", addr.Hex(), got, want)
        }
    }
}
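The test above covers the current export format; an archive produced by the previous LDBStore exporter is fed through the same Import call with the legacy flag set. A short sketch under that assumption; importLegacyArchive and the path parameter are placeholders:

package main

import (
    "os"

    "github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// importLegacyArchive feeds a v1 (LDBStore) export into a localstore DB.
// When the .swarm-export-version entry is missing from the tar stream,
// Import assumes the legacy format and strips the 32-byte key prefix from
// each chunk payload.
func importLegacyArchive(db *localstore.DB, path string) (int64, error) {
    f, err := os.Open(path)
    if err != nil {
        return 0, err
    }
    defer f.Close()

    return db.Import(f, true)
}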
@ -17,7 +17,10 @@
 package localstore

 import (
+    "time"
+
     "github.com/ethereum/go-ethereum/log"
+    "github.com/ethereum/go-ethereum/metrics"
     "github.com/ethereum/go-ethereum/swarm/shed"
     "github.com/syndtr/goleveldb/leveldb"
 )
@ -75,6 +78,15 @@ func (db *DB) collectGarbageWorker() {
 // the rest of the garbage as the batch size limit is reached.
 // This function is called in collectGarbageWorker.
 func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
+    metricName := "localstore.gc"
+    metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
+    defer totalTimeMetric(metricName, time.Now())
+    defer func() {
+        if err != nil {
+            metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
+        }
+    }()
+
     batch := new(leveldb.Batch)
     target := db.gcTarget()
@ -86,12 +98,17 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
     if err != nil {
         return 0, true, err
     }
+    metrics.GetOrRegisterGauge(metricName+".gcsize", nil).Update(int64(gcSize))
+
     done = true
     err = db.gcIndex.Iterate(func(item shed.Item) (stop bool, err error) {
         if gcSize-collectedCount <= target {
             return true, nil
         }

+        metrics.GetOrRegisterGauge(metricName+".storets", nil).Update(item.StoreTimestamp)
+        metrics.GetOrRegisterGauge(metricName+".accessts", nil).Update(item.AccessTimestamp)
+
         // delete from retrieve, pull, gc
         db.retrievalDataIndex.DeleteInBatch(batch, item)
         db.retrievalAccessIndex.DeleteInBatch(batch, item)
@ -109,11 +126,13 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
     if err != nil {
         return 0, false, err
     }
+    metrics.GetOrRegisterCounter(metricName+".collected-count", nil).Inc(int64(collectedCount))

     db.gcSize.PutInBatch(batch, gcSize-collectedCount)

     err = db.shed.WriteBatch(batch)
     if err != nil {
+        metrics.GetOrRegisterCounter(metricName+".writebatch.err", nil).Inc(1)
         return 0, false, err
     }
     return collectedCount, done, nil
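The instrumentation added here (a call counter on entry, a deferred error counter, and a deferred total-time timer) is the same pattern the rest of the localstore operations pick up below. An isolated sketch of that wrapping, with doWork and the metric name standing in for the instrumented body:

package main

import (
    "errors"
    "time"

    "github.com/ethereum/go-ethereum/metrics"
)

// instrumented mirrors the wrapping used by collectGarbage: count every call,
// count failures separately, and record how long each call took.
func instrumented() (err error) {
    metricName := "example.op"
    metrics.GetOrRegisterCounter(metricName, nil).Inc(1)

    start := time.Now()
    defer func() {
        metrics.GetOrRegisterResettingTimer(metricName+".total-time", nil).Update(time.Since(start))
        if err != nil {
            metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
        }
    }()

    return doWork()
}

// doWork is a placeholder for the real operation being measured.
func doWork() error {
    return errors.New("not implemented")
}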
@ -17,6 +17,7 @@
 package localstore

 import (
+    "context"
     "io/ioutil"
     "math/rand"
     "os"
@ -63,26 +64,23 @@ func testDB_collectGarbageWorker(t *testing.T) {
     })()
     defer cleanupFunc()

-    uploader := db.NewPutter(ModePutUpload)
-    syncer := db.NewSetter(ModeSetSync)
-
     addrs := make([]chunk.Address, 0)

     // upload random chunks
     for i := 0; i < chunkCount; i++ {
-        chunk := generateTestRandomChunk()
+        ch := generateTestRandomChunk()

-        err := uploader.Put(chunk)
+        _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
         if err != nil {
             t.Fatal(err)
         }

-        err = syncer.Set(chunk.Address())
+        err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
         if err != nil {
             t.Fatal(err)
         }

-        addrs = append(addrs, chunk.Address())
+        addrs = append(addrs, ch.Address())
     }

     gcTarget := db.gcTarget()
@ -110,7 +108,7 @@ func testDB_collectGarbageWorker(t *testing.T) {

     // the first synced chunk should be removed
     t.Run("get the first synced chunk", func(t *testing.T) {
-        _, err := db.NewGetter(ModeGetRequest).Get(addrs[0])
+        _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[0])
         if err != chunk.ErrChunkNotFound {
             t.Errorf("got error %v, want %v", err, chunk.ErrChunkNotFound)
         }
@ -118,7 +116,7 @@ func testDB_collectGarbageWorker(t *testing.T) {

     // last synced chunk should not be removed
     t.Run("get most recent synced chunk", func(t *testing.T) {
-        _, err := db.NewGetter(ModeGetRequest).Get(addrs[len(addrs)-1])
+        _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[len(addrs)-1])
         if err != nil {
             t.Fatal(err)
         }
@ -134,9 +132,6 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
     })
     defer cleanupFunc()

-    uploader := db.NewPutter(ModePutUpload)
-    syncer := db.NewSetter(ModeSetSync)
-
     testHookCollectGarbageChan := make(chan uint64)
     defer setTestHookCollectGarbage(func(collectedCount uint64) {
         testHookCollectGarbageChan <- collectedCount
@ -146,19 +141,19 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {

     // upload random chunks just up to the capacity
     for i := 0; i < int(db.capacity)-1; i++ {
-        chunk := generateTestRandomChunk()
+        ch := generateTestRandomChunk()

-        err := uploader.Put(chunk)
+        _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
         if err != nil {
             t.Fatal(err)
         }

-        err = syncer.Set(chunk.Address())
+        err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
         if err != nil {
             t.Fatal(err)
         }

-        addrs = append(addrs, chunk.Address())
+        addrs = append(addrs, ch.Address())
     }

     // set update gc test hook to signal when
@ -172,7 +167,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
     // request the latest synced chunk
     // to prioritize it in the gc index
     // not to be collected
-    _, err := db.NewGetter(ModeGetRequest).Get(addrs[0])
+    _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[0])
     if err != nil {
         t.Fatal(err)
     }
@ -191,11 +186,11 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
     // upload and sync another chunk to trigger
     // garbage collection
     ch := generateTestRandomChunk()
-    err = uploader.Put(ch)
+    _, err = db.Put(context.Background(), chunk.ModePutUpload, ch)
     if err != nil {
         t.Fatal(err)
     }
-    err = syncer.Set(ch.Address())
+    err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
     if err != nil {
         t.Fatal(err)
     }
@ -235,7 +230,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {

     // requested chunk should not be removed
     t.Run("get requested chunk", func(t *testing.T) {
-        _, err := db.NewGetter(ModeGetRequest).Get(addrs[0])
+        _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[0])
         if err != nil {
             t.Fatal(err)
         }
@ -243,7 +238,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {

     // the second synced chunk should be removed
     t.Run("get gc-ed chunk", func(t *testing.T) {
-        _, err := db.NewGetter(ModeGetRequest).Get(addrs[1])
+        _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[1])
         if err != chunk.ErrChunkNotFound {
             t.Errorf("got error %v, want %v", err, chunk.ErrChunkNotFound)
         }
@ -251,7 +246,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {

     // last synced chunk should not be removed
     t.Run("get most recent synced chunk", func(t *testing.T) {
-        _, err := db.NewGetter(ModeGetRequest).Get(addrs[len(addrs)-1])
+        _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[len(addrs)-1])
         if err != nil {
             t.Fatal(err)
         }
@ -275,20 +270,17 @@ func TestDB_gcSize(t *testing.T) {
         t.Fatal(err)
     }

-    uploader := db.NewPutter(ModePutUpload)
-    syncer := db.NewSetter(ModeSetSync)
-
     count := 100

     for i := 0; i < count; i++ {
-        chunk := generateTestRandomChunk()
+        ch := generateTestRandomChunk()

-        err := uploader.Put(chunk)
+        _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
         if err != nil {
             t.Fatal(err)
         }

-        err = syncer.Set(chunk.Address())
+        err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
         if err != nil {
             t.Fatal(err)
         }
@ -18,6 +18,7 @@ package localstore

 import (
     "bytes"
+    "context"
     "math/rand"
     "testing"

@ -35,29 +36,22 @@ func TestDB_pullIndex(t *testing.T) {
     db, cleanupFunc := newTestDB(t, nil)
     defer cleanupFunc()

-    uploader := db.NewPutter(ModePutUpload)
-
     chunkCount := 50

     chunks := make([]testIndexChunk, chunkCount)

     // upload random chunks
     for i := 0; i < chunkCount; i++ {
-        chunk := generateTestRandomChunk()
+        ch := generateTestRandomChunk()

-        err := uploader.Put(chunk)
+        _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
         if err != nil {
             t.Fatal(err)
         }

         chunks[i] = testIndexChunk{
-            Chunk: chunk,
-            // this timestamp is not the same as in
-            // the index, but given that uploads
-            // are sequential and that only ordering
-            // of events matter, this information is
-            // sufficient
-            storeTimestamp: now(),
+            Chunk: ch,
+            binID: uint64(i),
         }
     }

@ -70,10 +64,10 @@ func TestDB_pullIndex(t *testing.T) {
         if poi > poj {
             return false
         }
-        if chunks[i].storeTimestamp < chunks[j].storeTimestamp {
+        if chunks[i].binID < chunks[j].binID {
             return true
         }
-        if chunks[i].storeTimestamp > chunks[j].storeTimestamp {
+        if chunks[i].binID > chunks[j].binID {
             return false
         }
         return bytes.Compare(chunks[i].Address(), chunks[j].Address()) == -1
@ -87,23 +81,21 @@ func TestDB_gcIndex(t *testing.T) {
     db, cleanupFunc := newTestDB(t, nil)
     defer cleanupFunc()

-    uploader := db.NewPutter(ModePutUpload)
-
     chunkCount := 50

     chunks := make([]testIndexChunk, chunkCount)

     // upload random chunks
     for i := 0; i < chunkCount; i++ {
-        chunk := generateTestRandomChunk()
+        ch := generateTestRandomChunk()

-        err := uploader.Put(chunk)
+        _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
         if err != nil {
             t.Fatal(err)
         }

         chunks[i] = testIndexChunk{
-            Chunk: chunk,
+            Chunk: ch,
         }
     }

@ -123,9 +115,9 @@ func TestDB_gcIndex(t *testing.T) {
     })()

     t.Run("request unsynced", func(t *testing.T) {
-        chunk := chunks[1]
+        ch := chunks[1]

-        _, err := db.NewGetter(ModeGetRequest).Get(chunk.Address())
+        _, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
         if err != nil {
             t.Fatal(err)
         }
@ -140,9 +132,9 @@ func TestDB_gcIndex(t *testing.T) {
     })

     t.Run("sync one chunk", func(t *testing.T) {
-        chunk := chunks[0]
+        ch := chunks[0]

-        err := db.NewSetter(ModeSetSync).Set(chunk.Address())
+        err := db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
         if err != nil {
             t.Fatal(err)
         }
@ -154,10 +146,8 @@ func TestDB_gcIndex(t *testing.T) {
     })

     t.Run("sync all chunks", func(t *testing.T) {
-        setter := db.NewSetter(ModeSetSync)
-
         for i := range chunks {
-            err := setter.Set(chunks[i].Address())
+            err := db.Set(context.Background(), chunk.ModeSetSync, chunks[i].Address())
             if err != nil {
                 t.Fatal(err)
             }
@ -171,7 +161,7 @@ func TestDB_gcIndex(t *testing.T) {
     t.Run("request one chunk", func(t *testing.T) {
         i := 6

-        _, err := db.NewGetter(ModeGetRequest).Get(chunks[i].Address())
+        _, err := db.Get(context.Background(), chunk.ModeGetRequest, chunks[i].Address())
         if err != nil {
             t.Fatal(err)
         }
@ -189,14 +179,13 @@ func TestDB_gcIndex(t *testing.T) {
     })

     t.Run("random chunk request", func(t *testing.T) {
-        requester := db.NewGetter(ModeGetRequest)
-
         rand.Shuffle(len(chunks), func(i, j int) {
             chunks[i], chunks[j] = chunks[j], chunks[i]
         })

-        for _, chunk := range chunks {
-            _, err := requester.Get(chunk.Address())
+        for _, ch := range chunks {
+            _, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
             if err != nil {
                 t.Fatal(err)
             }
@ -212,7 +201,7 @@ func TestDB_gcIndex(t *testing.T) {
     t.Run("remove one chunk", func(t *testing.T) {
         i := 3

-        err := db.NewSetter(modeSetRemove).Set(chunks[i].Address())
+        err := db.Set(context.Background(), chunk.ModeSetRemove, chunks[i].Address())
         if err != nil {
             t.Fatal(err)
         }
@ -23,11 +23,15 @@ import (
     "time"

     "github.com/ethereum/go-ethereum/log"
+    "github.com/ethereum/go-ethereum/metrics"
     "github.com/ethereum/go-ethereum/swarm/chunk"
     "github.com/ethereum/go-ethereum/swarm/shed"
     "github.com/ethereum/go-ethereum/swarm/storage/mock"
 )

+// DB implements chunk.Store.
+var _ chunk.Store = &DB{}
+
 var (
     // ErrInvalidMode is retuned when an unknown Mode
     // is provided to the function.
@ -69,6 +73,10 @@ type DB struct {
     pullTriggers   map[uint8][]chan struct{}
     pullTriggersMu sync.RWMutex

+    // binIDs stores the latest chunk serial ID for every
+    // proximity order bin
+    binIDs shed.Uint64Vector
+
     // garbage collection index
     gcIndex shed.Index

@ -124,7 +132,10 @@ type Options struct {
 // One goroutine for writing batches is created.
 func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
     if o == nil {
-        o = new(Options)
+        // default options
+        o = &Options{
+            Capacity: 5000000,
+        }
     }
     db = &DB{
         capacity: o.Capacity,
@ -148,11 +159,23 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
     if err != nil {
         return nil, err
     }

     // Identify current storage schema by arbitrary name.
     db.schemaName, err = db.shed.NewStringField("schema-name")
     if err != nil {
         return nil, err
     }
+    schemaName, err := db.schemaName.Get()
+    if err != nil {
+        return nil, err
+    }
+    if schemaName == "" {
+        // initial new localstore run
+        err := db.schemaName.Put(DbSchemaSanctuary)
+        if err != nil {
+            return nil, err
+        }
+    }
     // Persist gc size.
     db.gcSize, err = db.shed.NewUint64Field("gc-size")
     if err != nil {
@ -165,8 +188,9 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
     )
     if o.MockStore != nil {
         encodeValueFunc = func(fields shed.Item) (value []byte, err error) {
-            b := make([]byte, 8)
-            binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+            b := make([]byte, 16)
+            binary.BigEndian.PutUint64(b[:8], fields.BinID)
+            binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
             err = o.MockStore.Put(fields.Address, fields.Data)
             if err != nil {
                 return nil, err
@ -174,25 +198,28 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
             return b, nil
         }
         decodeValueFunc = func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
-            e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
+            e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
+            e.BinID = binary.BigEndian.Uint64(value[:8])
             e.Data, err = o.MockStore.Get(keyItem.Address)
             return e, err
         }
     } else {
         encodeValueFunc = func(fields shed.Item) (value []byte, err error) {
-            b := make([]byte, 8)
-            binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+            b := make([]byte, 16)
+            binary.BigEndian.PutUint64(b[:8], fields.BinID)
+            binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
             value = append(b, fields.Data...)
             return value, nil
         }
         decodeValueFunc = func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
-            e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
-            e.Data = value[8:]
+            e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
+            e.BinID = binary.BigEndian.Uint64(value[:8])
+            e.Data = value[16:]
             return e, nil
         }
     }
-    // Index storing actual chunk address, data and store timestamp.
-    db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|Data", shed.IndexFuncs{
+    // Index storing actual chunk address, data and bin id.
+    db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|BinID|Data", shed.IndexFuncs{
         EncodeKey: func(fields shed.Item) (key []byte, err error) {
             return fields.Address, nil
         },
@ -230,33 +257,37 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
         return nil, err
     }
     // pull index allows history and live syncing per po bin
-    db.pullIndex, err = db.shed.NewIndex("PO|StoredTimestamp|Hash->nil", shed.IndexFuncs{
+    db.pullIndex, err = db.shed.NewIndex("PO|BinID->Hash", shed.IndexFuncs{
         EncodeKey: func(fields shed.Item) (key []byte, err error) {
             key = make([]byte, 41)
             key[0] = db.po(fields.Address)
-            binary.BigEndian.PutUint64(key[1:9], uint64(fields.StoreTimestamp))
-            copy(key[9:], fields.Address[:])
+            binary.BigEndian.PutUint64(key[1:9], fields.BinID)
             return key, nil
         },
         DecodeKey: func(key []byte) (e shed.Item, err error) {
-            e.Address = key[9:]
-            e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[1:9]))
+            e.BinID = binary.BigEndian.Uint64(key[1:9])
             return e, nil
         },
         EncodeValue: func(fields shed.Item) (value []byte, err error) {
-            return nil, nil
+            return fields.Address, nil
         },
         DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
+            e.Address = value
             return e, nil
         },
     })
     if err != nil {
         return nil, err
     }
+    // create a vector for bin IDs
+    db.binIDs, err = db.shed.NewUint64Vector("bin-ids")
+    if err != nil {
+        return nil, err
+    }
     // create a pull syncing triggers used by SubscribePull function
     db.pullTriggers = make(map[uint8][]chan struct{})
     // push index contains as yet unsynced chunks
-    db.pushIndex, err = db.shed.NewIndex("StoredTimestamp|Hash->nil", shed.IndexFuncs{
+    db.pushIndex, err = db.shed.NewIndex("StoreTimestamp|Hash->Tags", shed.IndexFuncs{
         EncodeKey: func(fields shed.Item) (key []byte, err error) {
             key = make([]byte, 40)
             binary.BigEndian.PutUint64(key[:8], uint64(fields.StoreTimestamp))
@ -281,17 +312,17 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
     // create a push syncing triggers used by SubscribePush function
     db.pushTriggers = make([]chan struct{}, 0)
     // gc index for removable chunk ordered by ascending last access time
-    db.gcIndex, err = db.shed.NewIndex("AccessTimestamp|StoredTimestamp|Hash->nil", shed.IndexFuncs{
+    db.gcIndex, err = db.shed.NewIndex("AccessTimestamp|BinID|Hash->nil", shed.IndexFuncs{
         EncodeKey: func(fields shed.Item) (key []byte, err error) {
             b := make([]byte, 16, 16+len(fields.Address))
             binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp))
-            binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
+            binary.BigEndian.PutUint64(b[8:16], fields.BinID)
             key = append(b, fields.Address...)
             return key, nil
         },
         DecodeKey: func(key []byte) (e shed.Item, err error) {
             e.AccessTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
-            e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[8:16]))
+            e.BinID = binary.BigEndian.Uint64(key[8:16])
             e.Address = key[16:]
             return e, nil
         },
@ -358,3 +389,12 @@ func init() {
         return time.Now().UTC().UnixNano()
     }
 }
+
+// totalTimeMetric logs a message about time between provided start time
+// and the time when the function is called and sends a resetting timer metric
+// with provided name appended with ".total-time".
+func totalTimeMetric(name string, start time.Time) {
+    totalTime := time.Since(start)
+    log.Trace(name+" total time", "time", totalTime)
+    metrics.GetOrRegisterResettingTimer(name+".total-time", nil).Update(totalTime)
+}
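The retrieval data index value now carries the bin ID ahead of the store timestamp, so the first 16 bytes of a value are two big-endian uint64 fields followed by the chunk payload. A standalone sketch of that layout, encode and decode only, with no database involved (function names are illustrative):

package main

import (
    "encoding/binary"
    "errors"
)

// encodeRetrievalValue lays out a value as BinID (8 bytes) | StoreTimestamp
// (8 bytes) | chunk data, matching the non-mock encodeValueFunc above.
func encodeRetrievalValue(binID uint64, storeTimestamp int64, data []byte) []byte {
    b := make([]byte, 16)
    binary.BigEndian.PutUint64(b[:8], binID)
    binary.BigEndian.PutUint64(b[8:16], uint64(storeTimestamp))
    return append(b, data...)
}

// decodeRetrievalValue reverses the layout above.
func decodeRetrievalValue(value []byte) (binID uint64, storeTimestamp int64, data []byte, err error) {
    if len(value) < 16 {
        return 0, 0, nil, errors.New("value too short")
    }
    binID = binary.BigEndian.Uint64(value[:8])
    storeTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
    return binID, storeTimestamp, value[16:], nil
}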
@ -18,6 +18,7 @@ package localstore

 import (
     "bytes"
+    "context"
     "fmt"
     "io/ioutil"
     "math/rand"
@ -59,23 +60,23 @@ func TestDB(t *testing.T) {
     db, cleanupFunc := newTestDB(t, nil)
     defer cleanupFunc()

-    chunk := generateTestRandomChunk()
+    ch := generateTestRandomChunk()

-    err := db.NewPutter(ModePutUpload).Put(chunk)
+    _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
     if err != nil {
         t.Fatal(err)
     }

-    got, err := db.NewGetter(ModeGetRequest).Get(chunk.Address())
+    got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
     if err != nil {
         t.Fatal(err)
     }

-    if !bytes.Equal(got.Address(), chunk.Address()) {
-        t.Errorf("got address %x, want %x", got.Address(), chunk.Address())
+    if !bytes.Equal(got.Address(), ch.Address()) {
+        t.Errorf("got address %x, want %x", got.Address(), ch.Address())
     }
-    if !bytes.Equal(got.Data(), chunk.Data()) {
-        t.Errorf("got data %x, want %x", got.Data(), chunk.Data())
+    if !bytes.Equal(got.Data(), ch.Data()) {
+        t.Errorf("got data %x, want %x", got.Data(), ch.Data())
     }
 }

@ -113,19 +114,17 @@ func TestDB_updateGCSem(t *testing.T) {
     db, cleanupFunc := newTestDB(t, nil)
     defer cleanupFunc()

-    chunk := generateTestRandomChunk()
+    ch := generateTestRandomChunk()

-    err := db.NewPutter(ModePutUpload).Put(chunk)
+    _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
     if err != nil {
         t.Fatal(err)
     }

-    getter := db.NewGetter(ModeGetRequest)
-
     // get more chunks then maxParallelUpdateGC
     // in time shorter then updateGCSleep
     for i := 0; i < 5; i++ {
-        _, err = getter.Get(chunk.Address())
+        _, err = db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
         if err != nil {
             t.Fatal(err)
         }
@ -237,71 +236,71 @@ func newRetrieveIndexesTest(db *DB, chunk chunk.Chunk, storeTimestamp, accessTim

 // newRetrieveIndexesTestWithAccess returns a test function that validates if the right
 // chunk values are in the retrieval indexes when access time must be stored.
-func newRetrieveIndexesTestWithAccess(db *DB, chunk chunk.Chunk, storeTimestamp, accessTimestamp int64) func(t *testing.T) {
+func newRetrieveIndexesTestWithAccess(db *DB, ch chunk.Chunk, storeTimestamp, accessTimestamp int64) func(t *testing.T) {
     return func(t *testing.T) {
-        item, err := db.retrievalDataIndex.Get(addressToItem(chunk.Address()))
+        item, err := db.retrievalDataIndex.Get(addressToItem(ch.Address()))
         if err != nil {
             t.Fatal(err)
         }
-        validateItem(t, item, chunk.Address(), chunk.Data(), storeTimestamp, 0)
+        validateItem(t, item, ch.Address(), ch.Data(), storeTimestamp, 0)

         if accessTimestamp > 0 {
-            item, err = db.retrievalAccessIndex.Get(addressToItem(chunk.Address()))
+            item, err = db.retrievalAccessIndex.Get(addressToItem(ch.Address()))
             if err != nil {
                 t.Fatal(err)
             }
-            validateItem(t, item, chunk.Address(), nil, 0, accessTimestamp)
+            validateItem(t, item, ch.Address(), nil, 0, accessTimestamp)
         }
     }
 }

 // newPullIndexTest returns a test function that validates if the right
 // chunk values are in the pull index.
-func newPullIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp int64, wantError error) func(t *testing.T) {
+func newPullIndexTest(db *DB, ch chunk.Chunk, binID uint64, wantError error) func(t *testing.T) {
     return func(t *testing.T) {
         item, err := db.pullIndex.Get(shed.Item{
-            Address:        chunk.Address(),
-            StoreTimestamp: storeTimestamp,
+            Address: ch.Address(),
+            BinID:   binID,
         })
         if err != wantError {
             t.Errorf("got error %v, want %v", err, wantError)
         }
         if err == nil {
-            validateItem(t, item, chunk.Address(), nil, storeTimestamp, 0)
+            validateItem(t, item, ch.Address(), nil, 0, 0)
         }
     }
 }

 // newPushIndexTest returns a test function that validates if the right
 // chunk values are in the push index.
-func newPushIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp int64, wantError error) func(t *testing.T) {
+func newPushIndexTest(db *DB, ch chunk.Chunk, storeTimestamp int64, wantError error) func(t *testing.T) {
     return func(t *testing.T) {
         item, err := db.pushIndex.Get(shed.Item{
-            Address:        chunk.Address(),
+            Address:        ch.Address(),
             StoreTimestamp: storeTimestamp,
         })
         if err != wantError {
             t.Errorf("got error %v, want %v", err, wantError)
         }
         if err == nil {
-            validateItem(t, item, chunk.Address(), nil, storeTimestamp, 0)
+            validateItem(t, item, ch.Address(), nil, storeTimestamp, 0)
         }
     }
 }

 // newGCIndexTest returns a test function that validates if the right
 // chunk values are in the push index.
-func newGCIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp, accessTimestamp int64) func(t *testing.T) {
+func newGCIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp, accessTimestamp int64, binID uint64) func(t *testing.T) {
     return func(t *testing.T) {
         item, err := db.gcIndex.Get(shed.Item{
             Address:         chunk.Address(),
-            StoreTimestamp:  storeTimestamp,
+            BinID:           binID,
             AccessTimestamp: accessTimestamp,
         })
         if err != nil {
             t.Fatal(err)
         }
-        validateItem(t, item, chunk.Address(), nil, storeTimestamp, accessTimestamp)
+        validateItem(t, item, chunk.Address(), nil, 0, accessTimestamp)
     }
 }

@ -349,7 +348,7 @@ func newIndexGCSizeTest(db *DB) func(t *testing.T) {
 // in database. It is used for index values validations.
 type testIndexChunk struct {
     chunk.Chunk
-    storeTimestamp int64
+    binID uint64
 }

 // testItemsOrder tests the order of chunks in the index. If sortFunc is not nil,
|
@ -17,45 +17,35 @@
|
|||||||
package localstore
|
package localstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/shed"
|
"github.com/ethereum/go-ethereum/swarm/shed"
|
||||||
"github.com/syndtr/goleveldb/leveldb"
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ModeGet enumerates different Getter modes.
|
|
||||||
type ModeGet int
|
|
||||||
|
|
||||||
// Getter modes.
|
|
||||||
const (
|
|
||||||
// ModeGetRequest: when accessed for retrieval
|
|
||||||
ModeGetRequest ModeGet = iota
|
|
||||||
// ModeGetSync: when accessed for syncing or proof of custody request
|
|
||||||
ModeGetSync
|
|
||||||
)
|
|
||||||
|
|
||||||
// Getter provides Get method to retrieve Chunks
|
|
||||||
// from database.
|
|
||||||
type Getter struct {
|
|
||||||
db *DB
|
|
||||||
mode ModeGet
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGetter returns a new Getter on database
|
|
||||||
// with a specific Mode.
|
|
||||||
func (db *DB) NewGetter(mode ModeGet) *Getter {
|
|
||||||
return &Getter{
|
|
||||||
mode: mode,
|
|
||||||
db: db,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a chunk from the database. If the chunk is
|
// Get returns a chunk from the database. If the chunk is
|
||||||
// not found chunk.ErrChunkNotFound will be returned.
|
// not found chunk.ErrChunkNotFound will be returned.
|
||||||
// All required indexes will be updated required by the
|
// All required indexes will be updated required by the
|
||||||
// Getter Mode.
|
// Getter Mode. Get is required to implement chunk.Store
|
||||||
func (g *Getter) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
|
// interface.
|
||||||
out, err := g.db.get(g.mode, addr)
|
func (db *DB) Get(ctx context.Context, mode chunk.ModeGet, addr chunk.Address) (ch chunk.Chunk, err error) {
|
||||||
|
metricName := fmt.Sprintf("localstore.Get.%s", mode)
|
||||||
|
|
||||||
|
metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
|
||||||
|
defer totalTimeMetric(metricName, time.Now())
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
out, err := db.get(mode, addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == leveldb.ErrNotFound {
|
if err == leveldb.ErrNotFound {
|
||||||
return nil, chunk.ErrChunkNotFound
|
return nil, chunk.ErrChunkNotFound
|
||||||
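For context on the call-site change: the Getter wrapper type is removed and the retrieval mode is now passed straight to DB.Get together with a context. A minimal sketch of the new call site, not part of this diff, assuming a *localstore.DB value named db and imports of context, swarm/chunk and swarm/storage/localstore (the helper name retrieveForRequest is illustrative):

// retrieveForRequest fetches a chunk for a retrieve request and translates
// the not-found error into a boolean for the caller.
func retrieveForRequest(db *localstore.DB, addr chunk.Address) (chunk.Chunk, bool, error) {
	ch, err := db.Get(context.Background(), chunk.ModeGetRequest, addr)
	if err == chunk.ErrChunkNotFound {
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	return ch, true, nil
}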
@ -67,7 +57,7 @@ func (g *Getter) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
|
|||||||
|
|
||||||
// get returns Item from the retrieval index
|
// get returns Item from the retrieval index
|
||||||
// and updates other indexes.
|
// and updates other indexes.
|
||||||
func (db *DB) get(mode ModeGet, addr chunk.Address) (out shed.Item, err error) {
|
func (db *DB) get(mode chunk.ModeGet, addr chunk.Address) (out shed.Item, err error) {
|
||||||
item := addressToItem(addr)
|
item := addressToItem(addr)
|
||||||
|
|
||||||
out, err = db.retrievalDataIndex.Get(item)
|
out, err = db.retrievalDataIndex.Get(item)
|
||||||
@ -76,7 +66,7 @@ func (db *DB) get(mode ModeGet, addr chunk.Address) (out shed.Item, err error) {
|
|||||||
}
|
}
|
||||||
switch mode {
|
switch mode {
|
||||||
// update the access timestamp and gc index
|
// update the access timestamp and gc index
|
||||||
case ModeGetRequest:
|
case chunk.ModeGetRequest:
|
||||||
if db.updateGCSem != nil {
|
if db.updateGCSem != nil {
|
||||||
// wait before creating new goroutines
|
// wait before creating new goroutines
|
||||||
// if updateGCSem buffer is full
|
// if updateGCSem buffer is full
|
||||||
@ -90,8 +80,14 @@ func (db *DB) get(mode ModeGet, addr chunk.Address) (out shed.Item, err error) {
|
|||||||
// for a new goroutine
|
// for a new goroutine
|
||||||
defer func() { <-db.updateGCSem }()
|
defer func() { <-db.updateGCSem }()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
metricName := "localstore.updateGC"
|
||||||
|
metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
|
||||||
|
defer totalTimeMetric(metricName, time.Now())
|
||||||
|
|
||||||
err := db.updateGC(out)
|
err := db.updateGC(out)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
|
||||||
log.Error("localstore update gc", "err", err)
|
log.Error("localstore update gc", "err", err)
|
||||||
}
|
}
|
||||||
// if gc update hook is defined, call it
|
// if gc update hook is defined, call it
|
||||||
@ -101,7 +97,8 @@ func (db *DB) get(mode ModeGet, addr chunk.Address) (out shed.Item, err error) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
// no updates to indexes
|
// no updates to indexes
|
||||||
case ModeGetSync:
|
case chunk.ModeGetSync:
|
||||||
|
case chunk.ModeGetLookup:
|
||||||
default:
|
default:
|
||||||
return out, ErrInvalidMode
|
return out, ErrInvalidMode
|
||||||
}
|
}
|
||||||
|
@ -18,8 +18,11 @@ package localstore
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestModeGetRequest validates ModeGetRequest index values on the provided DB.
|
// TestModeGetRequest validates ModeGetRequest index values on the provided DB.
|
||||||
@ -32,15 +35,13 @@ func TestModeGetRequest(t *testing.T) {
|
|||||||
return uploadTimestamp
|
return uploadTimestamp
|
||||||
})()
|
})()
|
||||||
|
|
||||||
chunk := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
err := db.NewPutter(ModePutUpload).Put(chunk)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
requester := db.NewGetter(ModeGetRequest)
|
|
||||||
|
|
||||||
// set update gc test hook to signal when
|
// set update gc test hook to signal when
|
||||||
// update gc goroutine is done by sending to
|
// update gc goroutine is done by sending to
|
||||||
// testHookUpdateGCChan channel, which is
|
// testHookUpdateGCChan channel, which is
|
||||||
@ -52,22 +53,22 @@ func TestModeGetRequest(t *testing.T) {
|
|||||||
})()
|
})()
|
||||||
|
|
||||||
t.Run("get unsynced", func(t *testing.T) {
|
t.Run("get unsynced", func(t *testing.T) {
|
||||||
got, err := requester.Get(chunk.Address())
|
got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
// wait for update gc goroutine to be done
|
// wait for update gc goroutine to be done
|
||||||
<-testHookUpdateGCChan
|
<-testHookUpdateGCChan
|
||||||
|
|
||||||
if !bytes.Equal(got.Address(), chunk.Address()) {
|
if !bytes.Equal(got.Address(), ch.Address()) {
|
||||||
t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
|
t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(got.Data(), chunk.Data()) {
|
if !bytes.Equal(got.Data(), ch.Data()) {
|
||||||
t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
|
t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, 0))
|
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, 0))
|
||||||
|
|
||||||
t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
|
t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
|
||||||
|
|
||||||
@ -75,30 +76,30 @@ func TestModeGetRequest(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
// set chunk to synced state
|
// set chunk to synced state
|
||||||
err = db.NewSetter(ModeSetSync).Set(chunk.Address())
|
err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("first get", func(t *testing.T) {
|
t.Run("first get", func(t *testing.T) {
|
||||||
got, err := requester.Get(chunk.Address())
|
got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
// wait for update gc goroutine to be done
|
// wait for update gc goroutine to be done
|
||||||
<-testHookUpdateGCChan
|
<-testHookUpdateGCChan
|
||||||
|
|
||||||
if !bytes.Equal(got.Address(), chunk.Address()) {
|
if !bytes.Equal(got.Address(), ch.Address()) {
|
||||||
t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
|
t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(got.Data(), chunk.Data()) {
|
if !bytes.Equal(got.Data(), ch.Data()) {
|
||||||
t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
|
t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, uploadTimestamp))
|
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, uploadTimestamp))
|
||||||
|
|
||||||
t.Run("gc index", newGCIndexTest(db, chunk, uploadTimestamp, uploadTimestamp))
|
t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, uploadTimestamp, 1))
|
||||||
|
|
||||||
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
||||||
|
|
||||||
@ -111,24 +112,24 @@ func TestModeGetRequest(t *testing.T) {
|
|||||||
return accessTimestamp
|
return accessTimestamp
|
||||||
})()
|
})()
|
||||||
|
|
||||||
got, err := requester.Get(chunk.Address())
|
got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
// wait for update gc goroutine to be done
|
// wait for update gc goroutine to be done
|
||||||
<-testHookUpdateGCChan
|
<-testHookUpdateGCChan
|
||||||
|
|
||||||
if !bytes.Equal(got.Address(), chunk.Address()) {
|
if !bytes.Equal(got.Address(), ch.Address()) {
|
||||||
t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
|
t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(got.Data(), chunk.Data()) {
|
if !bytes.Equal(got.Data(), ch.Data()) {
|
||||||
t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
|
t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, accessTimestamp))
|
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, accessTimestamp))
|
||||||
|
|
||||||
t.Run("gc index", newGCIndexTest(db, chunk, uploadTimestamp, accessTimestamp))
|
t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, accessTimestamp, 1))
|
||||||
|
|
||||||
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
||||||
|
|
||||||
@ -146,27 +147,27 @@ func TestModeGetSync(t *testing.T) {
|
|||||||
return uploadTimestamp
|
return uploadTimestamp
|
||||||
})()
|
})()
|
||||||
|
|
||||||
chunk := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
err := db.NewPutter(ModePutUpload).Put(chunk)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
got, err := db.NewGetter(ModeGetSync).Get(chunk.Address())
|
got, err := db.Get(context.Background(), chunk.ModeGetSync, ch.Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(got.Address(), chunk.Address()) {
|
if !bytes.Equal(got.Address(), ch.Address()) {
|
||||||
t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
|
t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(got.Data(), chunk.Data()) {
|
if !bytes.Equal(got.Data(), ch.Data()) {
|
||||||
t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
|
t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, 0))
|
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, 0))
|
||||||
|
|
||||||
t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
|
t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
|
||||||
|
|
||||||
|
@ -17,23 +17,23 @@
|
|||||||
package localstore
|
package localstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Hasser provides Has method to retrieve Chunks
|
|
||||||
// from database.
|
|
||||||
type Hasser struct {
|
|
||||||
db *DB
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHasser returns a new Hasser on database.
|
|
||||||
func (db *DB) NewHasser() *Hasser {
|
|
||||||
return &Hasser{
|
|
||||||
db: db,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Has returns true if the chunk is stored in database.
|
// Has returns true if the chunk is stored in database.
|
||||||
func (h *Hasser) Has(addr chunk.Address) (bool, error) {
|
func (db *DB) Has(ctx context.Context, addr chunk.Address) (bool, error) {
|
||||||
return h.db.retrievalDataIndex.Has(addressToItem(addr))
|
metricName := "localstore.Has"
|
||||||
|
|
||||||
|
metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
|
||||||
|
defer totalTimeMetric(metricName, time.Now())
|
||||||
|
|
||||||
|
has, err := db.retrievalDataIndex.Has(addressToItem(addr))
|
||||||
|
if err != nil {
|
||||||
|
metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
|
||||||
|
}
|
||||||
|
return has, err
|
||||||
}
|
}
|
||||||
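The Hasser wrapper disappears the same way: existence checks now go directly through DB.Has with a context. A small sketch under the same assumptions as above (helper name is illustrative, not part of this diff):

// haveAll reports whether every address in addrs is already stored locally.
func haveAll(db *localstore.DB, addrs []chunk.Address) (bool, error) {
	for _, addr := range addrs {
		has, err := db.Has(context.Background(), addr)
		if err != nil {
			return false, err
		}
		if !has {
			return false, nil
		}
	}
	return true, nil
}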
|
@ -17,7 +17,10 @@
|
|||||||
package localstore
|
package localstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestHas validates that Hasser is returning true for
|
// TestHas validates that Hasser is returning true for
|
||||||
@ -26,16 +29,14 @@ func TestHas(t *testing.T) {
|
|||||||
db, cleanupFunc := newTestDB(t, nil)
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
chunk := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
err := db.NewPutter(ModePutUpload).Put(chunk)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
hasser := db.NewHasser()
|
has, err := db.Has(context.Background(), ch.Address())
|
||||||
|
|
||||||
has, err := hasser.Has(chunk.Address())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -45,7 +46,7 @@ func TestHas(t *testing.T) {
|
|||||||
|
|
||||||
missingChunk := generateTestRandomChunk()
|
missingChunk := generateTestRandomChunk()
|
||||||
|
|
||||||
has, err = hasser.Has(missingChunk.Address())
|
has, err = db.Has(context.Background(), missingChunk.Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -17,44 +17,31 @@
|
|||||||
package localstore
|
package localstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/shed"
|
"github.com/ethereum/go-ethereum/swarm/shed"
|
||||||
"github.com/syndtr/goleveldb/leveldb"
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ModePut enumerates different Putter modes.
|
|
||||||
type ModePut int
|
|
||||||
|
|
||||||
// Putter modes.
|
|
||||||
const (
|
|
||||||
// ModePutRequest: when a chunk is received as a result of retrieve request and delivery
|
|
||||||
ModePutRequest ModePut = iota
|
|
||||||
// ModePutSync: when a chunk is received via syncing
|
|
||||||
ModePutSync
|
|
||||||
// ModePutUpload: when a chunk is created by local upload
|
|
||||||
ModePutUpload
|
|
||||||
)
|
|
||||||
|
|
||||||
// Putter provides Put method to store Chunks
|
|
||||||
// to database.
|
|
||||||
type Putter struct {
|
|
||||||
db *DB
|
|
||||||
mode ModePut
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPutter returns a new Putter on database
|
|
||||||
// with a specific Mode.
|
|
||||||
func (db *DB) NewPutter(mode ModePut) *Putter {
|
|
||||||
return &Putter{
|
|
||||||
mode: mode,
|
|
||||||
db: db,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put stores the Chunk to database and depending
|
// Put stores the Chunk to database and depending
|
||||||
// on the Putter mode, it updates required indexes.
|
// on the Putter mode, it updates required indexes.
|
||||||
func (p *Putter) Put(ch chunk.Chunk) (err error) {
|
// Put is required to implement chunk.Store
|
||||||
return p.db.put(p.mode, chunkToItem(ch))
|
// interface.
|
||||||
|
func (db *DB) Put(ctx context.Context, mode chunk.ModePut, ch chunk.Chunk) (exists bool, err error) {
|
||||||
|
metricName := fmt.Sprintf("localstore.Put.%s", mode)
|
||||||
|
|
||||||
|
metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
|
||||||
|
defer totalTimeMetric(metricName, time.Now())
|
||||||
|
|
||||||
|
exists, err = db.put(mode, chunkToItem(ch))
|
||||||
|
if err != nil {
|
||||||
|
metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
|
||||||
|
}
|
||||||
|
return exists, err
|
||||||
}
|
}
|
||||||
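Besides dropping the Putter type, Put now reports whether the chunk was already stored, which lets callers short-circuit duplicate work. A hedged sketch of a caller, not part of this diff (assumes the same imports; storeUploaded is an illustrative name):

// storeUploaded writes a locally created chunk and tells the caller whether
// it was already present, so known chunks need not be pushed again.
func storeUploaded(db *localstore.DB, ch chunk.Chunk) (seenBefore bool, err error) {
	exists, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
	if err != nil {
		return false, err
	}
	return exists, nil
}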
|
|
||||||
// put stores Item to database and updates other
|
// put stores Item to database and updates other
|
||||||
@ -62,7 +49,7 @@ func (p *Putter) Put(ch chunk.Chunk) (err error) {
|
|||||||
// of this function for the same address in parallel.
|
// of this function for the same address in parallel.
|
||||||
// Item fields Address and Data must not be
|
// Item fields Address and Data must not be
|
||||||
// with their nil values.
|
// with their nil values.
|
||||||
func (db *DB) put(mode ModePut, item shed.Item) (err error) {
|
func (db *DB) put(mode chunk.ModePut, item shed.Item) (exists bool, err error) {
|
||||||
// protect parallel updates
|
// protect parallel updates
|
||||||
db.batchMu.Lock()
|
db.batchMu.Lock()
|
||||||
defer db.batchMu.Unlock()
|
defer db.batchMu.Unlock()
|
||||||
@ -76,7 +63,7 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
|
|||||||
var triggerPushFeed bool // signal push feed subscriptions to iterate
|
var triggerPushFeed bool // signal push feed subscriptions to iterate
|
||||||
|
|
||||||
switch mode {
|
switch mode {
|
||||||
case ModePutRequest:
|
case chunk.ModePutRequest:
|
||||||
// put to indexes: retrieve, gc; it does not enter the syncpool
|
// put to indexes: retrieve, gc; it does not enter the syncpool
|
||||||
|
|
||||||
// check if the chunk already is in the database
|
// check if the chunk already is in the database
|
||||||
@ -84,20 +71,25 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
|
|||||||
i, err := db.retrievalAccessIndex.Get(item)
|
i, err := db.retrievalAccessIndex.Get(item)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
|
exists = true
|
||||||
item.AccessTimestamp = i.AccessTimestamp
|
item.AccessTimestamp = i.AccessTimestamp
|
||||||
case leveldb.ErrNotFound:
|
case leveldb.ErrNotFound:
|
||||||
|
exists = false
|
||||||
// no chunk accesses
|
// no chunk accesses
|
||||||
default:
|
default:
|
||||||
return err
|
return false, err
|
||||||
}
|
}
|
||||||
i, err = db.retrievalDataIndex.Get(item)
|
i, err = db.retrievalDataIndex.Get(item)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
|
exists = true
|
||||||
item.StoreTimestamp = i.StoreTimestamp
|
item.StoreTimestamp = i.StoreTimestamp
|
||||||
|
item.BinID = i.BinID
|
||||||
case leveldb.ErrNotFound:
|
case leveldb.ErrNotFound:
|
||||||
// no chunk accesses
|
// no chunk accesses
|
||||||
|
exists = false
|
||||||
default:
|
default:
|
||||||
return err
|
return false, err
|
||||||
}
|
}
|
||||||
if item.AccessTimestamp != 0 {
|
if item.AccessTimestamp != 0 {
|
||||||
// delete current entry from the gc index
|
// delete current entry from the gc index
|
||||||
@ -107,6 +99,12 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
|
|||||||
if item.StoreTimestamp == 0 {
|
if item.StoreTimestamp == 0 {
|
||||||
item.StoreTimestamp = now()
|
item.StoreTimestamp = now()
|
||||||
}
|
}
|
||||||
|
if item.BinID == 0 {
|
||||||
|
item.BinID, err = db.binIDs.IncInBatch(batch, uint64(db.po(item.Address)))
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
// update access timestamp
|
// update access timestamp
|
||||||
item.AccessTimestamp = now()
|
item.AccessTimestamp = now()
|
||||||
// update retrieve access index
|
// update retrieve access index
|
||||||
@ -117,36 +115,56 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
|
|||||||
|
|
||||||
db.retrievalDataIndex.PutInBatch(batch, item)
|
db.retrievalDataIndex.PutInBatch(batch, item)
|
||||||
|
|
||||||
case ModePutUpload:
|
case chunk.ModePutUpload:
|
||||||
// put to indexes: retrieve, push, pull
|
// put to indexes: retrieve, push, pull
|
||||||
|
|
||||||
item.StoreTimestamp = now()
|
exists, err = db.retrievalDataIndex.Has(item)
|
||||||
db.retrievalDataIndex.PutInBatch(batch, item)
|
if err != nil {
|
||||||
db.pullIndex.PutInBatch(batch, item)
|
return false, err
|
||||||
triggerPullFeed = true
|
}
|
||||||
db.pushIndex.PutInBatch(batch, item)
|
if !exists {
|
||||||
triggerPushFeed = true
|
item.StoreTimestamp = now()
|
||||||
|
item.BinID, err = db.binIDs.IncInBatch(batch, uint64(db.po(item.Address)))
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
db.retrievalDataIndex.PutInBatch(batch, item)
|
||||||
|
db.pullIndex.PutInBatch(batch, item)
|
||||||
|
triggerPullFeed = true
|
||||||
|
db.pushIndex.PutInBatch(batch, item)
|
||||||
|
triggerPushFeed = true
|
||||||
|
}
|
||||||
|
|
||||||
case ModePutSync:
|
case chunk.ModePutSync:
|
||||||
// put to indexes: retrieve, pull
|
// put to indexes: retrieve, pull
|
||||||
|
|
||||||
item.StoreTimestamp = now()
|
exists, err = db.retrievalDataIndex.Has(item)
|
||||||
db.retrievalDataIndex.PutInBatch(batch, item)
|
if err != nil {
|
||||||
db.pullIndex.PutInBatch(batch, item)
|
return exists, err
|
||||||
triggerPullFeed = true
|
}
|
||||||
|
if !exists {
|
||||||
|
item.StoreTimestamp = now()
|
||||||
|
item.BinID, err = db.binIDs.IncInBatch(batch, uint64(db.po(item.Address)))
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
db.retrievalDataIndex.PutInBatch(batch, item)
|
||||||
|
db.pullIndex.PutInBatch(batch, item)
|
||||||
|
triggerPullFeed = true
|
||||||
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return ErrInvalidMode
|
return false, ErrInvalidMode
|
||||||
}
|
}
|
||||||
|
|
||||||
err = db.incGCSizeInBatch(batch, gcSizeChange)
|
err = db.incGCSizeInBatch(batch, gcSizeChange)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = db.shed.WriteBatch(batch)
|
err = db.shed.WriteBatch(batch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return false, err
|
||||||
}
|
}
|
||||||
if triggerPullFeed {
|
if triggerPullFeed {
|
||||||
db.triggerPullSubscriptions(db.po(item.Address))
|
db.triggerPullSubscriptions(db.po(item.Address))
|
||||||
@ -154,5 +172,5 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
|
|||||||
if triggerPushFeed {
|
if triggerPushFeed {
|
||||||
db.triggerPushSubscriptions()
|
db.triggerPushSubscriptions()
|
||||||
}
|
}
|
||||||
return nil
|
return exists, nil
|
||||||
}
|
}
|
||||||
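The behavioural core of the new put is that a chunk only gets a fresh StoreTimestamp and BinID when it is not yet in the retrieval index, and that the BinID is a monotonically increasing counter scoped to the chunk's proximity order bin (db.binIDs.IncInBatch). Conceptually the per-bin counter behaves like the plain-Go illustration below; this is not the shed-backed implementation, only a sketch of the idea:

// binIDCounter mimics the per-bin sequence used for pull syncing: each
// proximity order bin hands out its own increasing IDs starting at 1.
type binIDCounter struct {
	last map[uint8]uint64
}

func (c *binIDCounter) inc(po uint8) uint64 {
	if c.last == nil {
		c.last = make(map[uint8]uint64)
	}
	c.last[po]++
	return c.last[po]
}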
|
@ -18,6 +18,7 @@ package localstore
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
@ -31,9 +32,7 @@ func TestModePutRequest(t *testing.T) {
|
|||||||
db, cleanupFunc := newTestDB(t, nil)
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
putter := db.NewPutter(ModePutRequest)
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
chunk := generateTestRandomChunk()
|
|
||||||
|
|
||||||
// keep the record when the chunk is stored
|
// keep the record when the chunk is stored
|
||||||
var storeTimestamp int64
|
var storeTimestamp int64
|
||||||
@ -46,12 +45,12 @@ func TestModePutRequest(t *testing.T) {
|
|||||||
|
|
||||||
storeTimestamp = wantTimestamp
|
storeTimestamp = wantTimestamp
|
||||||
|
|
||||||
err := putter.Put(chunk)
|
_, err := db.Put(context.Background(), chunk.ModePutRequest, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, wantTimestamp, wantTimestamp))
|
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, wantTimestamp, wantTimestamp))
|
||||||
|
|
||||||
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
||||||
|
|
||||||
@ -64,12 +63,12 @@ func TestModePutRequest(t *testing.T) {
|
|||||||
return wantTimestamp
|
return wantTimestamp
|
||||||
})()
|
})()
|
||||||
|
|
||||||
err := putter.Put(chunk)
|
_, err := db.Put(context.Background(), chunk.ModePutRequest, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, storeTimestamp, wantTimestamp))
|
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, storeTimestamp, wantTimestamp))
|
||||||
|
|
||||||
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
||||||
|
|
||||||
@ -87,16 +86,16 @@ func TestModePutSync(t *testing.T) {
|
|||||||
return wantTimestamp
|
return wantTimestamp
|
||||||
})()
|
})()
|
||||||
|
|
||||||
chunk := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
err := db.NewPutter(ModePutSync).Put(chunk)
|
_, err := db.Put(context.Background(), chunk.ModePutSync, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("retrieve indexes", newRetrieveIndexesTest(db, chunk, wantTimestamp, 0))
|
t.Run("retrieve indexes", newRetrieveIndexesTest(db, ch, wantTimestamp, 0))
|
||||||
|
|
||||||
t.Run("pull index", newPullIndexTest(db, chunk, wantTimestamp, nil))
|
t.Run("pull index", newPullIndexTest(db, ch, 1, nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestModePutUpload validates ModePutUpload index values on the provided DB.
|
// TestModePutUpload validates ModePutUpload index values on the provided DB.
|
||||||
@ -109,18 +108,18 @@ func TestModePutUpload(t *testing.T) {
|
|||||||
return wantTimestamp
|
return wantTimestamp
|
||||||
})()
|
})()
|
||||||
|
|
||||||
chunk := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
err := db.NewPutter(ModePutUpload).Put(chunk)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("retrieve indexes", newRetrieveIndexesTest(db, chunk, wantTimestamp, 0))
|
t.Run("retrieve indexes", newRetrieveIndexesTest(db, ch, wantTimestamp, 0))
|
||||||
|
|
||||||
t.Run("pull index", newPullIndexTest(db, chunk, wantTimestamp, nil))
|
t.Run("pull index", newPullIndexTest(db, ch, 1, nil))
|
||||||
|
|
||||||
t.Run("push index", newPushIndexTest(db, chunk, wantTimestamp, nil))
|
t.Run("push index", newPushIndexTest(db, ch, wantTimestamp, nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestModePutUpload_parallel uploads chunks in parallel
|
// TestModePutUpload_parallel uploads chunks in parallel
|
||||||
@ -140,14 +139,13 @@ func TestModePutUpload_parallel(t *testing.T) {
|
|||||||
// start uploader workers
|
// start uploader workers
|
||||||
for i := 0; i < workerCount; i++ {
|
for i := 0; i < workerCount; i++ {
|
||||||
go func(i int) {
|
go func(i int) {
|
||||||
uploader := db.NewPutter(ModePutUpload)
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case chunk, ok := <-chunkChan:
|
case ch, ok := <-chunkChan:
|
||||||
if !ok {
|
if !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
err := uploader.Put(chunk)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
select {
|
select {
|
||||||
case errChan <- err:
|
case errChan <- err:
|
||||||
case <-doneChan:
|
case <-doneChan:
|
||||||
@ -188,21 +186,85 @@ func TestModePutUpload_parallel(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// get every chunk and validate its data
|
// get every chunk and validate its data
|
||||||
getter := db.NewGetter(ModeGetRequest)
|
|
||||||
|
|
||||||
chunksMu.Lock()
|
chunksMu.Lock()
|
||||||
defer chunksMu.Unlock()
|
defer chunksMu.Unlock()
|
||||||
for _, chunk := range chunks {
|
for _, ch := range chunks {
|
||||||
got, err := getter.Get(chunk.Address())
|
got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
if !bytes.Equal(got.Data(), chunk.Data()) {
|
if !bytes.Equal(got.Data(), ch.Data()) {
|
||||||
t.Fatalf("got chunk %s data %x, want %x", chunk.Address().Hex(), got.Data(), chunk.Data())
|
t.Fatalf("got chunk %s data %x, want %x", ch.Address().Hex(), got.Data(), ch.Data())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestModePut_sameChunk puts the same chunk multiple times
|
||||||
|
// and validates that all relevant indexes have only one item
|
||||||
|
// in them.
|
||||||
|
func TestModePut_sameChunk(t *testing.T) {
|
||||||
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
mode chunk.ModePut
|
||||||
|
pullIndex bool
|
||||||
|
pushIndex bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "ModePutRequest",
|
||||||
|
mode: chunk.ModePutRequest,
|
||||||
|
pullIndex: false,
|
||||||
|
pushIndex: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ModePutUpload",
|
||||||
|
mode: chunk.ModePutUpload,
|
||||||
|
pullIndex: true,
|
||||||
|
pushIndex: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ModePutSync",
|
||||||
|
mode: chunk.ModePutSync,
|
||||||
|
pullIndex: true,
|
||||||
|
pushIndex: false,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
|
defer cleanupFunc()
|
||||||
|
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
exists, err := db.Put(context.Background(), tc.mode, ch)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
switch exists {
|
||||||
|
case false:
|
||||||
|
if i != 0 {
|
||||||
|
t.Fatal("should not exist only on first Put")
|
||||||
|
}
|
||||||
|
case true:
|
||||||
|
if i == 0 {
|
||||||
|
t.Fatal("should exist on all cases other than the first one")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
count := func(b bool) (c int) {
|
||||||
|
if b {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
newItemsCountTest(db.retrievalDataIndex, 1)(t)
|
||||||
|
newItemsCountTest(db.pullIndex, count(tc.pullIndex))(t)
|
||||||
|
newItemsCountTest(db.pushIndex, count(tc.pushIndex))(t)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// BenchmarkPutUpload runs a series of benchmarks that upload
|
// BenchmarkPutUpload runs a series of benchmarks that upload
|
||||||
// a specific number of chunks in parallel.
|
// a specific number of chunks in parallel.
|
||||||
//
|
//
|
||||||
@ -270,7 +332,6 @@ func benchmarkPutUpload(b *testing.B, o *Options, count, maxParallelUploads int)
|
|||||||
db, cleanupFunc := newTestDB(b, o)
|
db, cleanupFunc := newTestDB(b, o)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
uploader := db.NewPutter(ModePutUpload)
|
|
||||||
chunks := make([]chunk.Chunk, count)
|
chunks := make([]chunk.Chunk, count)
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
chunks[i] = generateTestRandomChunk()
|
chunks[i] = generateTestRandomChunk()
|
||||||
@ -286,7 +347,8 @@ func benchmarkPutUpload(b *testing.B, o *Options, count, maxParallelUploads int)
|
|||||||
go func(i int) {
|
go func(i int) {
|
||||||
defer func() { <-sem }()
|
defer func() { <-sem }()
|
||||||
|
|
||||||
errs <- uploader.Put(chunks[i])
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, chunks[i])
|
||||||
|
errs <- err
|
||||||
}(i)
|
}(i)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
@ -17,51 +17,37 @@
|
|||||||
package localstore
|
package localstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/syndtr/goleveldb/leveldb"
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ModeSet enumerates different Setter modes.
|
|
||||||
type ModeSet int
|
|
||||||
|
|
||||||
// Setter modes.
|
|
||||||
const (
|
|
||||||
// ModeSetAccess: when an update request is received for a chunk or chunk is retrieved for delivery
|
|
||||||
ModeSetAccess ModeSet = iota
|
|
||||||
// ModeSetSync: when push sync receipt is received
|
|
||||||
ModeSetSync
|
|
||||||
// modeSetRemove: when GC-d
|
|
||||||
// unexported as no external packages should remove chunks from database
|
|
||||||
modeSetRemove
|
|
||||||
)
|
|
||||||
|
|
||||||
// Setter sets the state of a particular
|
|
||||||
// Chunk in database by changing indexes.
|
|
||||||
type Setter struct {
|
|
||||||
db *DB
|
|
||||||
mode ModeSet
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSetter returns a new Setter on database
|
|
||||||
// with a specific Mode.
|
|
||||||
func (db *DB) NewSetter(mode ModeSet) *Setter {
|
|
||||||
return &Setter{
|
|
||||||
mode: mode,
|
|
||||||
db: db,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set updates database indexes for a specific
|
// Set updates database indexes for a specific
|
||||||
// chunk represented by the address.
|
// chunk represented by the address.
|
||||||
func (s *Setter) Set(addr chunk.Address) (err error) {
|
// Set is required to implement chunk.Store
|
||||||
return s.db.set(s.mode, addr)
|
// interface.
|
||||||
|
func (db *DB) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
|
||||||
|
metricName := fmt.Sprintf("localstore.Set.%s", mode)
|
||||||
|
|
||||||
|
metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
|
||||||
|
defer totalTimeMetric(metricName, time.Now())
|
||||||
|
|
||||||
|
err = db.set(mode, addr)
|
||||||
|
if err != nil {
|
||||||
|
metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
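Set follows the same pattern as Get and Put: the Setter type is gone and the mode comes from the chunk package. The most common call is marking a chunk as synced once a push-sync receipt arrives; a sketch (illustrative helper name, not part of this diff):

// markSynced records a push-sync receipt for addr, which removes the chunk
// from the push index and inserts it into the gc index.
func markSynced(db *localstore.DB, addr chunk.Address) error {
	return db.Set(context.Background(), chunk.ModeSetSync, addr)
}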
|
|
||||||
// set updates database indexes for a specific
|
// set updates database indexes for a specific
|
||||||
// chunk represented by the address.
|
// chunk represented by the address.
|
||||||
// It acquires lockAddr to protect two calls
|
// It acquires lockAddr to protect two calls
|
||||||
// of this function for the same address in parallel.
|
// of this function for the same address in parallel.
|
||||||
func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
|
func (db *DB) set(mode chunk.ModeSet, addr chunk.Address) (err error) {
|
||||||
// protect parallel updates
|
// protect parallel updates
|
||||||
db.batchMu.Lock()
|
db.batchMu.Lock()
|
||||||
defer db.batchMu.Unlock()
|
defer db.batchMu.Unlock()
|
||||||
@ -76,7 +62,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
|
|||||||
item := addressToItem(addr)
|
item := addressToItem(addr)
|
||||||
|
|
||||||
switch mode {
|
switch mode {
|
||||||
case ModeSetAccess:
|
case chunk.ModeSetAccess:
|
||||||
// add to pull, insert to gc
|
// add to pull, insert to gc
|
||||||
|
|
||||||
// need to get access timestamp here as it is not
|
// need to get access timestamp here as it is not
|
||||||
@ -87,9 +73,14 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
|
|||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
item.StoreTimestamp = i.StoreTimestamp
|
item.StoreTimestamp = i.StoreTimestamp
|
||||||
|
item.BinID = i.BinID
|
||||||
case leveldb.ErrNotFound:
|
case leveldb.ErrNotFound:
|
||||||
db.pushIndex.DeleteInBatch(batch, item)
|
db.pushIndex.DeleteInBatch(batch, item)
|
||||||
item.StoreTimestamp = now()
|
item.StoreTimestamp = now()
|
||||||
|
item.BinID, err = db.binIDs.Inc(uint64(db.po(item.Address)))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -112,7 +103,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
|
|||||||
db.gcIndex.PutInBatch(batch, item)
|
db.gcIndex.PutInBatch(batch, item)
|
||||||
gcSizeChange++
|
gcSizeChange++
|
||||||
|
|
||||||
case ModeSetSync:
|
case chunk.ModeSetSync:
|
||||||
// delete from push, insert to gc
|
// delete from push, insert to gc
|
||||||
|
|
||||||
// need to get access timestamp here as it is not
|
// need to get access timestamp here as it is not
|
||||||
@ -131,6 +122,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
item.StoreTimestamp = i.StoreTimestamp
|
item.StoreTimestamp = i.StoreTimestamp
|
||||||
|
item.BinID = i.BinID
|
||||||
|
|
||||||
i, err = db.retrievalAccessIndex.Get(item)
|
i, err = db.retrievalAccessIndex.Get(item)
|
||||||
switch err {
|
switch err {
|
||||||
@ -149,7 +141,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
|
|||||||
db.gcIndex.PutInBatch(batch, item)
|
db.gcIndex.PutInBatch(batch, item)
|
||||||
gcSizeChange++
|
gcSizeChange++
|
||||||
|
|
||||||
case modeSetRemove:
|
case chunk.ModeSetRemove:
|
||||||
// delete from retrieve, pull, gc
|
// delete from retrieve, pull, gc
|
||||||
|
|
||||||
// need to get access timestamp here as it is not
|
// need to get access timestamp here as it is not
|
||||||
@ -169,6 +161,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
item.StoreTimestamp = i.StoreTimestamp
|
item.StoreTimestamp = i.StoreTimestamp
|
||||||
|
item.BinID = i.BinID
|
||||||
|
|
||||||
db.retrievalDataIndex.DeleteInBatch(batch, item)
|
db.retrievalDataIndex.DeleteInBatch(batch, item)
|
||||||
db.retrievalAccessIndex.DeleteInBatch(batch, item)
|
db.retrievalAccessIndex.DeleteInBatch(batch, item)
|
||||||
|
@ -17,9 +17,11 @@
|
|||||||
package localstore
|
package localstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/syndtr/goleveldb/leveldb"
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -28,23 +30,23 @@ func TestModeSetAccess(t *testing.T) {
|
|||||||
db, cleanupFunc := newTestDB(t, nil)
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
chunk := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
wantTimestamp := time.Now().UTC().UnixNano()
|
wantTimestamp := time.Now().UTC().UnixNano()
|
||||||
defer setNow(func() (t int64) {
|
defer setNow(func() (t int64) {
|
||||||
return wantTimestamp
|
return wantTimestamp
|
||||||
})()
|
})()
|
||||||
|
|
||||||
err := db.NewSetter(ModeSetAccess).Set(chunk.Address())
|
err := db.Set(context.Background(), chunk.ModeSetAccess, ch.Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("pull index", newPullIndexTest(db, chunk, wantTimestamp, nil))
|
t.Run("pull index", newPullIndexTest(db, ch, 1, nil))
|
||||||
|
|
||||||
t.Run("pull index count", newItemsCountTest(db.pullIndex, 1))
|
t.Run("pull index count", newItemsCountTest(db.pullIndex, 1))
|
||||||
|
|
||||||
t.Run("gc index", newGCIndexTest(db, chunk, wantTimestamp, wantTimestamp))
|
t.Run("gc index", newGCIndexTest(db, ch, wantTimestamp, wantTimestamp, 1))
|
||||||
|
|
||||||
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
||||||
|
|
||||||
@ -56,28 +58,28 @@ func TestModeSetSync(t *testing.T) {
|
|||||||
db, cleanupFunc := newTestDB(t, nil)
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
chunk := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
wantTimestamp := time.Now().UTC().UnixNano()
|
wantTimestamp := time.Now().UTC().UnixNano()
|
||||||
defer setNow(func() (t int64) {
|
defer setNow(func() (t int64) {
|
||||||
return wantTimestamp
|
return wantTimestamp
|
||||||
})()
|
})()
|
||||||
|
|
||||||
err := db.NewPutter(ModePutUpload).Put(chunk)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = db.NewSetter(ModeSetSync).Set(chunk.Address())
|
err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, wantTimestamp, wantTimestamp))
|
t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, wantTimestamp, wantTimestamp))
|
||||||
|
|
||||||
t.Run("push index", newPushIndexTest(db, chunk, wantTimestamp, leveldb.ErrNotFound))
|
t.Run("push index", newPushIndexTest(db, ch, wantTimestamp, leveldb.ErrNotFound))
|
||||||
|
|
||||||
t.Run("gc index", newGCIndexTest(db, chunk, wantTimestamp, wantTimestamp))
|
t.Run("gc index", newGCIndexTest(db, ch, wantTimestamp, wantTimestamp, 1))
|
||||||
|
|
||||||
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
|
||||||
|
|
||||||
@ -89,40 +91,39 @@ func TestModeSetRemove(t *testing.T) {
|
|||||||
db, cleanupFunc := newTestDB(t, nil)
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
chunk := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
err := db.NewPutter(ModePutUpload).Put(chunk)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = db.NewSetter(modeSetRemove).Set(chunk.Address())
|
err = db.Set(context.Background(), chunk.ModeSetRemove, ch.Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("retrieve indexes", func(t *testing.T) {
|
t.Run("retrieve indexes", func(t *testing.T) {
|
||||||
wantErr := leveldb.ErrNotFound
|
wantErr := leveldb.ErrNotFound
|
||||||
_, err := db.retrievalDataIndex.Get(addressToItem(chunk.Address()))
|
_, err := db.retrievalDataIndex.Get(addressToItem(ch.Address()))
|
||||||
if err != wantErr {
|
if err != wantErr {
|
||||||
t.Errorf("got error %v, want %v", err, wantErr)
|
t.Errorf("got error %v, want %v", err, wantErr)
|
||||||
}
|
}
|
||||||
t.Run("retrieve data index count", newItemsCountTest(db.retrievalDataIndex, 0))
|
t.Run("retrieve data index count", newItemsCountTest(db.retrievalDataIndex, 0))
|
||||||
|
|
||||||
// access index should not be set
|
// access index should not be set
|
||||||
_, err = db.retrievalAccessIndex.Get(addressToItem(chunk.Address()))
|
_, err = db.retrievalAccessIndex.Get(addressToItem(ch.Address()))
|
||||||
if err != wantErr {
|
if err != wantErr {
|
||||||
t.Errorf("got error %v, want %v", err, wantErr)
|
t.Errorf("got error %v, want %v", err, wantErr)
|
||||||
}
|
}
|
||||||
t.Run("retrieve access index count", newItemsCountTest(db.retrievalAccessIndex, 0))
|
t.Run("retrieve access index count", newItemsCountTest(db.retrievalAccessIndex, 0))
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("pull index", newPullIndexTest(db, chunk, 0, leveldb.ErrNotFound))
|
t.Run("pull index", newPullIndexTest(db, ch, 0, leveldb.ErrNotFound))
|
||||||
|
|
||||||
t.Run("pull index count", newItemsCountTest(db.pullIndex, 0))
|
t.Run("pull index count", newItemsCountTest(db.pullIndex, 0))
|
||||||
|
|
||||||
t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
|
t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
|
||||||
|
|
||||||
t.Run("gc size", newIndexGCSizeTest(db))
|
t.Run("gc size", newIndexGCSizeTest(db))
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -17,6 +17,7 @@
|
|||||||
package localstore
|
package localstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"strconv"
|
"strconv"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@ -61,17 +62,14 @@ func benchmarkRetrievalIndexes(b *testing.B, o *Options, count int) {
|
|||||||
b.StopTimer()
|
b.StopTimer()
|
||||||
db, cleanupFunc := newTestDB(b, o)
|
db, cleanupFunc := newTestDB(b, o)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
uploader := db.NewPutter(ModePutUpload)
|
|
||||||
syncer := db.NewSetter(ModeSetSync)
|
|
||||||
requester := db.NewGetter(ModeGetRequest)
|
|
||||||
addrs := make([]chunk.Address, count)
|
addrs := make([]chunk.Address, count)
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
chunk := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
err := uploader.Put(chunk)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
addrs[i] = chunk.Address()
|
addrs[i] = ch.Address()
|
||||||
}
|
}
|
||||||
// set update gc test hook to signal when
|
// set update gc test hook to signal when
|
||||||
// update gc goroutine is done by sending to
|
// update gc goroutine is done by sending to
|
||||||
@ -85,12 +83,12 @@ func benchmarkRetrievalIndexes(b *testing.B, o *Options, count int) {
|
|||||||
b.StartTimer()
|
b.StartTimer()
|
||||||
|
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
err := syncer.Set(addrs[i])
|
err := db.Set(context.Background(), chunk.ModeSetSync, addrs[i])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = requester.Get(addrs[i])
|
_, err = db.Get(context.Background(), chunk.ModeGetRequest, addrs[i])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -133,7 +131,6 @@ func benchmarkUpload(b *testing.B, o *Options, count int) {
|
|||||||
b.StopTimer()
|
b.StopTimer()
|
||||||
db, cleanupFunc := newTestDB(b, o)
|
db, cleanupFunc := newTestDB(b, o)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
uploader := db.NewPutter(ModePutUpload)
|
|
||||||
chunks := make([]chunk.Chunk, count)
|
chunks := make([]chunk.Chunk, count)
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
chunk := generateTestRandomChunk()
|
chunk := generateTestRandomChunk()
|
||||||
@ -142,7 +139,7 @@ func benchmarkUpload(b *testing.B, o *Options, count int) {
|
|||||||
b.StartTimer()
|
b.StartTimer()
|
||||||
|
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
err := uploader.Put(chunks[i])
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, chunks[i])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
|
swarm/storage/localstore/schema.go (new file, 52 lines)
@ -0,0 +1,52 @@
|
|||||||
|
package localstore
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
|
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The DB schema we want to use. The actual/current DB schema might differ
|
||||||
|
// until migrations are run.
|
||||||
|
const CurrentDbSchema = DbSchemaSanctuary
|
||||||
|
|
||||||
|
// There was a time when we had no schema at all.
|
||||||
|
const DbSchemaNone = ""
|
||||||
|
|
||||||
|
// "purity" is the first formal schema of LevelDB we release together with Swarm 0.3.5
|
||||||
|
const DbSchemaPurity = "purity"
|
||||||
|
|
||||||
|
// "halloween" is here because we had a screw in the garbage collector index.
|
||||||
|
// Because of that we had to rebuild the GC index to get rid of erroneous
|
||||||
|
// entries and that takes a long time. This schema is used for bookkeeping,
|
||||||
|
// so rebuild index will run just once.
|
||||||
|
const DbSchemaHalloween = "halloween"
|
||||||
|
|
||||||
|
const DbSchemaSanctuary = "sanctuary"
|
||||||
|
|
||||||
|
// returns true if legacy database is in the datadir
|
||||||
|
func IsLegacyDatabase(datadir string) bool {
|
||||||
|
|
||||||
|
var (
|
||||||
|
legacyDbSchemaKey = []byte{8}
|
||||||
|
)
|
||||||
|
|
||||||
|
db, err := leveldb.OpenFile(datadir, &opt.Options{OpenFilesCacheCapacity: 128})
|
||||||
|
if err != nil {
|
||||||
|
log.Error("got an error while trying to open leveldb path", "path", datadir, "err", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
data, err := db.Get(legacyDbSchemaKey, nil)
|
||||||
|
if err != nil {
|
||||||
|
if err == leveldb.ErrNotFound {
|
||||||
|
// if we haven't found anything under the legacy db schema key, we are not on legacy
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Error("got an unexpected error fetching legacy name from the database", "err", err)
|
||||||
|
}
|
||||||
|
log.Trace("checking if database scheme is legacy", "schema name", string(data))
|
||||||
|
return string(data) == DbSchemaHalloween || string(data) == DbSchemaPurity
|
||||||
|
}
|
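The new schema.go gives callers a way to detect a pre-"sanctuary" store before opening it with the new localstore. A sketch of how a startup path might branch on it; openOrMigrate and the migrate callback are illustrative, not part of this diff:

// openOrMigrate decides whether the data directory still holds a legacy
// (pre-"sanctuary") store that must be converted before the new localstore
// schema (DbSchemaSanctuary) can be used.
func openOrMigrate(datadir string, migrate func(string) error) error {
	if localstore.IsLegacyDatabase(datadir) {
		return migrate(datadir)
	}
	return nil // already on the current schema, safe to open directly
}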
@ -17,28 +17,34 @@
|
|||||||
package localstore
|
package localstore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/shed"
|
"github.com/ethereum/go-ethereum/swarm/shed"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
||||||
|
"github.com/opentracing/opentracing-go"
|
||||||
|
olog "github.com/opentracing/opentracing-go/log"
|
||||||
"github.com/syndtr/goleveldb/leveldb"
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SubscribePull returns a channel that provides chunk addresses and stored times from pull syncing index.
|
// SubscribePull returns a channel that provides chunk addresses and stored times from pull syncing index.
|
||||||
// Pull syncing index can be only subscribed to a particular proximity order bin. If since
|
// Pull syncing index can be only subscribed to a particular proximity order bin. If since
|
||||||
// is not nil, the iteration will start from the first item stored after that timestamp. If until is not nil,
|
// is not 0, the iteration will start from the first item stored after that id. If until is not 0,
|
||||||
// only chunks stored up to this timestamp will be send to the channel, and the returned channel will be
|
// only chunks stored up to this id will be sent to the channel, and the returned channel will be
|
||||||
// closed. The since-until interval is open on the left and closed on the right (since,until]. Returned stop
|
// closed. The since-until interval is open on since side, and closed on until side: (since,until] <=> [since+1,until]. Returned stop
|
||||||
// function will terminate current and further iterations without errors, and also close the returned channel.
|
// function will terminate current and further iterations without errors, and also close the returned channel.
|
||||||
// Make sure that you check the second returned parameter from the channel to stop iteration when its value
|
// Make sure that you check the second returned parameter from the channel to stop iteration when its value
|
||||||
// is false.
|
// is false.
|
||||||
func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkDescriptor) (c <-chan ChunkDescriptor, stop func()) {
|
func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
|
||||||
chunkDescriptors := make(chan ChunkDescriptor)
|
metricName := "localstore.SubscribePull"
|
||||||
|
metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
|
||||||
|
|
||||||
|
chunkDescriptors := make(chan chunk.Descriptor)
|
||||||
trigger := make(chan struct{}, 1)
|
trigger := make(chan struct{}, 1)
|
||||||
|
|
||||||
db.pullTriggersMu.Lock()
|
db.pullTriggersMu.Lock()
|
||||||
@ -59,18 +65,20 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
|
|||||||
var errStopSubscription = errors.New("stop subscription")
|
var errStopSubscription = errors.New("stop subscription")
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
// close the returned ChunkDescriptor channel at the end to
|
defer metrics.GetOrRegisterCounter(metricName+".stop", nil).Inc(1)
|
||||||
|
// close the returned chunk.Descriptor channel at the end to
|
||||||
// signal that the subscription is done
|
// signal that the subscription is done
|
||||||
defer close(chunkDescriptors)
|
defer close(chunkDescriptors)
|
||||||
// sinceItem is the Item from which the next iteration
|
// sinceItem is the Item from which the next iteration
|
||||||
// should start. The first iteration starts from the first Item.
|
// should start. The first iteration starts from the first Item.
|
||||||
var sinceItem *shed.Item
|
var sinceItem *shed.Item
|
||||||
if since != nil {
|
if since > 0 {
|
||||||
sinceItem = &shed.Item{
|
sinceItem = &shed.Item{
|
||||||
Address: since.Address,
|
Address: db.addressInBin(bin),
|
||||||
StoreTimestamp: since.StoreTimestamp,
|
BinID: since,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
first := true // first iteration flag for SkipStartFromItem
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-trigger:
|
case <-trigger:
|
||||||
@ -78,17 +86,23 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
|
|||||||
// - last index Item is reached
|
// - last index Item is reached
|
||||||
// - subscription stop is called
|
// - subscription stop is called
|
||||||
// - context is done
|
// - context is done
|
||||||
|
metrics.GetOrRegisterCounter(metricName+".iter", nil).Inc(1)
|
||||||
|
|
||||||
|
ctx, sp := spancontext.StartSpan(ctx, metricName+".iter")
|
||||||
|
sp.LogFields(olog.Int("bin", int(bin)), olog.Uint64("since", since), olog.Uint64("until", until))
|
||||||
|
|
||||||
|
iterStart := time.Now()
|
||||||
|
var count int
|
||||||
err := db.pullIndex.Iterate(func(item shed.Item) (stop bool, err error) {
|
err := db.pullIndex.Iterate(func(item shed.Item) (stop bool, err error) {
|
||||||
select {
|
select {
|
||||||
case chunkDescriptors <- ChunkDescriptor{
|
case chunkDescriptors <- chunk.Descriptor{
|
||||||
Address: item.Address,
|
Address: item.Address,
|
||||||
StoreTimestamp: item.StoreTimestamp,
|
BinID: item.BinID,
|
||||||
}:
|
}:
|
||||||
|
count++
|
||||||
// until chunk descriptor is sent
|
// until chunk descriptor is sent
|
||||||
// break the iteration
|
// break the iteration
|
||||||
if until != nil &&
|
if until > 0 && item.BinID >= until {
|
||||||
(item.StoreTimestamp >= until.StoreTimestamp ||
|
|
||||||
bytes.Equal(item.Address, until.Address)) {
|
|
||||||
return true, errStopSubscription
|
return true, errStopSubscription
|
||||||
}
|
}
|
||||||
// set next iteration start item
|
// set next iteration start item
|
||||||
@ -109,19 +123,34 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
|
|||||||
}, &shed.IterateOptions{
|
}, &shed.IterateOptions{
|
||||||
StartFrom: sinceItem,
|
StartFrom: sinceItem,
|
||||||
// sinceItem was sent as the last Address in the previous
|
// sinceItem was sent as the last Address in the previous
|
||||||
// iterator call, skip it in this one
|
// iterator call, skip it in this one, but not the item with
|
||||||
SkipStartFromItem: true,
|
// the provided since bin id as it should be sent to a channel
|
||||||
|
SkipStartFromItem: !first,
|
||||||
Prefix: []byte{bin},
|
Prefix: []byte{bin},
|
||||||
})
|
})
|
||||||
|
|
||||||
|
totalTimeMetric(metricName+".iter", iterStart)
|
||||||
|
|
||||||
|
sp.FinishWithOptions(opentracing.FinishOptions{
|
||||||
|
LogRecords: []opentracing.LogRecord{
|
||||||
|
{
|
||||||
|
Timestamp: time.Now(),
|
||||||
|
Fields: []olog.Field{olog.Int("count", count)},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == errStopSubscription {
|
if err == errStopSubscription {
|
||||||
// stop subscription without any errors
|
// stop subscription without any errors
|
||||||
// if until is reached
|
// if until is reached
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
metrics.GetOrRegisterCounter(metricName+".iter.error", nil).Inc(1)
|
||||||
log.Error("localstore pull subscription iteration", "bin", bin, "since", since, "until", until, "err", err)
|
log.Error("localstore pull subscription iteration", "bin", bin, "since", since, "until", until, "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
first = false
|
||||||
case <-stopChan:
|
case <-stopChan:
|
||||||
// terminate the subscription
|
// terminate the subscription
|
||||||
// on stop
|
// on stop
|
||||||
@ -159,35 +188,20 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
|
|||||||
return chunkDescriptors, stop
|
return chunkDescriptors, stop
|
||||||
}
|
}
|
||||||
|
|
||||||
// LastPullSubscriptionChunk returns ChunkDescriptor of the latest Chunk
|
// LastPullSubscriptionBinID returns chunk bin id of the latest Chunk
|
||||||
// in pull syncing index for a provided bin. If there are no chunks in
|
// in pull syncing index for a provided bin. If there are no chunks in
|
||||||
// that bin, chunk.ErrChunkNotFound is returned.
|
// that bin, 0 value is returned.
|
||||||
func (db *DB) LastPullSubscriptionChunk(bin uint8) (c *ChunkDescriptor, err error) {
|
func (db *DB) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
|
||||||
|
metrics.GetOrRegisterCounter("localstore.LastPullSubscriptionBinID", nil).Inc(1)
|
||||||
|
|
||||||
item, err := db.pullIndex.Last([]byte{bin})
|
item, err := db.pullIndex.Last([]byte{bin})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == leveldb.ErrNotFound {
|
if err == leveldb.ErrNotFound {
|
||||||
return nil, chunk.ErrChunkNotFound
|
return 0, nil
|
||||||
}
|
}
|
||||||
return nil, err
|
return 0, err
|
||||||
}
|
}
|
||||||
return &ChunkDescriptor{
|
return item.BinID, nil
|
||||||
Address: item.Address,
|
|
||||||
StoreTimestamp: item.StoreTimestamp,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChunkDescriptor holds information required for Pull syncing. This struct
|
|
||||||
// is provided by subscribing to pull index.
|
|
||||||
type ChunkDescriptor struct {
|
|
||||||
Address chunk.Address
|
|
||||||
StoreTimestamp int64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *ChunkDescriptor) String() string {
|
|
||||||
if c == nil {
|
|
||||||
return "none"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s stored at %v", c.Address.Hex(), c.StoreTimestamp)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
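The hunks above change SubscribePull to take since and until as uint64 bin IDs (0 meaning no bound) and replace LastPullSubscriptionChunk with LastPullSubscriptionBinID, which now reports 0 for an empty bin instead of chunk.ErrChunkNotFound. A minimal caller sketch, not part of this commit, assuming a *localstore.DB value named db and using only the signatures visible in this diff (drainBin is a hypothetical helper):

package example

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// drainBin reads every chunk descriptor currently indexed for one
// proximity order bin and returns once the newest known bin ID is seen.
func drainBin(ctx context.Context, db *localstore.DB, bin uint8) error {
	// until is the bin ID of the newest chunk in this bin, or 0 if the bin is empty.
	until, err := db.LastPullSubscriptionBinID(bin)
	if err != nil {
		return err
	}
	if until == 0 {
		return nil // nothing stored in this bin yet
	}
	// since=0 starts from the first stored chunk; until bounds the iteration.
	ch, stop := db.SubscribePull(ctx, bin, 0, until)
	defer stop()

	for {
		select {
		case d, ok := <-ch:
			if !ok {
				return nil // subscription ended
			}
			log.Printf("bin %d: chunk %x (bin id %d)", bin, d.Address, d.BinID)
			if d.BinID >= until {
				return nil // reached the boundary requested via until
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}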
// triggerPullSubscriptions is used internally for starting iterations
|
// triggerPullSubscriptions is used internally for starting iterations
|
||||||
@ -209,3 +223,12 @@ func (db *DB) triggerPullSubscriptions(bin uint8) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// addressInBin returns an address that is in a specific
|
||||||
|
// proximity order bin from database base key.
|
||||||
|
func (db *DB) addressInBin(bin uint8) (addr chunk.Address) {
|
||||||
|
addr = append([]byte(nil), db.baseKey...)
|
||||||
|
b := bin / 8
|
||||||
|
addr[b] = addr[b] ^ (1 << (7 - bin%8))
|
||||||
|
return addr
|
||||||
|
}
|
||||||
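The new addressInBin helper above derives an address in a given proximity order bin by flipping the single bit of the base key at position bin, which TestAddressInBin later verifies. A standalone sketch of why that works, with a simplified byte-wise proximity function standing in for the swarm one (nothing below is part of this commit):

package main

import "fmt"

// proximity counts the leading bits shared by x and y, which is their
// proximity order (PO).
func proximity(x, y []byte) (po uint8) {
	for i := range x {
		xor := x[i] ^ y[i]
		if xor == 0 {
			po += 8
			continue
		}
		for bit := 7; bit >= 0; bit-- {
			if xor&(1<<uint(bit)) != 0 {
				return po
			}
			po++
		}
	}
	return po
}

func main() {
	base := make([]byte, 32) // stand-in for db.baseKey
	for bin := uint8(0); bin < 16; bin++ {
		addr := append([]byte(nil), base...)
		b := bin / 8
		addr[b] ^= 1 << (7 - bin%8) // the same bit flip addressInBin performs
		// the first bin bits still match and bit number bin differs, so PO == bin
		fmt.Printf("bin %2d -> po %2d\n", bin, proximity(base, addr))
	}
}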
|
@ -25,6 +25,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/shed"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestDB_SubscribePull uploads some chunks before and after
|
// TestDB_SubscribePull uploads some chunks before and after
|
||||||
@ -35,15 +36,13 @@ func TestDB_SubscribePull(t *testing.T) {
|
|||||||
db, cleanupFunc := newTestDB(t, nil)
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
uploader := db.NewPutter(ModePutUpload)
|
|
||||||
|
|
||||||
addrs := make(map[uint8][]chunk.Address)
|
addrs := make(map[uint8][]chunk.Address)
|
||||||
var addrsMu sync.Mutex
|
var addrsMu sync.Mutex
|
||||||
var wantedChunksCount int
|
var wantedChunksCount int
|
||||||
|
|
||||||
// prepopulate database with some chunks
|
// prepopulate database with some chunks
|
||||||
// before the subscription
|
// before the subscription
|
||||||
uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)
|
uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 10)
|
||||||
|
|
||||||
// set a timeout on subscription
|
// set a timeout on subscription
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
@ -54,22 +53,22 @@ func TestDB_SubscribePull(t *testing.T) {
|
|||||||
errChan := make(chan error)
|
errChan := make(chan error)
|
||||||
|
|
||||||
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
|
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
|
||||||
ch, stop := db.SubscribePull(ctx, bin, nil, nil)
|
ch, stop := db.SubscribePull(ctx, bin, 0, 0)
|
||||||
defer stop()
|
defer stop()
|
||||||
|
|
||||||
// receive and validate addresses from the subscription
|
// receive and validate addresses from the subscription
|
||||||
go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
|
go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
|
||||||
}
|
}
|
||||||
|
|
||||||
// upload some chunks just after subscribe
|
// upload some chunks just after subscribe
|
||||||
uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)
|
uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 5)
|
||||||
|
|
||||||
time.Sleep(200 * time.Millisecond)
|
time.Sleep(200 * time.Millisecond)
|
||||||
|
|
||||||
// upload some chunks after some short time
|
// upload some chunks after some short time
|
||||||
// to ensure that subscription will include them
|
// to ensure that subscription will include them
|
||||||
// in a dynamic environment
|
// in a dynamic environment
|
||||||
uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)
|
uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 3)
|
||||||
|
|
||||||
checkErrChan(ctx, t, errChan, wantedChunksCount)
|
checkErrChan(ctx, t, errChan, wantedChunksCount)
|
||||||
}
|
}
|
||||||
@ -82,15 +81,13 @@ func TestDB_SubscribePull_multiple(t *testing.T) {
|
|||||||
db, cleanupFunc := newTestDB(t, nil)
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
uploader := db.NewPutter(ModePutUpload)
|
|
||||||
|
|
||||||
addrs := make(map[uint8][]chunk.Address)
|
addrs := make(map[uint8][]chunk.Address)
|
||||||
var addrsMu sync.Mutex
|
var addrsMu sync.Mutex
|
||||||
var wantedChunksCount int
|
var wantedChunksCount int
|
||||||
|
|
||||||
// prepopulate database with some chunks
|
// prepopulate database with some chunks
|
||||||
// before the subscription
|
// before the subscription
|
||||||
uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)
|
uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 10)
|
||||||
|
|
||||||
// set a timeout on subscription
|
// set a timeout on subscription
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
@ -106,23 +103,23 @@ func TestDB_SubscribePull_multiple(t *testing.T) {
|
|||||||
// that all of them will write every address error to errChan
|
// that all of them will write every address error to errChan
|
||||||
for j := 0; j < subsCount; j++ {
|
for j := 0; j < subsCount; j++ {
|
||||||
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
|
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
|
||||||
ch, stop := db.SubscribePull(ctx, bin, nil, nil)
|
ch, stop := db.SubscribePull(ctx, bin, 0, 0)
|
||||||
defer stop()
|
defer stop()
|
||||||
|
|
||||||
// receive and validate addresses from the subscription
|
// receive and validate addresses from the subscription
|
||||||
go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
|
go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// upload some chunks just after subscribe
|
// upload some chunks just after subscribe
|
||||||
uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)
|
uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 5)
|
||||||
|
|
||||||
time.Sleep(200 * time.Millisecond)
|
time.Sleep(200 * time.Millisecond)
|
||||||
|
|
||||||
// upload some chunks after some short time
|
// upload some chunks after some short time
|
||||||
// to ensure that subscription will include them
|
// to ensure that subscription will include them
|
||||||
// in a dynamic environment
|
// in a dynamic environment
|
||||||
uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)
|
uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 3)
|
||||||
|
|
||||||
checkErrChan(ctx, t, errChan, wantedChunksCount*subsCount)
|
checkErrChan(ctx, t, errChan, wantedChunksCount*subsCount)
|
||||||
}
|
}
|
||||||
@ -135,61 +132,52 @@ func TestDB_SubscribePull_since(t *testing.T) {
|
|||||||
db, cleanupFunc := newTestDB(t, nil)
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
uploader := db.NewPutter(ModePutUpload)
|
|
||||||
|
|
||||||
addrs := make(map[uint8][]chunk.Address)
|
addrs := make(map[uint8][]chunk.Address)
|
||||||
var addrsMu sync.Mutex
|
var addrsMu sync.Mutex
|
||||||
var wantedChunksCount int
|
var wantedChunksCount int
|
||||||
|
|
||||||
lastTimestamp := time.Now().UTC().UnixNano()
|
binIDCounter := make(map[uint8]uint64)
|
||||||
var lastTimestampMu sync.RWMutex
|
var binIDCounterMu sync.RWMutex
|
||||||
defer setNow(func() (t int64) {
|
|
||||||
lastTimestampMu.Lock()
|
|
||||||
defer lastTimestampMu.Unlock()
|
|
||||||
lastTimestamp++
|
|
||||||
return lastTimestamp
|
|
||||||
})()
|
|
||||||
|
|
||||||
uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
|
uploadRandomChunks := func(count int, wanted bool) (first map[uint8]uint64) {
|
||||||
addrsMu.Lock()
|
addrsMu.Lock()
|
||||||
defer addrsMu.Unlock()
|
defer addrsMu.Unlock()
|
||||||
|
|
||||||
last = make(map[uint8]ChunkDescriptor)
|
first = make(map[uint8]uint64)
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
ch := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
err := uploader.Put(ch)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
bin := db.po(ch.Address())
|
bin := db.po(ch.Address())
|
||||||
|
|
||||||
if _, ok := addrs[bin]; !ok {
|
binIDCounterMu.RLock()
|
||||||
addrs[bin] = make([]chunk.Address, 0)
|
binIDCounter[bin]++
|
||||||
}
|
binIDCounterMu.RUnlock()
|
||||||
|
|
||||||
if wanted {
|
if wanted {
|
||||||
|
if _, ok := addrs[bin]; !ok {
|
||||||
|
addrs[bin] = make([]chunk.Address, 0)
|
||||||
|
}
|
||||||
addrs[bin] = append(addrs[bin], ch.Address())
|
addrs[bin] = append(addrs[bin], ch.Address())
|
||||||
wantedChunksCount++
|
wantedChunksCount++
|
||||||
}
|
|
||||||
|
|
||||||
lastTimestampMu.RLock()
|
if _, ok := first[bin]; !ok {
|
||||||
storeTimestamp := lastTimestamp
|
first[bin] = binIDCounter[bin]
|
||||||
lastTimestampMu.RUnlock()
|
}
|
||||||
|
|
||||||
last[bin] = ChunkDescriptor{
|
|
||||||
Address: ch.Address(),
|
|
||||||
StoreTimestamp: storeTimestamp,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return last
|
return first
|
||||||
}
|
}
|
||||||
|
|
||||||
// prepopulate database with some chunks
|
// prepopulate database with some chunks
|
||||||
// before the subscription
|
// before the subscription
|
||||||
last := uploadRandomChunks(30, false)
|
uploadRandomChunks(30, false)
|
||||||
|
|
||||||
uploadRandomChunks(25, true)
|
first := uploadRandomChunks(25, true)
|
||||||
|
|
||||||
// set a timeout on subscription
|
// set a timeout on subscription
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
@ -200,21 +188,18 @@ func TestDB_SubscribePull_since(t *testing.T) {
|
|||||||
errChan := make(chan error)
|
errChan := make(chan error)
|
||||||
|
|
||||||
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
|
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
|
||||||
var since *ChunkDescriptor
|
since, ok := first[bin]
|
||||||
if c, ok := last[bin]; ok {
|
if !ok {
|
||||||
since = &c
|
continue
|
||||||
}
|
}
|
||||||
ch, stop := db.SubscribePull(ctx, bin, since, nil)
|
ch, stop := db.SubscribePull(ctx, bin, since, 0)
|
||||||
defer stop()
|
defer stop()
|
||||||
|
|
||||||
// receive and validate addresses from the subscription
|
// receive and validate addresses from the subscription
|
||||||
go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
|
go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// upload some chunks just after subscribe
|
|
||||||
uploadRandomChunks(15, true)
|
|
||||||
|
|
||||||
checkErrChan(ctx, t, errChan, wantedChunksCount)
|
checkErrChan(ctx, t, errChan, wantedChunksCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -226,30 +211,22 @@ func TestDB_SubscribePull_until(t *testing.T) {
|
|||||||
db, cleanupFunc := newTestDB(t, nil)
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
uploader := db.NewPutter(ModePutUpload)
|
|
||||||
|
|
||||||
addrs := make(map[uint8][]chunk.Address)
|
addrs := make(map[uint8][]chunk.Address)
|
||||||
var addrsMu sync.Mutex
|
var addrsMu sync.Mutex
|
||||||
var wantedChunksCount int
|
var wantedChunksCount int
|
||||||
|
|
||||||
lastTimestamp := time.Now().UTC().UnixNano()
|
binIDCounter := make(map[uint8]uint64)
|
||||||
var lastTimestampMu sync.RWMutex
|
var binIDCounterMu sync.RWMutex
|
||||||
defer setNow(func() (t int64) {
|
|
||||||
lastTimestampMu.Lock()
|
|
||||||
defer lastTimestampMu.Unlock()
|
|
||||||
lastTimestamp++
|
|
||||||
return lastTimestamp
|
|
||||||
})()
|
|
||||||
|
|
||||||
uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
|
uploadRandomChunks := func(count int, wanted bool) (last map[uint8]uint64) {
|
||||||
addrsMu.Lock()
|
addrsMu.Lock()
|
||||||
defer addrsMu.Unlock()
|
defer addrsMu.Unlock()
|
||||||
|
|
||||||
last = make(map[uint8]ChunkDescriptor)
|
last = make(map[uint8]uint64)
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
ch := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
err := uploader.Put(ch)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -264,14 +241,11 @@ func TestDB_SubscribePull_until(t *testing.T) {
|
|||||||
wantedChunksCount++
|
wantedChunksCount++
|
||||||
}
|
}
|
||||||
|
|
||||||
lastTimestampMu.RLock()
|
binIDCounterMu.RLock()
|
||||||
storeTimestamp := lastTimestamp
|
binIDCounter[bin]++
|
||||||
lastTimestampMu.RUnlock()
|
binIDCounterMu.RUnlock()
|
||||||
|
|
||||||
last[bin] = ChunkDescriptor{
|
last[bin] = binIDCounter[bin]
|
||||||
Address: ch.Address(),
|
|
||||||
StoreTimestamp: storeTimestamp,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return last
|
return last
|
||||||
}
|
}
|
||||||
@ -295,11 +269,11 @@ func TestDB_SubscribePull_until(t *testing.T) {
|
|||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
ch, stop := db.SubscribePull(ctx, bin, nil, &until)
|
ch, stop := db.SubscribePull(ctx, bin, 0, until)
|
||||||
defer stop()
|
defer stop()
|
||||||
|
|
||||||
// receive and validate addresses from the subscription
|
// receive and validate addresses from the subscription
|
||||||
go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
|
go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
|
||||||
}
|
}
|
||||||
|
|
||||||
// upload some chunks just after subscribe
|
// upload some chunks just after subscribe
|
||||||
@ -316,30 +290,22 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
|
|||||||
db, cleanupFunc := newTestDB(t, nil)
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
uploader := db.NewPutter(ModePutUpload)
|
|
||||||
|
|
||||||
addrs := make(map[uint8][]chunk.Address)
|
addrs := make(map[uint8][]chunk.Address)
|
||||||
var addrsMu sync.Mutex
|
var addrsMu sync.Mutex
|
||||||
var wantedChunksCount int
|
var wantedChunksCount int
|
||||||
|
|
||||||
lastTimestamp := time.Now().UTC().UnixNano()
|
binIDCounter := make(map[uint8]uint64)
|
||||||
var lastTimestampMu sync.RWMutex
|
var binIDCounterMu sync.RWMutex
|
||||||
defer setNow(func() (t int64) {
|
|
||||||
lastTimestampMu.Lock()
|
|
||||||
defer lastTimestampMu.Unlock()
|
|
||||||
lastTimestamp++
|
|
||||||
return lastTimestamp
|
|
||||||
})()
|
|
||||||
|
|
||||||
uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
|
uploadRandomChunks := func(count int, wanted bool) (last map[uint8]uint64) {
|
||||||
addrsMu.Lock()
|
addrsMu.Lock()
|
||||||
defer addrsMu.Unlock()
|
defer addrsMu.Unlock()
|
||||||
|
|
||||||
last = make(map[uint8]ChunkDescriptor)
|
last = make(map[uint8]uint64)
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
ch := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
err := uploader.Put(ch)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -354,14 +320,11 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
|
|||||||
wantedChunksCount++
|
wantedChunksCount++
|
||||||
}
|
}
|
||||||
|
|
||||||
lastTimestampMu.RLock()
|
binIDCounterMu.RLock()
|
||||||
storeTimestamp := lastTimestamp
|
binIDCounter[bin]++
|
||||||
lastTimestampMu.RUnlock()
|
binIDCounterMu.RUnlock()
|
||||||
|
|
||||||
last[bin] = ChunkDescriptor{
|
last[bin] = binIDCounter[bin]
|
||||||
Address: ch.Address(),
|
|
||||||
StoreTimestamp: storeTimestamp,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return last
|
return last
|
||||||
}
|
}
|
||||||
@ -387,9 +350,10 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
|
|||||||
errChan := make(chan error)
|
errChan := make(chan error)
|
||||||
|
|
||||||
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
|
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
|
||||||
var since *ChunkDescriptor
|
since, ok := upload1[bin]
|
||||||
if c, ok := upload1[bin]; ok {
|
if ok {
|
||||||
since = &c
|
// start from the next uploaded chunk
|
||||||
|
since++
|
||||||
}
|
}
|
||||||
until, ok := upload2[bin]
|
until, ok := upload2[bin]
|
||||||
if !ok {
|
if !ok {
|
||||||
@ -397,11 +361,11 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
|
|||||||
// skip this bin from testing
|
// skip this bin from testing
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
ch, stop := db.SubscribePull(ctx, bin, since, &until)
|
ch, stop := db.SubscribePull(ctx, bin, since, until)
|
||||||
defer stop()
|
defer stop()
|
||||||
|
|
||||||
// receive and validate addresses from the subscription
|
// receive and validate addresses from the subscription
|
||||||
go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
|
go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
|
||||||
}
|
}
|
||||||
|
|
||||||
// upload some chunks just after subscribe
|
// upload some chunks just after subscribe
|
||||||
@ -412,14 +376,14 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
|
|||||||
|
|
||||||
// uploadRandomChunksBin uploads random chunks to database and adds them to
|
// uploadRandomChunksBin uploads random chunks to database and adds them to
|
||||||
// the map of addresses per bin.
|
// the map of addresses per bin.
|
||||||
func uploadRandomChunksBin(t *testing.T, db *DB, uploader *Putter, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, wantedChunksCount *int, count int) {
|
func uploadRandomChunksBin(t *testing.T, db *DB, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, wantedChunksCount *int, count int) {
|
||||||
addrsMu.Lock()
|
addrsMu.Lock()
|
||||||
defer addrsMu.Unlock()
|
defer addrsMu.Unlock()
|
||||||
|
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
ch := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
err := uploader.Put(ch)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -434,10 +398,10 @@ func uploadRandomChunksBin(t *testing.T, db *DB, uploader *Putter, addrs map[uin
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// readPullSubscriptionBin is a helper function that reads all ChunkDescriptors from a channel and
|
// readPullSubscriptionBin is a helper function that reads all chunk.Descriptors from a channel and
|
||||||
// sends error to errChan, even if it is nil, to count the number of ChunkDescriptors
|
// sends error to errChan, even if it is nil, to count the number of chunk.Descriptors
|
||||||
// returned by the channel.
|
// returned by the channel.
|
||||||
func readPullSubscriptionBin(ctx context.Context, bin uint8, ch <-chan ChunkDescriptor, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, errChan chan error) {
|
func readPullSubscriptionBin(ctx context.Context, db *DB, bin uint8, ch <-chan chunk.Descriptor, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, errChan chan error) {
|
||||||
var i int // address index
|
var i int // address index
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
@ -450,9 +414,20 @@ func readPullSubscriptionBin(ctx context.Context, bin uint8, ch <-chan ChunkDesc
|
|||||||
if i+1 > len(addrs[bin]) {
|
if i+1 > len(addrs[bin]) {
|
||||||
err = fmt.Errorf("got more chunk addresses %v, then expected %v, for bin %v", i+1, len(addrs[bin]), bin)
|
err = fmt.Errorf("got more chunk addresses %v, then expected %v, for bin %v", i+1, len(addrs[bin]), bin)
|
||||||
} else {
|
} else {
|
||||||
want := addrs[bin][i]
|
addr := addrs[bin][i]
|
||||||
if !bytes.Equal(got.Address, want) {
|
if !bytes.Equal(got.Address, addr) {
|
||||||
err = fmt.Errorf("got chunk address %v in bin %v %s, want %s", i, bin, got.Address.Hex(), want)
|
err = fmt.Errorf("got chunk bin id %v in bin %v %v, want %v", i, bin, got.Address.Hex(), addr.Hex())
|
||||||
|
} else {
|
||||||
|
want, err := db.retrievalDataIndex.Get(shed.Item{
|
||||||
|
Address: addr,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
err = fmt.Errorf("got chunk (bin id %v in bin %v) from retrieval index %s: %v", i, bin, addrs[bin][i].Hex(), err)
|
||||||
|
} else {
|
||||||
|
if got.BinID != want.BinID {
|
||||||
|
err = fmt.Errorf("got chunk bin id %v in bin %v %v, want %v", i, bin, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
addrsMu.Unlock()
|
addrsMu.Unlock()
|
||||||
@ -486,27 +461,19 @@ func checkErrChan(ctx context.Context, t *testing.T, errChan chan error, wantedC
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestDB_LastPullSubscriptionChunk validates that LastPullSubscriptionChunk
|
// TestDB_LastPullSubscriptionBinID validates that LastPullSubscriptionBinID
|
||||||
// is returning the last chunk descriptor for proximity order bins by
|
// is returning the last chunk descriptor for proximity order bins by
|
||||||
// doing a few rounds of chunk uploads.
|
// doing a few rounds of chunk uploads.
|
||||||
func TestDB_LastPullSubscriptionChunk(t *testing.T) {
|
func TestDB_LastPullSubscriptionBinID(t *testing.T) {
|
||||||
db, cleanupFunc := newTestDB(t, nil)
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
defer cleanupFunc()
|
defer cleanupFunc()
|
||||||
|
|
||||||
uploader := db.NewPutter(ModePutUpload)
|
|
||||||
|
|
||||||
addrs := make(map[uint8][]chunk.Address)
|
addrs := make(map[uint8][]chunk.Address)
|
||||||
|
|
||||||
lastTimestamp := time.Now().UTC().UnixNano()
|
binIDCounter := make(map[uint8]uint64)
|
||||||
var lastTimestampMu sync.RWMutex
|
var binIDCounterMu sync.RWMutex
|
||||||
defer setNow(func() (t int64) {
|
|
||||||
lastTimestampMu.Lock()
|
|
||||||
defer lastTimestampMu.Unlock()
|
|
||||||
lastTimestamp++
|
|
||||||
return lastTimestamp
|
|
||||||
})()
|
|
||||||
|
|
||||||
last := make(map[uint8]ChunkDescriptor)
|
last := make(map[uint8]uint64)
|
||||||
|
|
||||||
// do a few rounds of uploads and check if
|
// do a few rounds of uploads and check if
|
||||||
// last pull subscription chunk is correct
|
// last pull subscription chunk is correct
|
||||||
@ -516,7 +483,7 @@ func TestDB_LastPullSubscriptionChunk(t *testing.T) {
|
|||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
ch := generateTestRandomChunk()
|
ch := generateTestRandomChunk()
|
||||||
|
|
||||||
err := uploader.Put(ch)
|
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -528,32 +495,42 @@ func TestDB_LastPullSubscriptionChunk(t *testing.T) {
|
|||||||
}
|
}
|
||||||
addrs[bin] = append(addrs[bin], ch.Address())
|
addrs[bin] = append(addrs[bin], ch.Address())
|
||||||
|
|
||||||
lastTimestampMu.RLock()
|
binIDCounterMu.RLock()
|
||||||
storeTimestamp := lastTimestamp
|
binIDCounter[bin]++
|
||||||
lastTimestampMu.RUnlock()
|
binIDCounterMu.RUnlock()
|
||||||
|
|
||||||
last[bin] = ChunkDescriptor{
|
last[bin] = binIDCounter[bin]
|
||||||
Address: ch.Address(),
|
|
||||||
StoreTimestamp: storeTimestamp,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// check
|
// check
|
||||||
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
|
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
|
||||||
want, ok := last[bin]
|
want, ok := last[bin]
|
||||||
got, err := db.LastPullSubscriptionChunk(bin)
|
got, err := db.LastPullSubscriptionBinID(bin)
|
||||||
if ok {
|
if ok {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("got unexpected error value %v", err)
|
t.Errorf("got unexpected error value %v", err)
|
||||||
}
|
}
|
||||||
if !bytes.Equal(got.Address, want.Address) {
|
}
|
||||||
t.Errorf("got last address %s, want %s", got.Address.Hex(), want.Address.Hex())
|
if got != want {
|
||||||
}
|
t.Errorf("got last bin id %v, want %v", got, want)
|
||||||
} else {
|
|
||||||
if err != chunk.ErrChunkNotFound {
|
|
||||||
t.Errorf("got unexpected error value %v, want %v", err, chunk.ErrChunkNotFound)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestAddressInBin validates that function addressInBin
|
||||||
|
// returns a valid address for every proximity order bin.
|
||||||
|
func TestAddressInBin(t *testing.T) {
|
||||||
|
db, cleanupFunc := newTestDB(t, nil)
|
||||||
|
defer cleanupFunc()
|
||||||
|
|
||||||
|
for po := uint8(0); po < chunk.MaxPO; po++ {
|
||||||
|
addr := db.addressInBin(po)
|
||||||
|
|
||||||
|
got := db.po(addr)
|
||||||
|
|
||||||
|
if got != uint8(po) {
|
||||||
|
t.Errorf("got po %v, want %v", got, po)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -19,10 +19,15 @@ package localstore
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/shed"
|
"github.com/ethereum/go-ethereum/swarm/shed"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
||||||
|
"github.com/opentracing/opentracing-go"
|
||||||
|
olog "github.com/opentracing/opentracing-go/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SubscribePush returns a channel that provides storage chunks with ordering from push syncing index.
|
// SubscribePush returns a channel that provides storage chunks with ordering from push syncing index.
|
||||||
@ -30,6 +35,9 @@ import (
|
|||||||
// the returned channel without any errors. Make sure that you check the second returned parameter
|
// the returned channel without any errors. Make sure that you check the second returned parameter
|
||||||
// from the channel to stop iteration when its value is false.
|
// from the channel to stop iteration when its value is false.
|
||||||
func (db *DB) SubscribePush(ctx context.Context) (c <-chan chunk.Chunk, stop func()) {
|
func (db *DB) SubscribePush(ctx context.Context) (c <-chan chunk.Chunk, stop func()) {
|
||||||
|
metricName := "localstore.SubscribePush"
|
||||||
|
metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
|
||||||
|
|
||||||
chunks := make(chan chunk.Chunk)
|
chunks := make(chan chunk.Chunk)
|
||||||
trigger := make(chan struct{}, 1)
|
trigger := make(chan struct{}, 1)
|
||||||
|
|
||||||
@ -44,6 +52,7 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan chunk.Chunk, stop fun
|
|||||||
var stopChanOnce sync.Once
|
var stopChanOnce sync.Once
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
|
defer metrics.GetOrRegisterCounter(metricName+".done", nil).Inc(1)
|
||||||
// close the returned chunkInfo channel at the end to
|
// close the returned chunkInfo channel at the end to
|
||||||
// signal that the subscription is done
|
// signal that the subscription is done
|
||||||
defer close(chunks)
|
defer close(chunks)
|
||||||
@ -57,6 +66,12 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan chunk.Chunk, stop fun
|
|||||||
// - last index Item is reached
|
// - last index Item is reached
|
||||||
// - subscription stop is called
|
// - subscription stop is called
|
||||||
// - context is done
|
// - context is done
|
||||||
|
metrics.GetOrRegisterCounter(metricName+".iter", nil).Inc(1)
|
||||||
|
|
||||||
|
ctx, sp := spancontext.StartSpan(ctx, metricName+".iter")
|
||||||
|
|
||||||
|
iterStart := time.Now()
|
||||||
|
var count int
|
||||||
err := db.pushIndex.Iterate(func(item shed.Item) (stop bool, err error) {
|
err := db.pushIndex.Iterate(func(item shed.Item) (stop bool, err error) {
|
||||||
// get chunk data
|
// get chunk data
|
||||||
dataItem, err := db.retrievalDataIndex.Get(item)
|
dataItem, err := db.retrievalDataIndex.Get(item)
|
||||||
@ -66,6 +81,7 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan chunk.Chunk, stop fun
|
|||||||
|
|
||||||
select {
|
select {
|
||||||
case chunks <- chunk.NewChunk(dataItem.Address, dataItem.Data):
|
case chunks <- chunk.NewChunk(dataItem.Address, dataItem.Data):
|
||||||
|
count++
|
||||||
// set next iteration start item
|
// set next iteration start item
|
||||||
// when its chunk is successfully sent to channel
|
// when its chunk is successfully sent to channel
|
||||||
sinceItem = &item
|
sinceItem = &item
|
||||||
@ -87,7 +103,20 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan chunk.Chunk, stop fun
|
|||||||
// iterator call, skip it in this one
|
// iterator call, skip it in this one
|
||||||
SkipStartFromItem: true,
|
SkipStartFromItem: true,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
totalTimeMetric(metricName+".iter", iterStart)
|
||||||
|
|
||||||
|
sp.FinishWithOptions(opentracing.FinishOptions{
|
||||||
|
LogRecords: []opentracing.LogRecord{
|
||||||
|
{
|
||||||
|
Timestamp: time.Now(),
|
||||||
|
Fields: []olog.Field{olog.Int("count", count)},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
metrics.GetOrRegisterCounter(metricName+".iter.error", nil).Inc(1)
|
||||||
log.Error("localstore push subscription iteration", "err", err)
|
log.Error("localstore push subscription iteration", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
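SubscribePush keeps its signature but, like the pull subscription, now records a counter metric, an iteration timer and a tracing span carrying the delivered chunk count. A minimal consumer sketch, not part of this commit, assuming a *localstore.DB value named db and the Address()/Data() accessors of chunk.Chunk (pushLoop is a hypothetical helper):

package example

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// pushLoop logs every chunk delivered by the push-sync subscription; the
// channel is closed when the subscription ends or stop is called.
func pushLoop(ctx context.Context, db *localstore.DB) {
	chunks, stop := db.SubscribePush(ctx)
	defer stop()

	for c := range chunks {
		log.Printf("push-sync chunk %x (%d bytes)", c.Address(), len(c.Data()))
	}
}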